text stringlengths 8 6.05M |
|---|
def ping(host):
    """
    Return True if *host* (str) responds to a single ping request.

    Remember that a host may not respond to a ping (ICMP) request even if
    the host name is valid (firewalls commonly drop ICMP), so False does
    not prove the host is down.
    """
    import platform    # For getting the operating system name
    import subprocess  # For executing a shell command

    # Windows ping counts packets with "-n"; POSIX ping uses "-c".
    param = '-n' if platform.system().lower() == 'windows' else '-c'
    # Building the command. Ex: "ping -c 1 google.com"
    command = ['ping', param, '1', host]
    # Discard ping's console output; only the exit status (0 == reachable)
    # matters to the caller.
    return subprocess.call(command,
                           stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL) == 0
def connect_to_db_server(user, password,
                         database, host="db", port="5432"):
    """Connect to a PostgreSQL server with the provided credentials.

    Returns an open psycopg2 connection on success, or None on failure
    (connection errors are swallowed deliberately -- callers must check
    for None).
    """
    from psycopg2 import connect
    try:
        # Removed the unused cursor that was created and discarded here:
        # callers receive the connection and open their own cursors.
        return connect(
            user=user,
            password=password,
            database=database,
            host=host,
            port=port)
    except Exception:
        # Best-effort connect: signal failure with None rather than raising.
        return None
import nltk
from nltk.tokenize import word_tokenize
from collections import Counter
nltk.download('wordnet') #download if using this module for the first time
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
nltk.download('stopwords') #download if using this module for the first time
#For Gensim
import gensim
import string
from gensim import corpora
from gensim.corpora.dictionary import Dictionary
from nltk.tokenize import word_tokenize
# read article for refrence https://www.pluralsight.com/guides/topic-identification-nlp
def extract_topic(data, num_topics=2, num_words=5):
    """Run LDA topic modelling over newline-separated documents in *data*.

    Returns ``num_topics`` topics, each described by ``num_words`` words.
    Reference: https://www.pluralsight.com/guides/topic-identification-nlp
    """
    documents = data.split('\n')
    stop_words = set(stopwords.words('english'))
    punctuation = set(string.punctuation)
    lemmatizer = WordNetLemmatizer()

    def normalize(doc):
        # Pipeline: stopword removal -> punctuation stripping -> lemmatization.
        without_stops = " ".join(w for w in doc.lower().split() if w not in stop_words)
        without_punct = ''.join(ch for ch in without_stops if ch not in punctuation)
        return " ".join(lemmatizer.lemmatize(w) for w in without_punct.split())

    tokenized = [normalize(doc).split() for doc in documents]
    vocabulary = corpora.Dictionary(tokenized)
    bow_matrix = [vocabulary.doc2bow(doc) for doc in tokenized]
    lda_model = gensim.models.ldamodel.LdaModel(bow_matrix, num_topics=num_topics, id2word=vocabulary)
    return lda_model.print_topics(num_topics=num_topics, num_words=num_words)
import sys
import time
import threading
import vtrace
import vtrace.notifiers as vt_notif
import vdb.testmods as v_testmods
class ExecThreadTest(v_testmods.VtracePythonTest, vt_notif.Notifier):
    """Trace a child process that spawns a thread; verify that thread
    create/exit notifications fire and that the child exits with code 35."""

    modname = 'vdb.testmods.execthreadtest'

    def __init__(self):
        v_testmods.VtracePythonTest.__init__(self)
        vt_notif.Notifier.__init__(self)
        # Flipped by notify() when the corresponding vtrace event arrives.
        self.got_thread_create = False
        self.got_thread_exit = False

    def notify(self, event, trace):
        # Only the two thread lifecycle events are of interest here.
        if event == vtrace.NOTIFY_CREATE_THREAD:
            self.got_thread_create = True
        elif event == vtrace.NOTIFY_EXIT_THREAD:
            self.got_thread_exit = True

    def runTest(self):
        self.trace.setMode('RunForever', True)
        self.trace.registerNotifier(vtrace.NOTIFY_ALL, self)
        self.trace.run()
        assert self.got_thread_create
        assert self.got_thread_exit
        assert self.trace.getMeta('ExitCode', 0) == 35
if __name__ == '__main__':
    # Child-process body for the test above: pause briefly, run one
    # short-lived thread (triggers create/exit notifications), then exit
    # with the status code runTest() asserts on (35).
    time.sleep(0.2)
    worker = threading.Thread(target=time.sleep, args=(0.1,))
    worker.start()
    worker.join()
    sys.exit(35)
|
from main.page.desktop_v3.sales.pe_myshop_order_base import *
from selenium.webdriver.common.by import By
from random import randint
import time
class MyshopOrderListPage(MyshopOrderBasePage):
    """Selenium page object for the seller order-list page
    (myshop_order_list.pl): element locators for the invoice-search form
    and the order table, plus actions to open the page and read the most
    recent invoice number."""
    _page = "myshop_order_list.pl"
    #LOCATORS
    #Search Invoice form
    _search_invoice_bar_loc = (By.CSS_SELECTOR, 'div.row-fluid form#form-filter div.input-append input.input-medium')
    _t_status_select_box_loc = (By.CSS_SELECTOR, 'div.row-fluid form#form-filter div.input-append a.selectBox')
    _start_date_loc = (By.CSS_SELECTOR, 'div.row-fluid form#form-filter div.input-append input#start-date')
    _end_date_loc = (By.CSS_SELECTOR, 'div.row-fluid form#form-filter div.input-append input#end-date')
    _search_invoice_button_loc = (By.CSS_SELECTOR, 'div.row-fluid form#form-filter div.input-append button.btn')
    #Hide/Show All button ("Sembunyikan/Tampilkan Semua")
    _collapse_show_all_loc = (By.CSS_SELECTOR, 'div#change-template a#collapse_show_all span#colapse_show_open')
    #Transaction table container
    _table_transaction_loc = (By.CSS_SELECTOR, 'div.row-fluid div.span12 div.list-box-content table.transaction-table')
    #Invoice Link
    _t_invoice_loc = (By.XPATH, '/html/body/div[1]/div[5]/div/div[2]/div[3]/div/div/div/table/tbody/tr[1]/td[2]/div[1]/a/b')
    #Buyer Name Link
    _buyer_name_loc = (By.XPATH, '/html/body/div[1]/div[5]/div/div[2]/div[3]/div/div/div/table/tbody/tr[1]/td[1]/a')
    #Snapshot Product Link (Single)
    _snapshot_product_link = (By.XPATH, '/html/body/div[1]/div[5]/div/div[2]/div[3]/div/div/div/table/tbody/tr[2]/td/table/tbody/tr[2]/td[1]/span[2]/a')
    #Per-transaction Hide/Show button ("Sembunyikan/Tampilkan per transaction")
    _list_collapse_show_transaction_loc = (By.XPATH, '/html/body/div[1]/div[5]/div/div[2]/div[3]/div/div/div/table[1]/tbody/tr[1]/td[2]/div[5]/small[1]')
    _last_order_loc = (By.XPATH, "//*[@class='list-box-content']/table")
    #Action
    def open(self, site=""):
        """Navigate the driver to this page on the given site."""
        self._open(site, self._page)
    def get_last_inv(self):
        """Return the invoice number (text) of the most recent order.

        Reads the id attribute of the first row of the order table, then
        re-queries by that id to reach the invoice link.
        """
        last_order = self.driver.find_element(*self._last_order_loc)
        id_order = last_order.find_element(By.TAG_NAME, "tr").get_attribute("id")
        self.inv = self.driver.find_element(By.XPATH, "//*[@id='"+ id_order +"']/td[2]/a/b")
        return self.inv.text
|
from .. import app
from ..database.boards import Board
from ..database.categories import Category
from flask import render_template
@app.route("/board/<board_id>/")
def category(board_id):
    """Render a board page with its parent category and its topics."""
    board = Board.objects(board_id=board_id).first()
    # First category whose board list contains this board
    # (raises IndexError -> 500 if no category claims the board).
    owning = [cat for cat in Category.objects if board in cat.boards]
    return render_template("board.html", category=owning[0], board=board,
                           topics=board.topics)
|
'''
OpenCV检测图像中的物体并将物体裁剪下来
步骤 :
step1:加载图片,转成灰度图
step2:用Sobel算子计算x,y方向上的梯度,之后在x方向上减去y方向上的梯度,通过这个减法,我们留下具有高水平梯度和低垂直梯度的图像区域
step3:去除图像上的噪声。首先使用低通滤泼器平滑图像(9 x 9内核),这将有助于平滑图像中的高频噪声。低通滤波器的目标是降低图像的变化率。如将每个像素替换为该像素周围像素的均值。这样就可以平滑并替代那些强度变化明显的区域
step4:在上图中我们看到蜜蜂身体区域有很多黑色的空余,我们要用白色填充这些空余,使得后面的程序更容易识别昆虫区域,这需要做一些形态学方面的操作
step5:从上图我们发现图像上还有一些小的白色斑点,这会干扰之后的昆虫轮廓的检测,要把它们去掉。分别执行4次形态学腐蚀与膨胀
step6:找出昆虫区域的轮廓。cv2.findContours()函数第一个参数是要检索的图片,必须是为二值图,即黑白的(不是灰度图),所以读取的图像要先转成灰度的,再转成二值图,我们在第三步用cv2.threshold()函数已经得到了二值图。
第二个参数表示轮廓的检索模式,有四种:
cv2.RETR_EXTERNAL表示只检测外轮廓
cv2.RETR_LIST检测的轮廓不建立等级关系
cv2.RETR_CCOMP建立两个等级的轮廓,上面的一层为外边界,里面的一层为内孔的边界信息。如果内孔内还有一个连通物体,这个物体的边界也在顶层。
cv2.RETR_TREE建立一个等级树结构的轮廓。
第三个参数为轮廓的近似方法
cv2.CHAIN_APPROX_NONE存储所有的轮廓点,相邻的两个点的像素位置差不超过1,即max(abs(x1-x2),abs(y2-y1))==1
cv2.CHAIN_APPROX_SIMPLE压缩水平方向,垂直方向,对角线方向的元素,只保留该方向的终点坐标,例如一个矩形轮廓只需4个点来保存轮廓信息
cv2.findContours()函数返回两个值,一个是轮廓本身,还有一个是每条轮廓对应的属性。cv2.findContours()函数返回第一个值是list,list中每个元素都是图像中的一个轮廓,用numpy中的ndarray表示。每一个ndarray里保存的是轮廓上的各个点的坐标。我们把list排序,点最多的那个轮廓就是我们要找的昆虫的轮廓。
OpenCV中通过cv2.drawContours在图像上绘制轮廓。
第一个参数是指明在哪幅图像上绘制轮廓
第二个参数是轮廓本身,在Python中是一个list
第三个参数指定绘制轮廓list中的哪条轮廓,如果是-1,则绘制其中的所有轮廓
第四个参数是轮廓线条的颜色
第五个参数是轮廓线条的粗细
step7:裁剪。box里保存的是绿色矩形区域四个顶点的坐标。我将按下图红色矩形所示裁剪昆虫图像。找出四个顶点的x,y坐标的最大最小值。新图像的高=maxY-minY,宽=maxX-minX
'''
import cv2
import numpy as np

# Load the image and convert it to grayscale.
image = cv2.imread("353.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Sobel gradients. BUGFIX: cv2.cv.CV_32F is the removed OpenCV 1.x alias;
# modern bindings expose the constant as cv2.CV_32F.
gradX = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradY = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=-1)
# Subtract the y-gradient from the x-gradient: keeps regions with a high
# horizontal and low vertical gradient.
gradient = cv2.subtract(gradX, gradY)
gradient = cv2.convertScaleAbs(gradient)
# Low-pass blur (9x9) then threshold to a binary image.
blurred = cv2.blur(gradient, (9, 9))
(_, thresh) = cv2.threshold(blurred, 90, 255, cv2.THRESH_BINARY)
# Morphological close to fill black holes inside the subject region.
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 25))
closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# 4 iterations of erosion and dilation to remove small white specks.
closed = cv2.erode(closed, None, iterations=4)
closed = cv2.dilate(closed, None, iterations=4)
# BUGFIX: findContours returns (contours, hierarchy) in OpenCV 2/4 but
# (image, contours, hierarchy) in OpenCV 3 -- pick the right element so
# the script runs on any of them.
found = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = found[0] if len(found) == 2 else found[1]
# Largest contour by area is assumed to be the subject.
c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
# Minimum-area (possibly rotated) rectangle around it.
rect = cv2.minAreaRect(c)
# BUGFIX: cv2.cv.BoxPoints was removed with the OpenCV 1.x API; the
# replacement is cv2.boxPoints.
box = np.int0(cv2.boxPoints(rect))
cv2.drawContours(image, [box], -1, (0, 255, 0), 3)
cv2.imshow("Image", image)
cv2.imwrite("contoursImage2.jpg", image)
cv2.waitKey(0)
# Crop the axis-aligned bounding box of the rotated rectangle: take the
# min/max x and y of its four corners.
Xs = [i[0] for i in box]
Ys = [i[1] for i in box]
x1 = min(Xs)
x2 = max(Xs)
y1 = min(Ys)
y2 = max(Ys)
height = y2 - y1
width = x2 - x1
cropImg = image[y1:y1 + height, x1:x1 + width]
|
# Copyright (c) 2013, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
	"""Frappe report entry point.

	Builds and returns the (columns, data) pair for the report type
	selected in *filters*.
	"""
	# Removed the dead `columns, data = [], []` initialisation -- both
	# names were immediately reassigned.
	columns = get_columns(filters)
	data = get_data(filters)
	return columns, data
def get_columns(filters):
	"""Return the report column definitions for the selected report type.

	Column strings follow frappe's "Label:Fieldtype/Options:Width" format.
	"""
	if filters.get("report_type") == "Discussion Topic Report":
		return [
			_("Topic Posted By (User)") + ":Link/User:200",
			_("Topic Name") + ":Data:250",
			_("Discussion Category") + ":Link/Discussion Category:180",
			_("Posted Datetime") + ":Datetime:200"
		]
	else:
		return [
			# BUGFIX: label typo "Commentar" -> "Commenter".
			_("Topic Commenter (User)") + ":Link/User:200",
			_("Topic Name") + ":Data:250",
			_("Comment Count") + ":Int:150"
		]
def get_data(filters):
	"""Return report rows for the selected report type and time window.

	Discussion Topic Report: one row per topic created in the window.
	Otherwise: one row per (commenter, topic) with the comment count.
	Results come back as lists (as_list=1) matching get_columns().
	"""
	if filters.get("report_type") == "Discussion Topic Report":
		return frappe.db.sql(""" select owner,title,blog_category, creation
			from `tabDiscussion Topic`
			where creation between %(start_time)s and %(end_time)s """,
			{"start_time":filters.get("start_time"), "end_time":filters.get("end_time")}, as_list=1)
	else:
		return frappe.db.sql(""" select com.comment_by, topic.title , count(com.name) from
			`tabComment` com
			join `tabDiscussion Topic` topic
			on com.comment_docname = topic.name
			where com.comment_doctype = "Discussion Topic" and com.comment_type = "Comment"
			and com.creation between %(start_time)s and %(end_time)s
			group by com.comment_by, com.comment_docname """,
			{"start_time":filters.get("start_time"), "end_time":filters.get("end_time")}, as_list=1)
|
import os
class Config:
    """Base Flask configuration shared by all environments."""
    # BASE CONFIG
    SECRET_KEY = os.getenv("SECRET_KEY")  # read from the environment; None if unset
    STATIC_FOLDER = "static"
    TEMPLATES_FOLDER = "templates"
'''
Here we can extend the base Config class to define separate "Production" and "Development" configs, allowing us to easily test using a local sqlite database, but use a full-blown SQL server in prod.
'''
class ProductionConfig(Config):
    """Production settings: testing and debugging disabled."""
    TESTING = False
    DEBUG = False
    FLASK_ENV = "production"
    # DATABASE
    # NOTE(review): production currently points at a local sqlite file named
    # "test.db" -- presumably a placeholder; confirm before deploying.
    DB_FILENAME = "test.db"
    SQLALCHEMY_DATABASE_URI = "sqlite:///" + DB_FILENAME
    SQLALCHEMY_TRACK_MODIFICATIONS = False
class DevelopmentConfig(Config):
    """Development settings: testing and debugging enabled, local sqlite DB."""
    TESTING = True
    DEBUG = True
    FLASK_ENV = "development"
    # DATABASE
    DB_FILENAME = "test.db"
    SQLALCHEMY_DATABASE_URI = "sqlite:///" + DB_FILENAME
    SQLALCHEMY_TRACK_MODIFICATIONS = False
|
def printFile(fileIn):
    """Print the entire contents of the UTF-8 text file *fileIn* to stdout."""
    with open(fileIn, mode="r", encoding='utf8') as handle:
        print(handle.read())
def duplicateFile(*a):
    """Copy a[0] to a[1] and echo both files to stdout.

    Three arguments is treated as "do not overwrite"; any other arity
    prints a usage hint. Always returns None.
    """
    print()
    print("***duplicate file")
    if len(a) == 2:
        source, target = a
        with open(source, mode="r", encoding='utf8') as old_handle:
            with open(target, mode="w+", encoding='utf8') as new_handle:
                new_handle.writelines(old_handle.readlines())
        print("oldFile :", source)
        printFile(source)
        print("newFile :", target)
        printFile(target)
    elif len(a) == 3:
        print("receive dont eccrasement, nothing to do")
    else:
        print("please use 2 or 3 arguments")
    return None
# Demo call: copy old.txt to new.txt (hard-coded Windows paths).
duplicateFile("C:\\Users\\gk\\Documents\\myPython\\Day2\\old.txt", "C:\\Users\\gk\\Documents\\myPython\\Day2\\new.txt")
def cryptFile(fileIn, fileOut, shift):
    """Caesar-shift every character of *fileIn* by *shift* code points and
    write the result to *fileOut*.

    A shift of exactly 3 is refused -- per the original author it misbehaves
    with Windows CRLF line endings (TODO confirm).
    """
    if shift == 3:
        print("Warning : offset is 3 which will have unexpected behavior on Windows for CRCL, nothing to do")
        return
    print()
    print("***crypt file")
    print("fileIn path : ", fileIn)
    print("shift :", shift)
    delta = int(shift)
    with open(fileIn, mode="r", encoding='utf8') as reader:
        with open(fileOut, mode="w+", encoding='utf8') as writer:
            for ch in reader.read():
                writer.write(chr(ord(ch) + delta))
    print("fileOut path : ", fileOut)
shift_amount = 3
# Encrypt a source file, then decrypt it back with the opposite shift.
# (Note: a shift of 3 trips cryptFile's guard, so the first call is a no-op.)
cryptFile("C:\\Users\\gk\\Documents\\myPython\\Day1\\gnuText.py", "C:\\Users\\gk\\Documents\\myPython\\Day2\\oldCrypted.txt", shift_amount)
cryptFile("C:\\Users\\gk\\Documents\\myPython\\Day2\\oldCrypted.txt", "C:\\Users\\gk\\Documents\\myPython\\Day2\\oldDecrypted", -shift_amount)
print()
print("encoding :")
from sys import getdefaultencoding as enc_py
print(enc_py())
from sys import getfilesystemencoding as enc_sys
print(enc_sys())
import random
# Number-guessing game: pick a random number in [1, upperbound], then
# narrow the displayed bounds as the player guesses.
upperbound = int(input("what will the upperbound be"))
lowerbound = 1
randonumber = random.randint(lowerbound,upperbound)
randostring = str(randonumber)  # guesses are compared as strings first
GuessedNumber = input("guess the random number")
numberoftrys = 1
Winnmessage = ("you won")
while GuessedNumber != randostring :
    # ":)" is an easter-egg escape hatch out of the loop.
    if GuessedNumber == ":)":
        print("nice you found a secret")
        break
    # "guesses" reports the try count without spending a turn.
    if GuessedNumber == "guesses":
        print (f"you have taken {numberoftrys} guesses")
        GuessedNumber = input(f"guess the random number, bounds are {lowerbound} to {upperbound}, guess again")
        continue
    GuessedNumber = int(GuessedNumber)
    if GuessedNumber > randonumber:
        print("to high,guess again")
        # Tighten the upper bound shown in the next prompt.
        if upperbound > GuessedNumber:
            upperbound = GuessedNumber
    elif GuessedNumber < randonumber:
        print ("too low, guess again")
        # Tighten the lower bound shown in the next prompt.
        if lowerbound < GuessedNumber:
            lowerbound = GuessedNumber
    numberoftrys +=1
    GuessedNumber = input(f"guess the random number, bounds are {lowerbound} to {upperbound}, guess again")
print(Winnmessage)
print(f"you took {numberoftrys} trys")
|
def prepare_country_stats(oecd_bli, gdp_per_capita):
    """Merge OECD Better Life data with GDP per capita, keyed by country.

    Mutates *gdp_per_capita* in place (rename + set_index), as the original
    did. Returns a frame with "GDP per capita" and "Life satisfaction"
    columns, sorted by GDP per capita, with a fixed subset of the first 36
    rows removed (outliers held back for the textbook example).
    """
    bli = oecd_bli[oecd_bli["INEQUALITY"] == "TOT"]
    bli = bli.pivot(index="Country", columns="Indicator", values="Value")
    gdp_per_capita.rename(columns={"2015": "GDP per capita"}, inplace=True)
    gdp_per_capita.set_index("Country", inplace=True)
    merged = pd.merge(left=bli, right=gdp_per_capita,
                      left_index=True, right_index=True)
    merged.sort_values(by="GDP per capita", inplace=True)
    dropped = {0, 1, 6, 8, 33, 34, 35}
    kept = [i for i in range(36) if i not in dropped]
    return merged[["GDP per capita", 'Life satisfaction']].iloc[kept]
import os
datapath = os.path.join("datasets", "lifesat", "")
# Sample code
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.linear_model
# Load the data
oecd_bli = pd.read_csv(datapath + "oecd_bli_2015.csv", thousands=',')
gdp_per_capita = pd.read_csv(datapath + "gdp_per_capita.csv",thousands=',',delimiter='\t',
encoding='latin1', na_values="n/a")
# Prepare the data (note: mutates gdp_per_capita in place)
country_stats = prepare_country_stats(oecd_bli, gdp_per_capita)
X = np.c_[country_stats["GDP per capita"]]
y = np.c_[country_stats["Life satisfaction"]]
# Visualize the data
# ax = country_stats.plot(kind='scatter', x="GDP per capita", y='Life satisfaction')
# ax.set(xlabel="GDP per capita", ylabel="Life satisfaction")
# plt.show()
# Select a linear model
model = sklearn.linear_model.LinearRegression()
# Train the model
print(X)
print(y)
model.fit(X, y)
# Make a prediction for Cyprus
X_new = [[22587]]  # Cyprus' GDP per capita
print(model.predict(X_new))  # outputs [[ 5.96242338]]
import sklearn.neighbors
# Swapping the linear model for a k-nearest-neighbors regressor
knn = sklearn.neighbors.KNeighborsRegressor(n_neighbors=3)
# Train the model
knn.fit(X, y)
# Make a prediction for Cyprus
print(knn.predict(X_new))  # outputs [[ 5.76666667]]
from gosnu import Connection

# Open a connection, attach a consumer, and print two consumed messages.
with Connection('127.0.0.1') as conn:
    conn.connect()
    consumer = conn.Consumer()
    for _ in range(2):
        print(consumer.consume())
|
import asyncio
import os
from parse_header import HTTPHeader
from mime_type import types
from urllib.parse import unquote
async def dispatch(reader, writer):
    """Serve one HTTP/1.0 connection: static files, directory listings,
    byte-Range requests, and a "last visited directory" cookie.

    Only GET and HEAD are accepted (anything else gets a 405).
    NOTE(review): `method` and `path` are only bound while parsing the
    first request line; a request that starts with a blank line would
    leave them unbound -- preserved as-is, flag for follow-up.
    """
    header = HTTPHeader()
    query = {}           # parsed request headers: name -> value
    text = None          # HTML listing when the target is a directory
    File = None          # open file object when the target is a file
    flag = 0             # 1 -> unsupported method (respond 405)
    file_not_found = 0   # 1 -> target is neither file nor directory (404)
    count = 0
    last_dir = './'
    # Read the request line plus headers up to the blank separator line.
    while True:
        data = await reader.readline()
        message = data.decode()
        if data == b'\r\n':
            break
        if count == 0:
            header.parse_header(message)
            path = header.get('path')
            method = header.get('method')
        else:
            k, v = message.split(': ')
            v = v.replace("\r\n","")
            query[k] = v
        count = count + 1
    print(query)
    if method != 'GET' and method != 'HEAD':
        flag = 1
    if header.get('path') is not None:
        path = './' + header.get('path')
    else:
        path = './/'
    path = path[:-1]      # strip the trailing '/'
    path = unquote(path)  # decode percent-escapes
    if not os.path.isfile(path):
        if os.path.isdir(path):
            text = html_render(path)
        else:
            file_not_found = 1
    else:
        File = open(path,"rb+")
    if 'Cookie' in query:
        cookie = query['Cookie']
        print(cookie)
        last_dir = cookie.split('last_dir=')[1]
        print(last_dir)
        if path == './':
            # Root request: redirect back to the directory saved in the cookie.
            print("redirect")
            if(last_dir != './' and last_dir != '..'):
                res = last_dir.split('./')[1]
                print(last_dir)
                writer.writelines([
                    b'HTTP/1.0 302 Found\r\n',
                    b'Location: '+b'http://127.0.0.1:8080'+res.encode()+b'/'+b'\r\n',
                    b'Connection: close\r\n',
                    b'\r\n',
                ])
    if flag == 1:
        writer.writelines([
            b'HTTP/1.0 405 Method Not Allowed\r\n',
            b'Content-Type:text/html; charset=utf-8\r\n',
            b'Connection: close\r\n',
            b'\r\n',
            b'<html><body>HTTP/1.0 405 Method Not Allowed<body></html>\r\n',
            b'\r\n'
        ])
    else:
        if text is not None:
            # Directory listing.
            writer.writelines([
                b'HTTP/1.0 200 OK\r\n',
                b'Content-Type:text/html; charset=utf-8\r\n',
                b'Connection: close\r\n',
                b'Set-Cookie: last_dir='+path.encode()+b';path=/;'+b'\r\n',
                b'\r\n',
                text.encode()+b'\r\n',
                b'\r\n'
            ])
        else:
            last_dir = os.path.pardir
            if file_not_found:
                writer.writelines([
                    b'HTTP/1.0 404 Not Found\r\n',
                    b'Content-Type:text/html; charset=utf-8\r\n',
                    b'Connection: close\r\n',
                    b'Set-Cookie: last_dir='+last_dir.encode()+b';path=/;'+b'\r\n',
                    b'\r\n',
                    b'<html><body>HTTP/1.0 404 Not Found<body></html>\r\n',
                    b'\r\n'
                ])
            else:
                content = File.read()
                length = str(os.path.getsize(path))
                tmp = path.split('.')
                suffix = tmp[-1]
                mime_type = types.get(suffix)
                if mime_type is None:
                    mime_type = 'application/octet-stream'
                if 'Range' in query:
                    # Partial content: "Range: bytes=s-e" (e may be empty).
                    range_str = query['Range'].split('=')[1]
                    print(range_str)
                    s, e = range_str.split('-')
                    s = int(s)
                    # BUGFIX: was `e is not ''` -- identity comparison on a
                    # str (SyntaxWarning on 3.8+); use value comparison.
                    if e != '':
                        e = int(e)
                    else:
                        e = int(length)-1
                    content_length = e-s+1
                    File.seek(s,0)
                    # After the branch above `e` is always an int, so the
                    # second `e is not ''` check the original made was
                    # always true -- read the requested span directly.
                    p_content = File.read(e-s+1)
                    writer.writelines([
                        b'HTTP/1.0 206 Partial Content\r\n',
                        b'Content-Type: '+mime_type.encode()+b'; charset=utf-8\r\n',
                        b'Content-Length: '+str(content_length).encode()+b'\r\n',
                        b'Content-Range: bytes '+ str(s).encode()+b'-'+str(e).encode()+ b'/'+length.encode()+b'\r\n',
                        b'Connection: keep-alive\r\n',
                        b'Set-Cookie: last_dir='+last_dir.encode()+b';path=/;'+b'\r\n',
                        b'\r\n',
                        p_content
                    ])
                else:
                    writer.writelines([
                        b'HTTP/1.0 200 OK\r\n',
                        b'Content-Type: '+mime_type.encode()+b'; charset=utf-8\r\n',
                        b'Content-Length: '+length.encode()+b'\r\n',
                        b'Connection: close\r\n',
                        # BUGFIX: the original omitted the ';' between the
                        # cookie value and its path attribute, corrupting
                        # the Set-Cookie header on plain 200 responses.
                        b'Set-Cookie: last_dir='+last_dir.encode()+b';path=/;'+b'\r\n',
                        b'\r\n',
                        content
                    ])
    writer.close()
    if File:
        File.close()
def html_render(path):
    """Build a minimal HTML directory index for *path*.

    Each entry in the directory becomes a link with a trailing slash.
    """
    entries = os.listdir(path)
    pieces = [
        "<html><head><title>Index of" + path + "</title></head>",
        "<body bgcolor=\"white\">",
        "<h1>Index of" + path + "</h1><hr><pre>",
    ]
    for entry in entries:
        pieces.append("<a href=\"" + entry + "/\">" + entry + "/</a><br>")
    pieces.append("</pre><hr></body></html>")
    return "".join(pieces)
if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    # BUGFIX: the explicit `loop=` argument to asyncio.start_server was
    # deprecated in 3.8 and removed in 3.10; start_server uses the
    # current event loop itself, so dropping it is behavior-preserving.
    coro = asyncio.start_server(dispatch, '127.0.0.1', 8080)
    server = loop.run_until_complete(coro)
    # Serve requests until Ctrl+C is pressed
    print('Serving on {}'.format(server.sockets[0].getsockname()))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    # Close the server
    server.close()
    loop.run_until_complete(server.wait_closed())
    loop.close()
|
#!/usr/bin/python
from __future__ import division, print_function, absolute_import, unicode_literals
class Position(object):
    """Row-major walk over a 2**order x 2**order grid.

    Tracks the current (x, y) cell and a running linear step counter.
    """

    def __init__(self, order):
        side = 2 ** order
        self.x = 0
        self.y = 0
        self.linear = 0
        self.width = side
        self.height = side
        self.num_steps = side * side

    def next(self):
        """Advance one cell: left to right, wrapping to the next row."""
        self.linear += 1
        if self.x >= self.width - 1:
            self.x = 0
            self.y += 1
        else:
            self.x += 1

    def __str__(self):
        return "linear {0} y {1} x {2}".format(self.linear, self.y, self.x)
def notsocurvy(order):
    """Yield one shared Position object, advanced once per grid cell.

    Note: every yield hands back the SAME Position instance, which is
    mutated between yields -- consumers must read it immediately.
    """
    pos = Position(order)
    for _ in range(pos.num_steps):
        yield pos
        pos.next()
if __name__ == '__main__':
    import sys
    # Walk the full grid of the order given on the command line,
    # printing the position after every step.
    order = int(sys.argv[1])
    for pos in notsocurvy(order):
        print(pos)
|
#python imports
import sys
import os
import subprocess
import json
import time
import requests
from termcolor import colored
#third-party imports
#No third-party imports
#programmer generated imports
from logger import logger
from fileio import fileio
'''
***BEGIN DESCRIPTION***
Type: Triage - Description: Retrieves any available data for a target against the Intezer database.
***END DESCRIPTION***
'''
def POE(POE):
    """Query the Intezer API for analysis data on the target's SHA256.

    Writes the raw JSON report to <logdir>/IntezerReport.json and logs the
    analysis URL/time, malware family and verdict. Returns 0 on completion,
    -1 when no API key is configured or the report cannot be written.

    NOTE(review): several LOG.WriteSubLog calls below run even when
    POE.logging is False, so LOG may be unbound there -- preserved as in
    the original; flag for follow-up.
    """
    apikey = ''
    if (POE.logging == True):
        LOG = logger()
        newlogentry = 'Module: IntezerReport'
        LOG.WriteStrongLog(POE.logdir, POE.targetfilename, newlogentry)
    # Locate the intezer API key among the configured key/value pairs.
    for apikeys in POE.apikeys:
        for key, value in apikeys.items():
            if (POE.debug == True):
                print ('[DEBUG] API: ' + str(key) + ' | API Key: ' + str(value))
            if (key == 'intezer'):
                print ('[*] API key located!')
                apikey = value
    if (apikey == ''):
        print (colored('\r\n[x] Unable to execute IntezerReport - apikey value not input. Please add one to /opt/static/static.conf', 'red', attrs=['bold']))
        # BUGFIX: was `if (logging == True)` -- referenced the undefined
        # name `logging` (NameError) instead of the POE.logging flag.
        if (POE.logging == True):
            newlogentry = 'Unable to execute IntezerReport - apikey value not input. Please add one to /opt/static/static.conf'
            LOG.WriteStrongSubLog(POE.logdir, POE.targetfilename, newlogentry)
        POE.csv_line += 'N/A,'
        return -1
    # (Removed the pointless `global json` and the unused reputation/whois
    # locals from the original.)
    output = POE.logdir + 'IntezerReport.json'
    analysis_time = ''
    analysis_url = ''
    family_name = ''
    verdict = ''
    FI = fileio()
    print (colored('\r\n[*] Running IntezerReport against: ' + POE.target, 'white', attrs=['bold']))
    intezer_url = 'https://analyze.intezer.com/api/v2-0'
    result_url = '/files/' + POE.SHA256
    response = requests.post(intezer_url + '/get-access-token', json={'api_key': apikey})
    response.raise_for_status()
    session = requests.session()
    # BUGFIX: collapsed the accidental duplicated chained assignment
    # (`session.headers['Authorization'] = session.headers['Authorization'] = ...`).
    session.headers['Authorization'] = 'Bearer %s' % response.json()['result']
    try:
        response = session.get(intezer_url + result_url)
        response.raise_for_status()
    except requests.exceptions.RequestException:
        # Narrowed from a bare except: only network/HTTP errors are expected.
        print ('[-] Intezer exception raised...')
    if (response.status_code == 200):
        print ('[*] Response 200 from server...')
        result = response.json()
        result = json.dumps(result, sort_keys=False, indent=4)
        if (POE.debug==True):
            print(str(result))
        try:
            FI.WriteLogFile(output, result)
            print (colored('[*] Intezer malware report data had been written to file here: ', 'green') + colored(output, 'blue', attrs=['bold']))
            if (POE.logging == True):
                if (POE.nolinksummary == False):
                    newlogentry = 'Intezer malware report data has been generated to file here: <a href=\"' + output + '\"> Intezer Summary </a>'
                    LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
        except Exception:
            print (colored('[x] Unable to write Intezer malware report data to file', 'red', attrs=['bold']))
            if (POE.logging == True):
                newlogentry = 'Unable to write Intezer malware report data to file'
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
            return -1
        result = json.loads(result)
        analysis_url = result['result']['analysis_url']
        print ('[*] Intezer analysis URL: ' + analysis_url)
        newlogentry = 'Intezer analysis URL: ' + analysis_url
        LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
        analysis_time = result['result']['analysis_time']
        print ('[*] Intezer analysis time: ' + analysis_time)
        newlogentry = 'Intezer analysis time: ' + analysis_time
        LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
        try:
            # family_name is absent for unclassified samples.
            family_name = result['result']['family_name']
            print ('[*] Intezer malware family designation: ' + family_name)
            newlogentry = 'Intezer malware family designation: ' + family_name
            LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
        except KeyError:
            print ('[-] Intezer malware family designation unavailable! ')
            newlogentry = 'Intezer malware family designation unavailable! '
            LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
        verdict = result['result']['verdict']
        print ('[*] Intezer verdict: ' + verdict)
        newlogentry = 'Intezer verdict: ' + verdict
        LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
    else:
        print ('[-] Intezer response HTTP status code: ' + str(response.status_code))
        print (colored('[-] Unable to locate sample...', 'yellow', attrs=['bold']))
        newlogentry = 'Unable to locate sample...'
        LOG.WriteStrongSubLog(POE.logdir, POE.targetfilename, newlogentry)
    return 0
|
from django.db import models
import datetime as dt
from django.contrib.auth.models import User
# Create your models here.
#-----------------Profile modules-------------#
class Profile(models.Model):
    '''
    A class that defines the profile blueprint of the User
    '''
    profile_photo = models.ImageField(upload_to = 'profiles/', null=True)
    name = models.CharField(max_length =30,null=True)
    # estate = models.ForeignKey(Neighborhood,on_delete=models.CASCADE, null=True,blank=True)
    user = models.ForeignKey(User,on_delete=models.CASCADE)
    def __str__(self):
        # NOTE(review): name is nullable, so this can return None --
        # confirm callers (e.g. the admin) tolerate that.
        return self.name
#---------------------Neighbourhood modules-----------------#
class Neighborhood(models.Model):
    '''
    A class that defines the blueprint of a Neighborhood model
    '''
    neighborhood_name = models.CharField(max_length =30,null=True)
    neighborhood_location = models.CharField(max_length =30, null =True)
    population = models.PositiveIntegerField(default=0)
    user = models.ForeignKey(User)
    def __str__(self):
        return self.neighborhood_name
    def create_neighborhood(self):
        '''
        Save a new neighborhood.
        '''
        self.save()
    def delete_neighborhood(self):
        '''
        Delete an existing neighborhood.
        '''
        self.delete()
    @classmethod
    def get_neighborhoods(cls):
        '''
        Fetch all neighborhoods.
        '''
        estates = Neighborhood.objects.all()
        return estates
    @classmethod
    def get_specific_hood(cls,id):
        '''
        Fetch one neighborhood by primary key.
        '''
        chosen_hood = cls.objects.get(id=id)
        return chosen_hood
    def update_neighborhood(self):
        '''
        Update an existing neighborhood (not yet implemented).
        '''
        pass
    def update_occupants(self):
        '''
        Update the population size (not yet implemented).
        '''
        # BUGFIX: removed a stray `email = models.EmailField(...)` line that
        # created a dead model-field object as a local variable here.
        pass
    @classmethod
    def find_neighbourhood(cls, search_term):
        '''
        Search neighborhoods whose name contains search_term (case-insensitive).

        BUGFIX: the original signature `find_neighbourhood(neigborhood_id)`
        never bound `cls` or `search_term`, so every call raised NameError;
        it also filtered on a nonexistent field `name` instead of
        `neighborhood_name`.
        '''
        query = cls.objects.filter(neighborhood_name__icontains=search_term)
        return query
return query
#------------Follow Module-------------#
class Follow(models.Model):
    '''
    Class that store a User and Profile follow neighborhood news
    '''
    user = models.ForeignKey(User)
    estate = models.ForeignKey(Neighborhood)
    def __str__(self):
        return self.user.username
    @classmethod
    def get_following(cls,user_id):
        # All Follow rows belonging to the given user id.
        following = Follow.objects.filter(user=user_id).all()
        return following
#-----------------------------Business modules-----------------------#
class Business(models.Model):
    '''
    A class that defines the business blueprint
    '''
    cover_image = models.ImageField(upload_to = 'business/', null=True, blank=True)
    business_name = models.CharField(max_length =30,null=True)
    email = models.EmailField(max_length=70,blank=True)
    estate = models.ForeignKey(Neighborhood,on_delete=models.CASCADE,null=True,blank=True)
    user = models.ForeignKey(User,on_delete=models.CASCADE,null=True,blank=True)
    def __str__(self):
        return self.business_name
    @classmethod
    def get_specific_business(cls,id):
        '''
        Fetch the business with the given id (returns a queryset).
        '''
        business = cls.objects.filter(id=id)
        return business
    @classmethod
    def get_businesses(cls):
        '''
        Fetch all businesses.
        '''
        business = cls.objects.all()
        return business
    @classmethod
    def get_business_by_estate(cls,hood_id):
        '''
        Method that gets all businesses in a specific neighbourhood from the database
        Returns:
            messages : list of Business objects from the database
        '''
        messages = cls.objects.all().filter(estate=hood_id)
        return messages
class Post(models.Model):
    '''
    A class that defines posts of the users
    '''
    image = models.ImageField(upload_to = 'photos/', null = True,blank=True,)
    image_name = models.CharField(max_length=30)
    message =models.TextField(max_length = 100, null =True,blank=True)
    date_uploaded = models.DateTimeField(auto_now_add=True, null=True)
    estate = models.ForeignKey(Neighborhood,null =True,blank=True, on_delete=models.CASCADE)
    # NOTE(review): no on_delete here -- presumably relies on the pre-2.0
    # Django default (CASCADE); confirm the Django version in use.
    user = models.ForeignKey(User)
    class Meta:
        # Newest posts first.
        ordering = ['-date_uploaded']
    def save_post(self):
        '''
        Method to save an post in the database
        '''
        self.save()
    def delete_post(self):
        ''' Method to delete an post from the database'''
        self.delete()
    @classmethod
    def get_posts(cls):
        '''
        Method that gets all posts from the database
        Returns:
            messages : list of post objects from the database
        '''
        messages = cls.objects.all()
        return messages
    @classmethod
    def get_posts_by_estate(cls,hood_id):
        '''
        Method that gets all posts in a specific neighbourhood from the database
        Returns:
            messages : list of post objects from the database
        '''
        messages = cls.objects.all().filter(estate=hood_id)
        return messages
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from analyse_immo.database import Database
from analyse_immo.impots.ligne import Ligne
from analyse_immo.impots.irpp import IRPP, L1AJ_salaire, L1BJ_salaire, L7UF_dons, L7AE_syndicat
from analyse_immo.impots.annexe_2044 import Annexe_2044, L211_loyer_brut
class TestLigne(unittest.TestCase):
    def testEqual(self):
        """Two Ligne objects with the same value and name compare equal."""
        # BUGFIX: the original asserted `ligne == ligne` (an object against
        # itself), which passes by identity and never exercises equality;
        # compare two separately constructed instances instead.
        self.assertEqual(Ligne(100, 'nom'), Ligne(100, 'nom'))
class TestIRPP(unittest.TestCase):
    def setUp(self):
        # Fresh tax-parameter database for each test.
        self.database = Database()
    def testInit(self):
        # Smoke test: IRPP can be constructed with empty arguments.
        _ = IRPP(None, 0, 0, 0)
def testRevenuFiscaleReference(self):
irpp = IRPP(self.database, 0, 0, 0)
irpp.add_ligne(L1AJ_salaire, 31407)
irpp.add_ligne(L1BJ_salaire, 23055)
self.assertEqual(irpp.revenu_fiscale_reference, 49015.80, 2)
    def testImpotBrutInternal(self):
        # Exercise the internal bracket computation directly with a fixed
        # marginal-rate table: [upper_bound, rate] pairs.
        irpp = IRPP(self.database, 0, 0, 0)
        tmi = [[10084, 0], [25710, 0.11], [73516, 0.30], [158122, 0.41]]
        # Entirely inside the 0% bracket.
        ibrut = irpp._impots_brut(tmi, 5000)
        self.assertEqual(ibrut, 0)
        ibrut = irpp._impots_brut(tmi, 10000)
        self.assertEqual(ibrut, 0)
        # Boundary of the first bracket and just beyond it.
        ibrut = irpp._impots_brut(tmi, 10084)
        self.assertEqual(ibrut, 0)
        ibrut = irpp._impots_brut(tmi, 10085)
        self.assertEqual(ibrut, 0)
        ibrut = irpp._impots_brut(tmi, 10086)
        self.assertEqual(ibrut, 0.11)
        # Amounts spanning the 11% and 30% brackets.
        ibrut = irpp._impots_brut(tmi, 15000)
        self.assertAlmostEqual(ibrut, 540.65, 2)
        ibrut = irpp._impots_brut(tmi, 20000)
        self.assertAlmostEqual(ibrut, 1090.65, 2)
        ibrut = irpp._impots_brut(tmi, 30000)
        self.assertAlmostEqual(ibrut, 3005.45, 2)
        ibrut = irpp._impots_brut(tmi, 80000)
        self.assertAlmostEqual(ibrut, 18718.28, 2)
    def testImpotBrut(self):
        # Gross tax for a 2.5-part household (1 child), 2019 parameters.
        irpp = IRPP(self.database, 2019, 2.5, 1)
        irpp.add_ligne(L1AJ_salaire, 31407)
        irpp.add_ligne(L1BJ_salaire, 23055)
        self.assertAlmostEqual(irpp.impots_brut, 3339, 0)
        self.assertAlmostEqual(irpp.impots_brut, 3339.46, 2)
    def testImpotNet(self):
        # Net tax after deductible donations and union dues.
        irpp = IRPP(self.database, 2019, 2.5, 1)
        irpp.add_ligne(L1AJ_salaire, 31407)
        irpp.add_ligne(L1BJ_salaire, 23055)
        irpp.add_ligne(L7UF_dons, 200)
        irpp.add_ligne(L7AE_syndicat, 143)
        self.assertAlmostEqual(irpp.impots_net, 3113, 0)
        self.assertAlmostEqual(irpp.impots_net, 3113.08, 2)
    def testImpotSalaireNetA(self):
        # Salaries plus rental income via the annexe 2044 form (2 parts).
        irpp = IRPP(self.database, 2019, 2, 0)
        irpp.add_ligne(L1AJ_salaire, 30000)
        irpp.add_ligne(L1BJ_salaire, 20000)
        annexe_2044 = Annexe_2044(self.database)
        annexe_2044.add_ligne(L211_loyer_brut, 5000)
        irpp.annexe_2044 = annexe_2044
        # Social levies are 17.2% of gross rent.
        self.assertAlmostEqual(annexe_2044.prelevement_sociaux, 5000 * .172, 2)
        # Reference income: 90% of salaries plus full rental income.
        self.assertEqual(irpp.revenu_fiscale_reference, 50000 * .9 + 5000)
        self.assertEqual(irpp.quotient_familial, (50000 * .9 + 5000) / 2)
        self.assertAlmostEqual(irpp.impots_net, 4182 + 5000 * .172, 0)
        self.assertAlmostEqual(irpp.impots_salaires_net, 3482, 0)
    def testImpotSalaireNetB(self):
        # 2020 parameters, 2.5 parts (1 child), smaller rental income;
        # checks the salary/rental split of the net tax.
        irpp = IRPP(self.database, 2020, 2.5, 1)
        irpp.add_ligne(L1AJ_salaire, 31500)
        irpp.add_ligne(L1BJ_salaire, 23100)
        annexe_2044 = Annexe_2044(self.database)
        annexe_2044.add_ligne(L211_loyer_brut, 2212)
        irpp.annexe_2044 = annexe_2044
        self.assertAlmostEqual(annexe_2044.prelevement_sociaux, 2212 * .172, 2)
        self.assertEqual(irpp.revenu_fiscale_reference, 51352)
        self.assertAlmostEqual(irpp.quotient_familial, 20541, 0)
        self.assertAlmostEqual(irpp.impots_net, 2875 + 2212 * .172, 0)
        self.assertAlmostEqual(irpp.impots_salaires_net, 2632, 0)
        self.assertAlmostEqual(irpp.impots_revenu_foncier, 243 + 2212 * .172, 0)
    def testExemple1(self):
        '''
        Worked example from the official French public-service site:
        https://www.service-public.fr/particuliers/actualites/A14556?xtor=EPR-141
        '''
        # 3 parts, 2 children; salary grossed up so net taxable is 55950.
        irpp = IRPP(self.database, 2020, 3, 2)
        irpp.add_ligne(L1AJ_salaire, 55950 / 0.9)
        self.assertEqual(irpp.revenu_net_impossable, 55950)
        self.assertEqual(irpp.quotient_familial, 18650)
        self.assertAlmostEqual(irpp.impots_net, 2826.45, 2)
        self.assertAlmostEqual(irpp.impots_net, 2826, 0)
    def testExemple2(self):
        '''
        Worked example from https://www.service-public.fr/particuliers/vosdroits/F1419
        '''
        # Single person, 1 part.
        irpp = IRPP(self.database, 2020, 1, 0)
        irpp.add_ligne(L1AJ_salaire, 30000 / 0.9)
        self.assertAlmostEqual(irpp.revenu_net_impossable, 30000, 0)
        self.assertAlmostEqual(irpp.quotient_familial, 30000, 0)
        self.assertAlmostEqual(irpp.impots_net, 3005.45, 2)
        self.assertAlmostEqual(irpp.impots_net, 3005, 0)
    def testExemple3(self):
        '''
        Worked example from https://www.service-public.fr/particuliers/vosdroits/F1419
        '''
        # Couple, 2 parts: same quotient as testExemple2, double the tax.
        irpp = IRPP(self.database, 2020, 2, 0)
        irpp.add_ligne(L1AJ_salaire, 60000 / 0.9)
        self.assertAlmostEqual(irpp.revenu_net_impossable, 60000, 0)
        self.assertAlmostEqual(irpp.quotient_familial, 30000, 0)
        self.assertAlmostEqual(irpp.impots_net, 6010.9, 2)
    def testExemple4(self):
        '''
        Family-quotient capping example from
        https://www.service-public.fr/particuliers/vosdroits/F2705
        '''
        # Sans plafonnement (no cap applies with 2 parts)
        irpp = IRPP(self.database, 2020, 2, 0)
        irpp.add_ligne(L1AJ_salaire, 63000 / 0.9)
        self.assertAlmostEqual(irpp.revenu_net_impossable, 63000, 0)
        self.assertAlmostEqual(irpp.quotient_familial, 31500, 0)
        self.assertAlmostEqual(irpp.impots_net, 6910.90, 2)
        # Avec plafonnement (advantage of the extra half-part is capped)
        irpp = IRPP(self.database, 2020, 2.5, 1)
        irpp.add_ligne(L1AJ_salaire, 63000 / 0.9)
        self.assertAlmostEqual(irpp.revenu_net_impossable, 63000, 0)
        self.assertAlmostEqual(irpp.quotient_familial, 25200, 0)
        self.assertNotEqual(irpp.impots_net, 4157) # Dépassement
        self.assertAlmostEqual(irpp.impots_net, 5341, 0)
    def testExemple6(self):
        '''
        Worked example (single, no children, high income) from
        http://impotsurlerevenu.org/exemple/124-celibataire-sans-enfant-revenus-eleves.php
        '''
        irpp = IRPP(self.database, 2018, 1, 0)
        irpp.add_ligne(L1AJ_salaire, 37133)
        self.assertAlmostEqual(irpp.revenu_net_impossable, 37133 * .9, 0)
        self.assertAlmostEqual(irpp.quotient_familial, 33420, 0)
        self.assertAlmostEqual(irpp.impots_net, 4227, 0)
    # NOTE(review): skipped with an empty reason -- confirm why this case
    # diverges from the reference site before re-enabling.
    @unittest.skip('')
    def testExemple7(self):
        '''
        Worked example (married couple, no children, high income) from
        http://impotsurlerevenu.org/exemple/125-couple-marie-sans-enfant-revenus-eleves.php
        '''
        irpp = IRPP(self.database, 2018, 2, 0)
        irpp.add_ligne(L1AJ_salaire, 146256)
        self.assertAlmostEqual(irpp.revenu_net_impossable, 146256 * .9, 0)
        self.assertAlmostEqual(irpp.quotient_familial, 66877, 0)
        self.assertAlmostEqual(irpp.impots_net, 28530, 0)
    def testExemple8(self):
        '''
        Worked example (married couple, modest income) from
        http://impotsurlerevenu.org/exemple/126-couple-marie-sans-enfant-revenus-modestes.php
        '''
        irpp = IRPP(self.database, 2018, 2, 0)
        irpp.add_ligne(L1AJ_salaire, 44467)
        self.assertAlmostEqual(irpp.revenu_net_impossable, 44467 * .9, 0)
        self.assertAlmostEqual(irpp.quotient_familial, 20010, 0)
        self.assertAlmostEqual(irpp.impots_net, 2813, 0)
    def testExemple10(self):
        '''
        Worked example (married couple with children, modest income) from
        http://impotsurlerevenu.org/exemple/128-couple-marie-avec-enfants-aux-revenus-modestes.php
        '''
        irpp = IRPP(self.database, 2018, 3, 1)
        irpp.add_ligne(L1AJ_salaire, 39519)
        self.assertAlmostEqual(irpp.revenu_net_impossable, 39519 * .9, 0)
        self.assertAlmostEqual(irpp.quotient_familial, 11856, 0)
        self.assertAlmostEqual(irpp.impots_net, 794, 0)
    def testExemple11(self):
        '''
        Worked example (married couple with children, high income) from
        http://impotsurlerevenu.org/exemple/129-couple-marie-avec-enfants-revenus-eleves.php
        '''
        irpp = IRPP(self.database, 2018, 3, 2)
        irpp.add_ligne(L1AJ_salaire, 123895)
        self.assertAlmostEqual(irpp.revenu_net_impossable, 123895 * .9, 0)
        self.assertAlmostEqual(irpp.quotient_familial, 37169, 0)
        self.assertAlmostEqual(irpp.impots_net, 18753, 0)
    def testExemple12(self):
        '''
        Worked example (married couple, 1 child, high income) from
        http://impotsurlerevenu.org/exemple/130-couple-marie-avec-1-enfant-aux-revenus-eleves.php
        '''
        irpp = IRPP(self.database, 2018, 2.5, 1)
        irpp.add_ligne(L1AJ_salaire, 85331)
        self.assertAlmostEqual(irpp.revenu_net_impossable, 85331 * .9, 0)
        self.assertAlmostEqual(irpp.quotient_familial, 30719, 0)
        self.assertAlmostEqual(irpp.impots_net, 9891, 0)
    def testExemple20(self):
        '''
        2019 tax-scale example from
        http://impotsurlerevenu.org/nouveautes-impot-2019/1203-bareme-impot-2019.php
        '''
        irpp = IRPP(self.database, 2018, 3, 2)
        irpp.add_ligne(L1AJ_salaire, 60000 / 0.9)
        self.assertAlmostEqual(irpp.revenu_net_impossable, 60000, 0)
        self.assertAlmostEqual(irpp.quotient_familial, 20000, 0)
        self.assertAlmostEqual(irpp.impots_net, 4215, 0)
    def testExemple21(self):
        '''
        2018 tax computation example from
        http://impotsurlerevenu.org/comprendre-le-calcul-de-l-impot/1194-calcul-de-l-impot-2018.php
        '''
        irpp = IRPP(self.database, 2017, 3, 2)
        irpp.add_ligne(L1AJ_salaire, 89000 / 0.9)
        self.assertAlmostEqual(irpp.revenu_net_impossable, 89000, 0)
        self.assertAlmostEqual(irpp.quotient_familial, 29667, 0)
        self.assertAlmostEqual(irpp.impots_net, 12232, 0)
class TestIRPPAnnexe2044(unittest.TestCase):
    """Checks that attaching an Annexe_2044 (rental income) to an IRPP
    declaration feeds into the taxable-income reference."""

    def setUp(self):
        self.database = Database()

    def testAnnexe2044(self):
        irpp = IRPP(self.database, 2019, 2, 0)
        irpp.add_ligne(L1AJ_salaire, 30000)
        irpp.add_ligne(L1BJ_salaire, 20000)
        annexe_2044 = Annexe_2044(self.database)
        annexe_2044.add_ligne(L211_loyer_brut, 6000)
        irpp.annexe_2044 = annexe_2044
        # 50000 * 0.9 (salary deduction) + 6000 rental income = 51000.
        self.assertEqual(irpp.revenu_fiscale_reference, 51000)
        self.assertTrue(isinstance(irpp.annexe_2044, Annexe_2044))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
#I pledge my honor I have abided by the Stevens Honor code - Tyson Werner
def squaring(numbers):
return [float(i) * float(i) for i in numbers]
def main():
    """Prompt for space-separated numbers on stdin and print their squares."""
    numbers = input("Enter a list of numbers separated by a space").split()
    print(squaring(numbers))

# Runs at import time (no __main__ guard) -- intentional for this exercise script.
main()
|
# Competitive-programming style loop: first line is the number of test
# cases; each case supplies two integers a and b, and the answer printed
# is 2 - a + b.
for _ in range(int(input())):
    i = list(map(int, input().split()))
    print(2-i[0]+i[1])
|
# -*- coding: utf-8 -*-
# NOTE(review): Python 2 only -- reload(sys), sys.setdefaultencoding and
# the codecs stdout wrapper below do not exist / are unnecessary on Python 3.
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import codecs, locale
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
import os
import re
from datetime import timedelta, datetime
from flask import Flask, jsonify
import logging

# add environment variables using 'heroku config:add VARIABLE_NAME=variable_name'
# DEBUG defaults to True unless the env var says otherwise.
DEBUG = os.environ.get('DEBUG', 'True').lower() == 'true'
TESTING = DEBUG
app = Flask(__name__)
app.config.from_object(__name__)

# logging: INFO by default, raised to DEBUG when Flask runs in debug mode.
stream_handler = logging.StreamHandler()
app.logger.addHandler(stream_handler)
app.logger.setLevel(logging.INFO)
if app.debug:
    app.logger.setLevel(logging.DEBUG)
    app.logger.info('Running in debug mode')
else:
    app.logger.info('Running in prod mode')

@app.route('/')
def root():
    """Health-check endpoint: always returns {"result": "success"}."""
    return jsonify(result="success")

if __name__=='__main__':
    # Heroku supplies PORT and requires binding to 0.0.0.0.
    port = int(os.environ.get('PORT', 5000))
    host = '0.0.0.0' if os.environ.get('HEROKU') else '127.0.0.1'
    app.logger.info("Starting server at %s:%d" % (host, port))
    app.run(host=host, port=port, debug=app.debug)
    app.logger.info("Server shuting down")
|
"""
网格:
"""
from matplotlib import pyplot as plt
import numpy as np
x = np.arange(10)
plt.plot(x, x**2, 'r')
plt.grid(color='r', lw='1', ls='--')
# lw表示线宽,ls表示线型
plt.show()
|
import numpy as np
def sentence_rep(listOfWordVector, comb_func = np.mean):
    """Combine word vectors into a single sentence representation.

    listOfWordVector: a list of word vectors
    comb_func: callable that reduces the list of word vectors into one
        representation; defaults to np.mean.
        NOTE(review): np.mean without an axis argument averages over *all*
        elements, yielding a scalar rather than a per-dimension vector --
        confirm that is the intended default.
    returns: the combined representation produced by comb_func
    """
    return comb_func(listOfWordVector)
def init_GloVe(glove_dim = 50):
    """Load the 6B-token GloVe embedding of dimension `glove_dim` as a dict.

    NOTE(review): relies on module-level `os`, `GLOVE_HOME` and `utils`,
    none of which are defined/imported in this chunk -- confirm they are
    provided elsewhere in the file.
    """
    glove_src = os.path.join(GLOVE_HOME, 'glove.6B.{}d.txt'.format(glove_dim))
    # Creates a dict mapping strings (words) to GloVe vectors:
    GLOVE = utils.glove2dict(glove_src)
    return GLOVE
def glove2dict(src_filename):
    """
    From CS224U github utils
    GloVe Reader.

    Parameters
    ----------
    src_filename : str
        Full path to the GloVe file to be processed.

    Returns
    -------
    dict
        Mapping words to their GloVe vectors (1-D numpy float arrays).
    """
    data = {}
    with open(src_filename) as f:
        while True:
            try:
                line = next(f)
                line = line.strip().split()
                # Fixed: `np.float` was deprecated in NumPy 1.20 and removed
                # in 1.24 (AttributeError); the builtin float is equivalent.
                data[line[0]] = np.array(line[1: ], dtype=float)
            except StopIteration:
                break
            except UnicodeDecodeError:
                # Skip undecodable lines instead of aborting the whole read.
                pass
    return data
def glove_vec(w):
    """Return `w`'s GloVe representation if available, else return
    a random vector.

    From CS224U github utils

    NOTE(review): depends on module-level `GLOVE`, `randvec` and
    `glove_dim`, none of which are defined in this chunk -- confirm they
    exist at call time.
    """
    return GLOVE.get(w, randvec(w, n=glove_dim))
from django.db import models
class User(models.Model):
    """User account model (original docstring: 用户类型 -- "user type")."""
    username = models.CharField(max_length=31, verbose_name='用户名')
    # NOTE(review): plain CharField -- confirm the password is hashed elsewhere.
    password = models.CharField(max_length=31, verbose_name='密码')
    sex = models.BooleanField(default=1, verbose_name='性别')
    # NOTE(review): auto_now_add sets this to the row-creation date, not an
    # actual birthday supplied by the user -- confirm intent.
    birth = models.DateField(auto_now_add=True, verbose_name='生日')
    udesc = models.TextField(null=True, verbose_name='自我简介')
    head_img = models.ImageField(upload_to='headimg', null=True, verbose_name='头像')
    create_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    update_time = models.DateTimeField(auto_now=True, verbose_name='修改时间')
    # NOTE(review): auto_now bumps this on *every* save, not only on login.
    login_time = models.DateTimeField(auto_now=True)
    # Soft-delete flag; name likely meant "isDelete" -- renaming would
    # require a migration, so left as-is.
    idDelete = models.BooleanField(default=0)

    class Meta:
        db_table = 'tb_user'
|
import time
from snap7 import util
from snap7 import client
from LoadTags import loadTags
from LoadDBs import loadDBs
from LoadMeasur import loadMeasur
from LoadGrupos import loadGrupos
from LoadGrupos import check_group_time
import logging
from restart import restart_program
# initialize the log settings
logging.basicConfig(format='%(asctime)s - %(levelname)s:%(message)s', datefmt='%d/%m/%Y %H:%M:%S',filename = 'historian.log', level = logging.INFO)
# PLC configuration.
# NOTE(review): '192.168.0.266' is not a valid IPv4 address (octet 266 > 255)
# -- confirm the intended address before deploying.
ipSiemens = '192.168.0.266'
def conection(ipSiemens):
    """Ensure the module-level `plc` client is connected to the Siemens PLC
    at `ipSiemens`; return True when connected, False on failure.

    NOTE(review): Python 2 code (print statements). The name "conection"
    is kept as-is since it is the public interface; conventional spelling
    is "connection".
    """
    if not plc.get_connected() :
        try:
            print "Conectando ao PLC Siemens..."
            plc.connect(ipSiemens, 0, 2) # connect to PLC (rack 0, slot 2)
            msg1 = "Conectado ao PLC Siemens, IP:" + ipSiemens
            print msg1
            return True
        except Exception as e:
            print("Erro: "+str(e))
            return False
    else:
        return True
#converte o byte lido no PLC para seu respectivo tipo
def read_tags():
"""
length: Proxima posicao pos a variavel desejada
start: location we are going to start the read
"""
try:
# Leitura das DBs pode ser ate em outra funcao
for db in dbs.values():
db.get_values(plc)
for tag in tags:
tag.set_value(dbs[str(tag.db)].data)
if tag.write:
mss[tag.measur].changed = True
#print tag.get_valor()
check_group_time(grupos)
return True
except Exception as e:
print e
return False #log_error("PLC Siemens falha de leitura na db:",db,cur)
if __name__ == "__main__":
tags = loadTags()
dbs = loadDBs()
mss = loadMeasur(tags)
grupos = loadGrupos(mss)
plc = client.Client()
erro = 0
while True:
if conection(ipSiemens) :
try:
read_tags()
time.sleep(1)
erro = 0
except:
erro += 1
else:
erro += 1
if erro >=1:
restart_program()
erro = 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-29 18:51
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.11, 2017-08-29).

    Renames FaAtEvent to EventPublic, removes the BandAtEvent through-model
    in favour of direct Event.band / Proposition foreign keys, and renames
    several genre/fan fields to `music_genres` / `fas_amount`.

    NOTE(review): generated by `makemigrations`; edit with care.
    """

    dependencies = [
        ('api_', '0002_auto_20170823_2011'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='FaAtEvent',
            new_name='EventPublic',
        ),
        # BandAtEvent is dismantled field-by-field before being deleted below.
        migrations.RemoveField(
            model_name='bandatevent',
            name='band',
        ),
        migrations.RemoveField(
            model_name='bandatevent',
            name='event',
        ),
        migrations.RemoveField(
            model_name='bandatevent',
            name='proposition',
        ),
        migrations.AlterModelOptions(
            name='eventpublic',
            options={'verbose_name_plural': 'EventsPublic'},
        ),
        migrations.RenameField(
            model_name='band',
            old_name='fans_amount',
            new_name='fas_amount',
        ),
        migrations.RenameField(
            model_name='band',
            old_name='genres',
            new_name='music_genres',
        ),
        migrations.RenameField(
            model_name='fa',
            old_name='favorite_genres',
            new_name='music_genres',
        ),
        migrations.RemoveField(
            model_name='band',
            name='events',
        ),
        migrations.AddField(
            model_name='event',
            name='band',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='events', to='api_.Band'),
        ),
        migrations.AddField(
            model_name='event',
            name='music_genres',
            field=models.ManyToManyField(to='api_.MusicGenre'),
        ),
        migrations.AddField(
            model_name='event',
            name='public',
            field=models.ManyToManyField(through='api_.EventPublic', to='api_.Fa'),
        ),
        migrations.AddField(
            model_name='host',
            name='music_genres',
            field=models.ManyToManyField(to='api_.MusicGenre'),
        ),
        migrations.AlterField(
            model_name='event',
            name='host',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='events', to='api_.Host'),
        ),
        migrations.AlterField(
            model_name='proposition',
            name='band',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='propositions', to='api_.Band'),
        ),
        migrations.AlterField(
            model_name='proposition',
            name='event',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='propositions', to='api_.Event'),
        ),
        migrations.RemoveField(
            model_name='event',
            name='presenting_bands',
        ),
        migrations.AlterUniqueTogether(
            name='event',
            unique_together=set([('name', 'host', 'starts_at')]),
        ),
        migrations.AlterUniqueTogether(
            name='eventpublic',
            unique_together=set([('fa', 'event')]),
        ),
        migrations.RemoveField(
            model_name='proposition',
            name='ends_at',
        ),
        migrations.RemoveField(
            model_name='proposition',
            name='starts_at',
        ),
        migrations.AlterUniqueTogether(
            name='proposition',
            unique_together=set([('band', 'event')]),
        ),
        migrations.DeleteModel(
            name='BandAtEvent',
        ),
    ]
|
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import sys
# Load train/test features from command-line paths, z-score normalise with
# training-set statistics only, and append a bias column of ones.
X = pd.read_csv(sys.argv[3],low_memory=False)
X_test = pd.read_csv(sys.argv[5],low_memory=False)
X=np.array(X,dtype = float)
X_test = np.array(X_test,dtype = float)
X_mean = np.mean(X,axis =0,keepdims = True)
X_std = np.std(X,axis = 0,keepdims = True)
X = (X-X_mean)/X_std
X_test = (X_test-X_mean)/X_std
# Fixed: `np.float` was removed in NumPy 1.24 -- use the builtin float.
Y = np.loadtxt(sys.argv[4],dtype=float,delimiter=',')
Y = np.reshape(Y,(-1,1))
# Generalised: bias rows must match the data row count (the previous
# hard-coded 32561/16281 only worked for one specific dataset).
bias = np.ones((X.shape[0],1))
biastest = np.ones((X_test.shape[0],1))
X = np.concatenate((X,bias),axis = 1)
X_test = np.concatenate((X_test,biastest),axis = 1)
print("X_shape:",X.shape,"\nY_shape:",Y.shape)
# In[2]:
class logistic_regression():
    """Binary logistic regression trained with full-batch gradient descent."""

    def __init__(self):
        pass

    def _init_para(self, dim=107):
        # Weight column vector initialised to ones. `dim` generalises the
        # previously hard-coded 107 features (bias column included by caller);
        # the default keeps backward compatibility.
        self.W = np.ones((dim, 1))

    def fit(self, X, Y, valid=None, max_epoch=2000, lr=0.000001):
        """Fit weights on X (n_samples, n_features) against 0/1 labels
        Y (n_samples, 1); logs the training loss every 100 epochs."""
        assert X.shape[0] == Y.shape[0]
        self._init_para(X.shape[1])
        for epoch in range(1, max_epoch+1):
            # Gradient of the summed cross-entropy loss w.r.t. W.
            W_grad = -np.dot(X.T,(Y-self.predict(X)))
            self.W = self.W -(lr*W_grad)
            if epoch%100==0:
                training_loss = self.loss(X,Y)
                print('[Epoch%5d] - training loss: %5f'%(epoch, training_loss))

    def sigmod(self,z):# sigmod(z) = 1/(1+e^-z)
        # Name kept for backward compatibility (conventional: "sigmoid").
        return 1/(1+np.exp(-z))

    def predict(self, X, test=False):
        """Return P(y=1 | x) for each row of X as an (n, 1) array."""
        return self.sigmod(np.dot(X,self.W))

    def loss(self, X, Y, pred=None):
        """Summed binary cross-entropy; epsilon (1e-11) guards log(0)."""
        predict_value = self.predict(X)
        return -np.sum(Y*np.log(predict_value+0.00000000001)+(1-Y)*(np.log(1-predict_value+0.00000000001)))

    def evaluate(self, X, Y):
        """Classification accuracy with a 0.5 decision threshold."""
        pred = (self.predict(X) >= 0.5).astype(float)
        return np.mean(1-np.abs(pred-Y))
# In[3]:
model = logistic_regression()
model.fit(X,Y,max_epoch = 10000)
# In[4]:
predict = model.predict(X_test)
print(predict.shape)
for i in range(len(predict)):
if predict[i]<0.5:
predict[i] = 0
else:
predict[i] = 1
predict =np.array(predict,dtype=int)
# In[5]:
with open(sys.argv[6], 'w') as f:
print('id,label', file=f)
for (i, p) in enumerate(predict) :
print('{},{}'.format(i+1, p[0]), file=f)
# In[ ]:
# In[ ]:
|
from django.test import TestCase
from elections.tests.factories import ElectedRoleFactory
from organisations.models import (
DivisionProblem,
OrganisationGeographyProblem,
OrganisationProblem,
)
from organisations.tests.factories import (
DivisionGeographyFactory,
OrganisationDivisionFactory,
OrganisationDivisionSetFactory,
OrganisationFactory,
OrganisationGeographyFactory,
)
class OrganisationProblemTests(TestCase):
    """Each test builds an Organisation missing exactly one (or all) of its
    required relations and checks the OrganisationProblem report flags."""

    def test_no_geography(self):
        org = OrganisationFactory()
        OrganisationDivisionSetFactory(organisation=org)
        ElectedRoleFactory(organisation=org)
        self.assertEqual(len(OrganisationProblem.objects.all()), 1)
        problem = OrganisationProblem.objects.all()[0]
        self.assertTrue(problem.no_geography)
        self.assertFalse(problem.no_divisionset)
        self.assertFalse(problem.no_electedrole)
        self.assertEqual(
            "No associated OrganisationGeography", problem.problem_text
        )

    def test_no_divisionset(self):
        org = OrganisationFactory()
        OrganisationGeographyFactory(organisation=org)
        ElectedRoleFactory(organisation=org)
        self.assertEqual(len(OrganisationProblem.objects.all()), 1)
        problem = OrganisationProblem.objects.all()[0]
        self.assertFalse(problem.no_geography)
        self.assertTrue(problem.no_divisionset)
        self.assertFalse(problem.no_electedrole)
        self.assertEqual("No associated DivisionSet", problem.problem_text)

    def test_no_electedrole(self):
        org = OrganisationFactory()
        OrganisationDivisionSetFactory(organisation=org)
        OrganisationGeographyFactory(organisation=org)
        self.assertEqual(len(OrganisationProblem.objects.all()), 1)
        problem = OrganisationProblem.objects.all()[0]
        self.assertFalse(problem.no_geography)
        self.assertFalse(problem.no_divisionset)
        self.assertTrue(problem.no_electedrole)
        self.assertEqual("No associated ElectedRole", problem.problem_text)

    def test_all_ok(self):
        # All three relations present: no problem row should exist.
        org = OrganisationFactory()
        OrganisationDivisionSetFactory(organisation=org)
        OrganisationGeographyFactory(organisation=org)
        ElectedRoleFactory(organisation=org)
        self.assertEqual(len(OrganisationProblem.objects.all()), 0)

    def test_all_broken(self):
        # Bare organisation: every flag set; problem_text reports the
        # first (geography) problem.
        OrganisationFactory()
        self.assertEqual(len(OrganisationProblem.objects.all()), 1)
        problem = OrganisationProblem.objects.all()[0]
        self.assertTrue(problem.no_geography)
        self.assertTrue(problem.no_divisionset)
        self.assertTrue(problem.no_electedrole)
        self.assertEqual(
            "No associated OrganisationGeography", problem.problem_text
        )
class OrganisationGeographyProblemTests(TestCase):
    """Checks the OrganisationGeographyProblem flags for missing GSS code,
    NULL geography and invalid boundary source."""

    def test_no_gss_code(self):
        og = OrganisationGeographyFactory()
        og.source = "this is totally fine"
        og.gss = ""
        og.save()
        self.assertEqual(len(OrganisationGeographyProblem.objects.all()), 1)
        problem = OrganisationGeographyProblem.objects.all()[0]
        self.assertTrue(problem.no_gss_code)
        self.assertFalse(problem.no_geography)
        self.assertFalse(problem.invalid_source)
        self.assertEqual("No GSS code", problem.problem_text)

    def test_no_geography(self):
        og = OrganisationGeographyFactory()
        og.source = "this is totally fine"
        og.geography = None
        og.save()
        self.assertEqual(len(OrganisationGeographyProblem.objects.all()), 1)
        problem = OrganisationGeographyProblem.objects.all()[0]
        self.assertFalse(problem.no_gss_code)
        self.assertTrue(problem.no_geography)
        self.assertFalse(problem.invalid_source)
        self.assertEqual("Geography field is NULL", problem.problem_text)

    def test_invalid_source(self):
        og = OrganisationGeographyFactory()
        og.source = "unknown"
        og.save()
        self.assertEqual(len(OrganisationGeographyProblem.objects.all()), 1)
        problem = OrganisationGeographyProblem.objects.all()[0]
        self.assertFalse(problem.no_gss_code)
        self.assertFalse(problem.no_geography)
        self.assertTrue(problem.invalid_source)
        self.assertEqual("Boundary source is invalid", problem.problem_text)

    def test_all_ok(self):
        og = OrganisationGeographyFactory()
        og.source = "this is totally fine"
        og.save()
        self.assertEqual(len(OrganisationGeographyProblem.objects.all()), 0)

    def test_all_broken(self):
        # All three flags set; problem_text reports the geography problem.
        og = OrganisationGeographyFactory()
        og.source = ""
        og.gss = ""
        og.geography = None
        og.save()
        self.assertEqual(len(OrganisationGeographyProblem.objects.all()), 1)
        problem = OrganisationGeographyProblem.objects.all()[0]
        self.assertTrue(problem.no_gss_code)
        self.assertTrue(problem.no_geography)
        self.assertTrue(problem.invalid_source)
        self.assertEqual("Geography field is NULL", problem.problem_text)
class DivisionProblemTests(TestCase):
    """Checks the DivisionProblem flags for missing GSS identifier, missing
    DivisionGeography and invalid boundary source."""

    def test_no_gss_code(self):
        div = OrganisationDivisionFactory()
        dg = DivisionGeographyFactory(division=div)
        dg.source = "this is totally fine"
        dg.save()
        self.assertEqual(len(DivisionProblem.objects.all()), 1)
        problem = DivisionProblem.objects.all()[0]
        self.assertTrue(problem.no_gss_code)
        self.assertFalse(problem.no_geography)
        self.assertFalse(problem.invalid_source)
        self.assertEqual("No GSS code", problem.problem_text)

    def test_invalid_source(self):
        div = OrganisationDivisionFactory()
        div.official_identifier = "gss:X01000001"
        div.save()
        dg = DivisionGeographyFactory(division=div)
        dg.source = "unknown"
        dg.save()
        self.assertEqual(len(DivisionProblem.objects.all()), 1)
        problem = DivisionProblem.objects.all()[0]
        self.assertFalse(problem.no_gss_code)
        self.assertFalse(problem.no_geography)
        self.assertTrue(problem.invalid_source)
        self.assertEqual("Boundary source is invalid", problem.problem_text)

    def test_no_geography(self):
        div = OrganisationDivisionFactory()
        div.official_identifier = "gss:X01000001"
        div.save()
        self.assertEqual(len(DivisionProblem.objects.all()), 1)
        problem = DivisionProblem.objects.all()[0]
        self.assertFalse(problem.no_gss_code)
        self.assertTrue(problem.no_geography)
        self.assertTrue(problem.invalid_source)
        self.assertEqual(
            "No associated DivisionGeography", problem.problem_text
        )

    def test_all_ok(self):
        div = OrganisationDivisionFactory()
        div.official_identifier = "gss:X01000001"
        div.save()
        dg = DivisionGeographyFactory(division=div)
        dg.source = "this is totally fine"
        dg.save()
        self.assertEqual(len(DivisionProblem.objects.all()), 0)

    def test_all_broken(self):
        div = OrganisationDivisionFactory()
        div.save()
        dg = DivisionGeographyFactory(division=div)
        dg.source = ""
        dg.save()
        self.assertEqual(len(DivisionProblem.objects.all()), 1)
        problem = DivisionProblem.objects.all()[0]
        self.assertTrue(problem.no_gss_code)
        # NOTE(review): `invalid_source` is asserted twice below; the first
        # occurrence was likely meant to check a different flag (a geography
        # exists here, so `no_geography` would be False) -- confirm intent.
        self.assertTrue(problem.invalid_source)
        self.assertTrue(problem.invalid_source)
        self.assertEqual("No GSS code", problem.problem_text)
|
# [graphene] https: // docs.graphene-python.org/en/latest/quickstart/
# ===================a simple example============================================
from graphene import ObjectType, Field, Schema
from graphene import ObjectType, String
from graphene import ObjectType, String, Field, Schema
from collections import namedtuple
import graphene
from graphene import ObjectType,String,Schema
# Minimal graphene example: a root Query with two string fields and their
# resolvers, executed once against an inline GraphQL document.
class Query(ObjectType):
    # `name` argument defaults to 'stranger' when the query omits it.
    hello = String(name=String(default_value='stranger'))
    goodbye = String()

    # Resolvers are implicitly static: the first parameter is the parent
    # value object, not self.
    def resolve_hello(root,info,name):
        return f'Hello {name}!'

    def resolve_goodbye(root,info):
        return 'See ya!'

schema = Schema(Query)
result = schema.execute(
    '''
    {
        hello
        goodbye
    }
    '''
)
print(result.data['hello'])
#===================Types Reference====================
#----------Schema------------
'''
Schema:defines the types and relationship between Fields
by supplying the root ObjectType of each operation.
# Query:fetches data
# Mutation:changes data and retrieve the changes
# Subscription:sends changes to clients in real time
'''
schema = Schema (
query=MyRootQuery,
mutation=MyRootMutation,
subscription=MyRootSubscription,
types=[SomeUnknowObjectType], # 一些Schema无法理解的ObjectType放这里
auto_camelcase=False, #控制自动转换命名方式
)
#--------------
'''
默认情况下下划线字段名会被自动转换为驼峰式命名,除非用name参数指定
'''
class Person(ObjectType):
last_name = String() # lastName
other_name = String(name='_other_name') # _other_name
#---------Scalars----------
'''
graphene.String
graphene.Int
graphene.Float
graphene.Boolean
graphene.ID
graphene.types.datetime.Date
graphene.types.datetime.Time
graphene.types.json.JSONString
'''
# All Scalar types accept the following arguments.
class ModelMap(ObjectType):
field_name = String(
name,
description,
required,
deprecation_reason,
default_value
)
# mount scalar with params
mount_field = graphene.Field(graphene.String, to=graphene.String())
# Is equivalent to:
mount_field_ = graphene.Field(graphene.String, to=graphene.Argument(graphene.String))
#------NonNull and List------
class ModelMap(ObjectType):
non_null_field = graphene.NonNull(graphene.String)
#equivalent to
required_field = graphene.String(required=True)
list_fields = graphene.List(graphene.String)
non_null_list_fields = graphene.List(graphene.NonNull(graphene.String))
# SDL
# type ModelMap {
# nonNullField:String!
# nonNullListFields:[String!]
# }
#------------ObjectType---------
class PersonMap(ObjectType):
first_name = String() # use default resolver
last_name = String() # use default resolver
full_name=String()
def resolve_full_name(parent,info):
# parent:
# 指向父对象PersonMap
# info:
#引用有关当前GraphQL查询执行的元信息(字段,架构,已解析的查询等)
#访问每个请求context,可用于存储用户身份验证,数据加载器实例或任何其他可用于解决查询的内容。
return f"{parent.first_name} {parent.last_name}"
# SDL
# type PersonMap{
# firstName:String,
# lastName:String,
# fullName:String
# }
#resolver
# 所有解析器方法都被隐式地视为静态方法。这意味着,解析器的第一个参数永远不会是self
# return 类型是dict则按key匹配,为其他类型则按参数名匹配
PersonValueObject = namedtuple('Person', ['first_name', 'last_name'])
class Person(ObjectType):
first_name = String()
last_name = String()
class Query(ObjectType):
me = Field(Person)
my_best_friend = Field(Person)
def resolve_me(parent, info):
# always pass an object for `me` field
return PersonValueObject(first_name='Luke', last_name='Skywalker')
def resolve_my_best_friend(parent, info):
# always pass a dictionary for `my_best_fiend_field`
return {"first_name": "R2", "last_name": "D2"}
schema = Schema(query=Query)
result = schema.execute('''
{
me { firstName lastName }
myBestFriend { firstName lastName }
}
''')
# default arguments
class Query(ObjectType):
hello = String(required=True, name=String())
def resolve_hello(parent, info, **kwargs):
name = kwargs.get('name', 'World')
return f'Hello, {name}!'
def resolve_hello(parent, info, name='World'):
return f'Hello, {name}!'
class Query(ObjectType):
hello = String(
required=True,
name=String(default_value='World')
)
def resolve_hello(parent, info, name):
return f'Hello, {name}!'
# meta class
class MyGraphQlSong(ObjectType):
class Meta:
name = 'Song'
description = 'But if we set the description in Meta, this value is used instead'
# specifies the GraphQL Interfaces that this Object implements.
interfaces = (Node, )
# helps Graphene resolve ambiguous types such as interfaces or Unions.
possible_types = (Song, )
#---------------Enum------------------
# You can create an Enum using classes:
class Episode(graphene.Enum):
NEWHOPE = 4
EMPIRE = 5
JEDI = 6
# But also using instances of Enum:
Episode = graphene.Enum(
'Episode', [('NEWHOPE', 4), ('EMPIRE', 5), ('JEDI', 6)])
#--------interfaces---------------
'''
# 当有多个ObjectType具有共同属性时,可以使用接口简化工作
# 接口必须被ObjectType类实现(impliments),且ObjectType类必须明确包含接口类定义的属性或字段
# 当需要返回一个Object或者不同的Object集合,接口就很有用
when error:
"Abstract type Character must resolve to an Object
type at runtime for field Query.hero ..."
reason:
Graphene doesn’t have enough information to convert
the data object into a Graphene type needed to resolve the Interface
solve: @classmethod
'''
class Character(graphene.Interface):
id = graphene.ID(required=True)
name = graphene.String(required=True)
friends = graphene.List(lambda: Character)
# @classmethod
# def resolve_type(cls,isinstance,info):
# if isinstance.type=='Human':
# return Human
# return Monster
class Human(graphene.ObjectType):
class Meta:
interfaces = (Character,)
height = graphene.Float()
class Monster(graphene.ObjectType):
class Meta:
interfaces = (Character,)
crawler = graphene.Boolean(default_value=True)
class Query(graphene.ObjectType):
hero = graphene.Field(
Character,
required=True,
has_crawler=graphene.Boolean(required=True)
)
def resolve_hero(parent,info,has_crawler):
if has_crawler:
return get_monster(name='big monster')
return get_human(name='oh baby')
schema = graphene.Schema(query=Query,types=[Human,Monster])
# SDL
# interface Character{
# id:ID!
# name:String!
# friends:[Character]
# }
# type Human implements Character{
# id:ID!
# name:String!
# friends:[Character]
# height:String
# }
# type Monster implements Character{
# id:ID!
# name:String!
# friends:[Character]
# crawler:Boolean
# }
# query getHero($hasCrawler:Boolean!){
# hero(hasCrawler:$hasCrawler){
# __typename #Human/Monster
# name
# ...on Human{
# height
# }
# ... on Monster{
# hasCrawler
# }
# }
# }
#--------------Union-------------------
# 联合类:仅仅是返回多个不同类的集合,不需要像接口一样的公共字段
class Human(graphene.ObjectType):
...
class Monster(graphene.ObjectType):
...
class Others(graphene.ObjectType):
...
class SearchResult(graphene.Union):
class Meta:
types = (Human,Monster,Others)
#SDL
# type Human{}
# type Monster{}
# type Others{}
# union SearchResult = Human|Monster|Others
#---------Mutation-----------
class CreatePerson(graphene.Mutation):
class Arguments:
name = graphene.String()
# output fields
ok = graphene.Boolean()
person = graphene.Field(lambda: Person)
# mutate is the function that will be applied once the mutation is called.
def mutate(root,info,name):
person = Person(name=name)
ok = True
return CreatePerson(person=person,ok=ok)
# your Schema
class Person(graphene.ObjectType):
name = graphene.String()
age = graphene.Int()
class MyMutations(graphene.ObjectType):
create_person = CreatePerson.Field()
class Query(graphene.ObjectType):
person = graphene.Field(Person)
schema = graphene.Schema(query=Query,mutation=MyMutations)
schema.execute('''
mutation myMutation{
createPerson(name:"Peter"){
person{
name
}
ok
}
}
''')
# InputObjectTypes
# 输入多个参数
class PersonInput(graphene.InputObjectType):
name = graphene.String(required=True)
age = graphene.Int(required=True)
class CreatePerson(graphene.Mutation):
class Arguments:
person_data = PersonInput(required=True)
person = graphene.Field(Person)
@staticmethod
def mutate(root,info,person_data=None):
person = Person(
name = person_data.name,
age = person_data.age
)
return CreatePerson(person=person)
schema.execute(
'''
mutation myMutation{
createPerson(personData:{name:"Peter",age:24}){
person {
name,
age
}
}
}
'''
)
# complex input data
class LatLngInput(graphene.InputObjectType):
lat = graphene.Float()
lng = graphene.Float()
class LocationInput(graphene.InputObjectType):
name = graphene.String()
latlng = graphene.InputField(LatLngInput)
# output type
class CreatePerson(graphene.Mutation):
class Arguments:
name = graphene.String()
Output = Person
def mutate(root, info, name):
return Person(name=name)
schema.execute(
'''
mutation myFirstMutation {
createPerson(name:"Peter") {
name
__typename
}
}
'''
)
#===================Execution====================
#----------Query-------------
# query via context
class Query(ObjectType):
name = String()
def resolve_name(root, info):
return info.context.get('name')
schema = Schema(Query)
result = schema.execute('{ name }', context={'name': 'Syrus'})
assert result.data['name'] == 'Syrus'
# query via variables
class Query(ObjectType):
user = Field(User, id=ID(required=True))
def resolve_user(root, info, id):
return get_user_by_id(id)
schema = Schema(Query)
result = schema.execute(
'''
query getUser($id: ID) {
user(id: $id) {
id
firstName
lastName
}
}
''',
variables={'id': 12},
)
#query via root value
# Value used for Parent Value Object(parent) in root queries and mutations can be overridden using root parameter.
class Query(ObjectType):
    me = Field(User)

    def resolve_user(root, info):
        # `root` here is the value passed via the `root=` execute parameter.
        return {'id': root.id, 'firstName': root.name}

schema = Schema(Query)
# Fixed: the constructor call was closed with `}` instead of `)`,
# which is a SyntaxError.
user_root = User(id=12, name='bob')
result = schema.execute(
    '''
    query getUser {
        user {
            id
            firstName
            lastName
        }
    }
    ''',
    root=user_root
)
# query via operation name,
# operation_name used to indicate which operation should be executed.
class Query(ObjectType):
me = Field(User)
def resolve_user(root, info):
return get_user_by_id(12)
schema = Schema(Query)
query_string = '''
query getUserWithFirstName {
user {
id
firstName
lastName
}
}
query getUserWithFullName {
user {
id
fullName
}
}
'''
result = schema.execute(
query_string,
operation_name='getUserWithFullName'
)
#---------Middleware----------
# class-based middleware
class AuthorizationMiddleware(object):
def resolve(next, root, info, **args):
if info.field_name == 'user':
return None
return next(root, info, **args)
result = schema.execute('THE QUERY', middleware=[AuthorizationMiddleware()])
#function middleware
def timing_middleware(next, root, info, **args):
start = time.time()
return_value = next(root, info, **args)
duration = time.time() - start
logger.debug("{parent_type}.{field_name}: {duration} ms".format(
parent_type=root._meta.name if root and hasattr(root, '_meta') else '',
field_name=info.field_name,
duration=round(duration * 1000, 2)
))
return return_value
result = schema.execute('THE QUERY', middleware=[timing_middleware])
# ------DataLoader---------
from promise import Promise
from promise.dataloader import DataLoader

# Batching: batch_load_fn receives all keys requested in one tick and must
# return a Promise resolving to one value per key, in the same order.
class UserLoader(DataLoader):
    def batch_load_fn(self, keys):
        # Here we return a promise that will result on the
        # corresponding user for each key in keys
        return Promise.resolve([get_user(id=key) for key in keys])

user_loader = UserLoader()
# These chained lookups coalesce into batched fetches.
user_loader.load(1).then(lambda user: user_loader.load(user.best_friend_id))
user_loader.load(2).then(lambda user: user_loader.load(user.best_friend_id))

# Redefinition: an ORM-backed loader. The dict lookup keeps the results
# aligned with `keys`; ids with no match resolve to None.
class UserLoader(DataLoader):
    def batch_load_fn(self, keys):
        users = {user.id: user for user in User.objects.filter(id__in=keys)}
        return Promise.resolve([users.get(user_id) for user_id in keys])
# Using with Graphene
# Example query served by the type below:
# {
#   me {
#     name
#     bestFriend {
#       name
#     }
#     friends(first: 5) {
#       name
#       bestFriend {
#         name
#       }
#     }
#   }
# }
class User(graphene.ObjectType):
    name = graphene.String()
    # lambda defers resolution so the type can reference itself.
    best_friend = graphene.Field(lambda: User)
    friends = graphene.List(lambda: User)

    def resolve_best_friend(root, info):
        # Deferred, batched fetch through the shared loader instance.
        return user_loader.load(root.best_friend_id)

    def resolve_friends(root, info):
        return user_loader.load_many(root.friend_ids)
# file upload
# $pip install graphene-file-upload
from graphene_file_upload.scalars import Upload

class UploadMutation(graphene.Mutation):
    """Mutation accepting a multipart file upload via the Upload scalar."""

    class Arguments:
        file = Upload(required=True)

    success = graphene.Boolean()

    def mutate(self, info, file, **kwargs):
        # do something with your file
        return UploadMutation(success=True)
|
import sqlite3  # placeholder import; database logic not implemented yet


def main():
    """Entry point: announce this module when run as a script."""
    message = 'This is the databases.py file'
    print(message)


if __name__ == "__main__":
    main()
|
'''
Given a sorted array, remove the duplicates in place such that each element appear only once and return the new length.
Do not allocate extra space for another array, you must do this in place with constant memory.
For example,
Given input array nums = [1,1,2],
Your function should return length = 2, with the first two elements of nums being 1 and 2 respectively. It doesn't matter what you leave beyond the new length.
'''
# Approach 1: build a second array (violates the O(1)-space constraint but
# is the simplest baseline). Because the input is sorted, duplicates are
# adjacent, so copy an element only when it differs from its predecessor.
arr = [1, 2, 2, 5, 6, 7, 7, 8, 9]
rem = []
for i in range(len(arr)):
    if i == 0 or arr[i] != arr[i - 1]:
        rem.append(arr[i])
print(rem)
print(arr)

# Approach 2: in place, O(n^2). On each duplicate, shift the tail left by
# one and grow the duplicate count.
# BUG FIXES vs. the original:
#  * scan only the still-valid prefix (len(arr) - dup_count): the shifted
#    tail repeats the last element and was being counted as extra
#    "duplicates";
#  * after a shift, re-check the same index (the shifted-in element may
#    itself be a duplicate of its new neighbour);
#  * trim with len(arr) - dup_count, not len(arr) - dup_count + 1 (the
#    off-by-one kept one stale element).
arr = [1, 2, 2, 5, 6, 7, 7, 8, 9]
dup_count = 0
i = 1
while i < len(arr) - dup_count:
    if arr[i] == arr[i - 1]:
        dup_count += 1
        for j in range(i, len(arr) - 1):
            arr[j] = arr[j + 1]
    else:
        i += 1
arr = arr[0:len(arr) - dup_count]
print(arr)

# Approach 3: classic two-pointer, O(n). `i` is the write pointer for the
# last unique element, `j` scans ahead. `nondup` counts transitions, so
# the unique prefix holds nondup + 1 elements.
arr = [1, 2, 3, 5, 6, 7, 7, 8, 9, 9]
i = 0
nondup = 0
for j in range(len(arr)):
    if arr[i] != arr[j]:
        i += 1
        nondup += 1
        arr[i] = arr[j]
arr = arr[0:nondup + 1]
print(arr)
import logging
from services.vk_service import VkService
from models.DiscordExtension import DiscordExtension
from models.ExecutionContext import ExecutionContext
class VkExtension(DiscordExtension):
    """Discord bot extension that enqueues audio requested via VK."""

    def __init__(self, vk_service: VkService):
        self.vk_service = vk_service

    @property
    def name(self):
        # Human-readable extension name shown by the bot.
        return 'VK commands'

    def isserving(self, ctx: ExecutionContext):
        # This extension handles only the 'vk-play' command.
        return ctx.cmd in ['vk-play']

    async def execute(self, ctx: ExecutionContext):
        cmd = ctx.cmd
        if (cmd == 'vk-play'):
            # Only enqueue when the requester is in a voice channel.
            if (ctx.voice_channel() != None):
                logging.info(f'Discord user id {ctx.author.id} has requested the song(s) via vk pm')
                await self.vk_service.enqueue_audio(ctx.arg, ctx)
            else:
                logging.info(f'Discord user id {ctx.author.id} has requested the song via vk pm not being on the server')

    def list_commands(self, ctx: ExecutionContext):
        # No user-listable commands are advertised.
        return []

    def list_emojis(self):
        # No emoji reactions are handled.
        return []

    def emoji_to_command(self, emoji: str):
        return None

    async def initialize(self, bot):
        # No startup work required.
        pass

    def dispose(self):
        # No resources to release.
        pass
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from abc import abstractmethod
from contextlib import contextmanager
from dataclasses import dataclass, field
from enum import Enum
from typing import TYPE_CHECKING, Callable, ClassVar, Iterator, Type, cast
from typing_extensions import final
from pants.engine.engine_aware import EngineAwareReturnType
from pants.engine.unions import UnionMembership
from pants.option.option_types import StrOption
from pants.option.scope import ScopeInfo
from pants.option.subsystem import Subsystem
from pants.util.docutil import doc_url
from pants.util.meta import classproperty
if TYPE_CHECKING:
from pants.engine.console import Console
class GoalSubsystem(Subsystem):
    """The Subsystem used by `Goal`s to register the external API, meaning the goal name, the help
    message, and any options.

    This class should be subclassed and given a `GoalSubsystem.name` that it will be referred to by
    when invoked from the command line. The `Goal.name` also acts as the options_scope for the Goal.

    Rules that need to consume the GoalSubsystem's options may directly request the type:

    ```
    @rule
    def list(console: Console, list_subsystem: ListSubsystem) -> List:
        transitive = list_subsystem.transitive
        documented = list_subsystem.documented
        ...
    ```
    """

    @classmethod
    def activated(cls, union_membership: UnionMembership) -> bool:
        """Return `False` if this goal should not show up in `./pants help`.

        Usually this is determined by checking `MyType in union_membership`.
        """
        return True

    @classmethod
    def create_scope_info(cls, **scope_info_kwargs) -> ScopeInfo:
        # Flag goal scopes so the options system can distinguish goals
        # from plain subsystems.
        return super().create_scope_info(is_goal=True, **scope_info_kwargs)

    @classproperty
    @abstractmethod
    def name(cls):
        """The name used to select the corresponding Goal on the commandline and the options_scope
        for its options."""

    @classproperty
    def options_scope(cls) -> str:
        # The goal name doubles as the options scope.
        return cast(str, cls.name)
@dataclass(frozen=True)
class Goal:
    """The named product of a `@goal_rule`.

    This class should be subclassed and linked to a corresponding `GoalSubsystem`:

    ```
    class ListSubsystem(GoalSubsystem):
        '''List targets.'''
        name = "list"

    class List(Goal):
        subsystem_cls = ListSubsystem
    ```

    Since `@goal_rules` always run in order to produce side effects (generally: console output),
    they are not cacheable, and the `Goal` product of a `@goal_rule` contains only a exit_code
    value to indicate whether the rule exited cleanly.
    """

    class EnvironmentBehavior(Enum):
        """Indicates that the goal will always operate on the local environment target.

        This is largely the same behavior as Pants has had pre-2.15.
        """
        LOCAL_ONLY = 2
        # NOTE: the f-string below is an expression statement, not a
        # docstring; it documents the member that follows it in-source.
        f""" Indicates that the goal chooses the environments to use to execute rules within the goal.
        This requires migration work to be done by the goal author. See
        {doc_url('plugin-upgrade-guide')}.
        """
        USES_ENVIRONMENTS = 3

    # Exit status surfaced to the command line by the engine.
    exit_code: int
    subsystem_cls: ClassVar[Type[GoalSubsystem]]

    # As above, this f-string documents the class var that follows it.
    f"""Indicates that a Goal has been migrated to compute EnvironmentNames to build targets in.

    All goals in `pantsbuild/pants` should be migrated before the 2.15.x branch is cut, but end
    user goals have until `2.17.0.dev4` to migrate.

    See {doc_url('plugin-upgrade-guide')}.
    """
    environment_behavior: ClassVar[EnvironmentBehavior]

    @classmethod
    def _selects_environments(cls) -> bool:
        return cls.environment_behavior == Goal.EnvironmentBehavior.USES_ENVIRONMENTS

    @final
    @classproperty
    def name(cls) -> str:
        # Delegated to the paired GoalSubsystem so the two never diverge.
        return cast(str, cls.subsystem_cls.name)
class Outputting:
    """A mixin for Goal that adds options to support output-related context managers.

    Allows output to go to a file or to stdout.

    Useful for goals whose purpose is to emit output to the end user (as distinct from incidental logging to stderr).
    """

    output_file = StrOption(
        default=None,
        metavar="<path>",
        help="Output the goal's stdout to this file. If unspecified, outputs to stdout.",
    )

    @final
    @contextmanager
    def output(self, console: "Console") -> Iterator[Callable[[str], None]]:
        """Given a Console, yields a function for writing data to stdout, or a file.

        The passed options instance will generally be the `Goal.Options` of an `Outputting` `Goal`.
        """
        with self.output_sink(console) as output_sink:
            yield lambda msg: output_sink.write(msg)  # type: ignore[no-any-return]

    @final
    @contextmanager
    def output_sink(self, console: "Console") -> Iterator:
        """Yield the raw sink (the opened file, or console stdout), flushing on exit."""
        stdout_file = None
        if self.output_file:
            stdout_file = open(self.output_file, "w")
            output_sink = stdout_file
        else:
            output_sink = console.stdout  # type: ignore[assignment]
        try:
            yield output_sink
        finally:
            # Always flush; close only the file we opened ourselves.
            output_sink.flush()
            if stdout_file:
                stdout_file.close()
class LineOriented(Outputting):
    """Extends Outputting with a configurable line separator."""

    sep = StrOption(
        default="\\n",
        metavar="<separator>",
        help="String to use to separate lines in line-oriented output.",
    )

    @final
    @contextmanager
    def line_oriented(self, console: "Console") -> Iterator[Callable[[str], None]]:
        """Given a Console, yields a function for printing lines to stdout or a file.

        The passed options instance will generally be the `Goal.Options` of an `Outputting` `Goal`.
        """
        # The option value arrives with escapes doubled (e.g. "\\n");
        # unicode_escape turns it into the real separator character(s).
        sep = self.sep.encode().decode("unicode_escape")
        with self.output_sink(console) as output_sink:
            yield lambda msg: print(msg, file=output_sink, end=sep)
@dataclass(frozen=True)
class CurrentExecutingGoals(EngineAwareReturnType):
    """Tracks which goals are currently executing, keyed by goal name."""

    executing: dict[str, type[Goal]] = field(default_factory=dict)

    def __hash__(self) -> int:
        # The dict field itself is unhashable; hash on the goal names.
        return hash(tuple(self.executing.keys()))

    def is_running(self, goal: str) -> bool:
        return goal in self.executing

    @contextmanager
    def _execute(self, goal: type[Goal]) -> Iterator[None]:
        # Register the goal for the duration of the `with` block only.
        self.executing[goal.name] = goal
        try:
            yield
        finally:
            self.executing.pop(goal.name, None)

    def cacheable(self) -> bool:
        # Mutable execution state must never be memoized by the engine.
        return False
|
def map_generate(f, seed, N):
    """Iterate *f* from *seed* and return the orbit [seed, f(seed), f(f(seed)), ...].

    Args:
        f: single-argument function to iterate.
        seed: initial value (always the first element for N >= 1).
        N: total number of values to return.

    Returns:
        A list of exactly N values. (Fix: the original returned one
        element even for N <= 0; behavior for N >= 1 is unchanged.)
    """
    if N <= 0:
        return []
    values = [seed]
    current = seed
    for _ in range(N - 1):
        current = f(current)
        values.append(current)
    return values
#!/usr/bin/env python
import unittest
# Dotted module names gathered into a single suite by `all()` below.
TEST_MODULES = [
    'plop.test.collector_test',
    'plop.test.platform_test',
    'plop.test.callgraph_test',
]

def all():
    """Return a TestSuite containing every test from TEST_MODULES.

    NOTE(review): the name shadows the builtin all(); presumably kept
    because tornado.testing looks the suite up by this name — confirm.
    """
    return unittest.defaultTestLoader.loadTestsFromNames(TEST_MODULES)

if __name__ == '__main__':
    import tornado.testing
    tornado.testing.main()
|
from model.City import City
class CityQueries():
    """Read-only query helpers for the City model."""

    @classmethod
    def getCityByName(cls, name):
        # Delegates to the ORM; return semantics (single object vs. error
        # on no match) depend on the City model's `get` implementation.
        return City.get(city_name=name)
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 1 12:46:30 2019
@author: Raghav
"""
from keras.preprocessing import image
from keras.models import load_model
import numpy as np
import os
import functools
# Class labels, index-aligned with the CNN's softmax output.
CHARACTERS = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
              'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y',
              'Z']
# Pre-trained character classifier.
_model = load_model('./Models/CNN/CNN3.h5')
classification_result = []
files = []
# Collect the segmented character images produced by an earlier stage.
for filename in os.listdir('./output/segments/'):
    files.append(filename)
def sorted_by(a, b):
    """cmp-style comparator ordering segment filenames numerically.

    Filenames look like '<index>.<ext>'; compare by the integer index so
    '10.png' sorts after '2.png' (plain string order would not).

    Returns -1, 0 or 1 as required by functools.cmp_to_key.
    Fix: removed a leftover debug print of every compared pair.
    """
    val1 = int(a.split('.')[0])
    val2 = int(b.split('.')[0])
    # (x > y) - (x < y) is the canonical Python 3 replacement for cmp().
    return (val1 > val2) - (val1 < val2)
# Sort numerically (e.g. '2.png' before '10.png') via the comparator above.
cmp = functools.cmp_to_key(sorted_by)
files.sort(key=cmp)
for filename in files:
    segment = image.load_img('output/segments/' + filename)
    img_tensor = image.img_to_array(segment)  # (height, width, channels)
    # Add the batch dimension and normalise pixel values to [0, 1].
    img_tensor = np.expand_dims(img_tensor, axis=0)
    img_tensor /= 255.
    class_ = _model.predict(img_tensor)
    # argmax over the scores selects the predicted character index.
    index = np.argmax(class_)
    output = CHARACTERS[index]
    classification_result.append(output)
plate_string = ''
# Concatenate the per-segment predictions into the plate text.
for pred in classification_result:
    plate_string += pred[0]
print(plate_string)
import sys
import cv2
import numpy as np
from google.colab.patches import cv2_imshow
# Load the two frames to compare.
img1 = cv2.imread("first.jpg")
img2 = cv2.imread("second.jpg")
# Convert to grayscale (presumably a precursor to diffing or feature
# matching — later steps are not shown here).
img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
# cv2_imshow(img1_gray)
# cv2_imshow(img2_gray)
from collections import deque
from concurrent.futures import Future
# fut = Future()
# fut.result() # blocks - wait for set_result
# other threads
# fut.set_result(42) # unblock
class Queuey:
    """A minimal non-blocking bounded queue built on concurrent.futures.Future.

    Neither operation ever blocks: when it cannot complete immediately it
    hands back a Future that the caller may wait on instead.
    """

    def __init__(self, maxsize):
        self.maxsize = maxsize
        self.items = deque()
        self.getters = deque()  # futures of callers waiting for an item
        self.putters = deque()  # futures of callers waiting for space

    def get_noblock(self):
        """Return (item, None) when an item is ready, else (None, future)."""
        if not self.items:
            waiter = Future()
            self.getters.append(waiter)
            return None, waiter
        # Consuming an item frees a slot: wake one blocked putter, if any.
        # (The woken putter receives True and must retry its put.)
        if self.putters:
            self.putters.popleft().set_result(True)
        return self.items.popleft(), None

    def put_noblock(self, item):
        """Store *item*; return a Future to wait on when the queue is full."""
        if len(self.items) >= self.maxsize:
            waiter = Future()
            self.putters.append(waiter)
            return waiter
        self.items.append(item)
        # Deliver the freshly stored item straight to a waiting getter.
        if self.getters:
            self.getters.popleft().set_result(self.items.popleft())
if __name__ == "__main__":
    # Demo of the non-blocking behavior with a capacity-2 queue.
    q = Queuey(2)
    q.put_noblock(1)
    q.put_noblock(2)
    print(q.put_noblock(3))  # q is full -> returns a pending Future
    print(q.items)
    print(q.putters)
    q.get_noblock()  # free up an item in the q (wakes the blocked putter)
    print(q.putters)
    q.get_noblock()
    a = q.get_noblock()  # nothing more to get --> (None, future): wait
    print(a)
    q.put_noblock(3)  # delivers 3 straight to the waiting getter
    print(a)
    print(a[1].result())
|
import json, sys
def convert(jsonmap, out):
    """Convert a Tiled-style JSON map dict to a compact custom text format.

    Writes UTF-8 encoded lines to the binary stream *out* and closes it
    when done: first the map geometry, then tilesets, tile layers (with
    comma-separated tile data), and finally object groups.
    """
    width = jsonmap['width']
    height = jsonmap['height']
    tileWidth = jsonmap['tilewidth']
    tileHeight = jsonmap['tileheight']
    tileSets = jsonmap['tilesets']
    layers = jsonmap['layers']
    # First line: overall map geometry.
    out.write( bytes("w%d,h%d,tw%d,th%d\n" % (width, height, tileWidth, tileHeight), 'UTF-8') )
    # Tileset count, then one descriptor line per tileset.
    out.write( bytes("t%d\n" % len(tileSets), 'UTF-8') )
    for tileSet in tileSets:
        out.write( bytes("w%d,h%d,tw%d,th%d,g%d,s%d,n'%s'\n" % (tileSet['imagewidth'], \
            tileSet['imageheight'], tileSet['tilewidth'], tileSet['tileheight'], \
            tileSet['firstgid'], tileSet['spacing'], tileSet['image']), 'UTF-8') )
    # Amount of tile layers (object groups are counted separately below).
    amount = 0
    for layer in layers:
        if (layer['type'] == "tilelayer"):
            amount += 1
    out.write(bytes("l%d\n" % amount, 'UTF-8'))
    # Tile layers: geometry prefix, then comma-separated tile indices.
    for layer in layers:
        if not (layer['type'] == "tilelayer"):
            continue
        data = layer['data']
        out.write(bytes("w%d,h%d,d" % (layer['width'], layer['height']), 'UTF-8') )
        for i in range(0, len(data)):
            out.write(bytes("%d" % data[i], 'UTF-8') )
            # No trailing comma after the last tile index.
            if not (i == len(data)-1 ):
                out.write(bytes(",", 'UTF-8') )
        out.write(bytes("\n", 'UTF-8') )
    # Amount of object groups.
    amount = 0
    for group in layers:
        if (group['type'] == 'objectgroup'):
            amount += 1
    out.write(bytes("o%d\n" % amount, 'UTF-8') )
    # Object groups: object count, then one line per object.
    for group in layers:
        if not (group['type'] == 'objectgroup'):
            continue
        data = group['objects']
        out.write(bytes("%1d\n" % len(data), 'UTF-8'))
        for gr in data:
            # NOTE(review): int(gr['type']) assumes the object's type
            # field holds a numeric string — non-numeric types raise.
            out.write(bytes("x%d,y%d,w%d,h%d,v%d,n'%s'\n" % \
                (gr['x'], gr['y'], gr['width'], gr['height'], \
                int(gr['type']), gr['name']), 'UTF-8'))
    out.close()
if __name__ == "__main__":
    # Expect exactly "-i input.json -o output.cmap" (4 args after argv[0]).
    if not (len(sys.argv) == 5):
        print("Usage: python3 %s -i input.json -o output.cmap" % sys.argv[0])
        sys.exit(1)
    i = 0
    outfile = None
    args = sys.argv[1:]
    # Argument processing: walk flag/value pairs.
    while i < 4:
        arg = args[i]
        # NOTE(review): this check only rejects non-dash args whose second
        # char is not i/o; unknown dash flags fall to the else branch below.
        if not (arg[0] == '-' or not(arg[1] == 'i' or arg[1] == 'o')):
            print("Invalid argument: %s" % arg)
            sys.exit(1)
        arg = args[i]
        if arg[1] == 'i':
            try:
                arg = args[i+1]
                infile = open(arg, 'r')
                jsonmap = json.loads(infile.read())
            except IOError:
                print("Could not open input file: %s" % arg)
                sys.exit(1)
        elif arg[1] == 'o':
            try:
                arg = args[i+1]
                outfile = open(arg, 'wb')
            except IOError:
                print("Could not open output file: %s" % arg)
                sys.exit(1)
        else:
            print("Invalid argument")
            sys.exit(1)
        i += 2
    # NOTE(review): if -i was never supplied, `jsonmap` is unbound here.
    convert(jsonmap, outfile)
#!/usr/bin/env python
from __future__ import print_function
import subprocess, os, time, sys
# Metadata consumed by the menu/launcher that drives this routine.
short_name = 'Opt 3'
disp_name = 'Single search'
otype = 'Routine'
# Prompts shown to the user, in order; responses land in `answers`.
need = ['What do you want to search for: ', \
    'Drag and drop a directory you want to search in: ']
answers = []
# Maps file name -> a matching line found in that file.
dups = {}
def run():
    """Prompt for a search term and a directory, then scan every file in it.

    Python 2 routine (raw_input, Windows 'cls'/backslash paths). Results
    accumulate in the module globals `answers` and `dups`, and are written
    out by output_to_file().
    """
    global answers, dups
    answers=[]
    dups={}
    while True:
        os.system('cls')  # Windows-only screen clear
        i = 0
        # Re-prompt until each answer passes validate().
        while i < len(need):
            ans = raw_input(need[i])
            if validate(ans):
                answers.append(ans)
                i += 1
        # Strip the surrounding quotes added by drag-and-drop on Windows.
        answers[1] = (answers[1])[1:-1]
        wait_timer('\nSearching your in your files..')
        files = os.listdir(answers[1])
        for x in files:
            if os.path.isfile(answers[1]+'\\'+str(x)):
                with open(answers[1]+'\\'+str(x), 'r') as log_file:
                    for line in log_file:
                        if answers[0] in line:
                            # NOTE(review): one dict entry per file, so only
                            # the LAST matching line per file is kept.
                            dups.update({str(x):line})
        output_to_file()
        raw_input('\n\nSearch finished with '+str(len(dups))+ \
            ' results found. Please press enter to return.')
        return
def output_to_file():
    """Write the collected search hits (`dups`) to a timestamped file.

    BUG FIX: the timestamp format was "%H%m" — hour followed by MONTH.
    "%H%M" (hour + minute) matches the apparent intent, so two searches
    within the same hour no longer collide on the same file name.
    """
    now = time.strftime("%d%b%Y-%H%M")
    path = os.getcwd()
    # Windows-style path; assumes Search_Results already exists.
    save_file = path + "\\Search_Results\\Single_Search_on_" + str(now) + '.txt'
    sys.stdout.write('\nWriting search hits to file...')
    with open(save_file, 'w') as output:
        output.write('Search results for ' + answers[0] + ' in the ' + \
            'directory of ' + answers[1] + '.\n')
        j = ''
        # dups maps file name -> matching line (Python 2 iteritems).
        for x, y in dups.iteritems():
            # Emit a per-file header only when the file name changes.
            if x != j:
                output.write('\nSearches found in file: ' + str(x) + '.\n')
            output.write(str(y))
            sys.stdout.write('.')
            j = x
def validate(char):
    """Return True for a non-empty answer, False otherwise.

    Equivalent to the original if/return-True/return-False chain:
    truthiness of the value decides.
    """
    return bool(char)
def wait_timer(what):
    """Print *what* plus '..', then four more dots, one every quarter second."""
    sys.stdout.write(what + '..')
    for _ in range(4):
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(.25)
|
from django.db import models
from diarySite.settings import AUTH_USER_MODEL
class Diary(models.Model):
    """A single diary entry written by a user."""

    title = models.CharField(max_length=40)
    content = models.TextField()
    # FIX: on_delete is a required argument since Django 2.0. CASCADE
    # (delete a user's diaries with the user) matches the implicit
    # pre-2.0 default, so existing behavior is preserved.
    author = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE)
    diary_date = models.DateField()

    def __str__(self):
        return self.title
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Programming Techniques for Scientific Simulations, HS 2015, Week13
# File: harmonic_chain.py
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
def omega_calculated(K, m, N):
    """Eigenfrequencies of an N-atom harmonic chain from the eigenvalue problem.

    Builds the coupling matrix with 2 on the diagonal and -1 on the first
    superdiagonal; eigvalsh(UPLO='U') treats it as symmetric, so the
    subdiagonal is mirrored implicitly. Returns sqrt(K/m * eigenvalues)
    in ascending order.
    """
    coupling = 2 * np.eye(N) - np.eye(N, k=1)
    eigenvalues = np.linalg.eigvalsh(coupling, UPLO='U')
    return np.sqrt(K / m * eigenvalues)
def omega_theoretical(K, m, N):
    """Analytic eigenfrequencies: sqrt(K/m * (2 - 2*cos(k*pi/(N+1)))), k = 1..N."""
    modes = np.arange(1, N + 1)
    dispersion = 2 - 2 * np.cos(np.pi * modes / (N + 1))
    return np.sqrt(K / m * dispersion)
def main(K=1., m=1., N=16):
    """Print calculated vs. theoretical frequencies with their relative error."""
    header_fmt = '{:25}' * 3 + '\n'
    print(header_fmt.format(
        'Omega (calculated)',
        'Omega (theoretical)',
        'relative error'
    ))
    row_fmt = 3 * '{:<25.7g}'
    pairs = zip(
        omega_calculated(K=K, m=m, N=N),
        omega_theoretical(K=K, m=m, N=N)
    )
    for calc, theory in pairs:
        print(row_fmt.format(calc, theory, abs((calc - theory) / theory)))


if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
from plone import api
from plone.app.testing import logout
from plone.registry.interfaces import IRegistry
from sc.photogallery.config import PROJECTNAME
from sc.photogallery.interfaces import IBrowserLayer
from sc.photogallery.interfaces import IPhotoGallerySettings
from sc.photogallery.testing import INTEGRATION_TESTING
from zope.component import getUtility
from zope.interface import alsoProvides
import unittest
class ControlPanelTestCase(unittest.TestCase):
    """Integration tests for the photogallery control-panel registration."""

    layer = INTEGRATION_TESTING

    def setUp(self):
        self.portal = self.layer['portal']
        self.request = self.layer['request']
        # Mark the request so this add-on's browser-layer views resolve.
        alsoProvides(self.request, IBrowserLayer)
        self.controlpanel = self.portal['portal_controlpanel']

    def test_controlpanel_has_view(self):
        view = api.content.get_view(u'photogallery-settings', self.portal, self.request)
        view = view.__of__(self.portal)
        self.assertTrue(view())

    def test_controlpanel_view_is_protected(self):
        from AccessControl import Unauthorized
        # Anonymous users must not be able to reach the settings form.
        logout()
        with self.assertRaises(Unauthorized):
            self.portal.restrictedTraverse('@@photogallery-settings')

    def test_controlpanel_installed(self):
        actions = [
            a.getAction(self)['id'] for a in self.controlpanel.listActions()]
        self.assertIn('photogallery', actions)

    def test_controlpanel_removed_on_uninstall(self):
        qi = self.portal['portal_quickinstaller']
        # Uninstalling requires Manager privileges.
        with api.env.adopt_roles(['Manager']):
            qi.uninstallProducts(products=[PROJECTNAME])
        actions = [
            a.getAction(self)['id'] for a in self.controlpanel.listActions()]
        self.assertNotIn('photogallery', actions)
class RegistryTestCase(unittest.TestCase):
    """Integration tests for the photogallery registry records."""

    layer = INTEGRATION_TESTING

    def setUp(self):
        self.portal = self.layer['portal']
        self.registry = getUtility(IRegistry)
        self.settings = self.registry.forInterface(IPhotoGallerySettings)

    def test_download_record_in_registry(self):
        # The record must exist and default to disabled.
        self.assertTrue(hasattr(self.settings, 'enable_download'))
        self.assertEqual(self.settings.enable_download, False)

    def test_records_removed_on_uninstall(self):
        qi = self.portal['portal_quickinstaller']
        with api.env.adopt_roles(['Manager']):
            qi.uninstallProducts(products=[PROJECTNAME])
        self.assertNotIn(
            IPhotoGallerySettings.__identifier__ + '.enable_download',
            self.registry,
        )
|
# -*- coding: utf-8 -*-
from django.conf import settings as django_settings
import django
CACHE_LOADER_NAME = 'amp_tools.loader.CachedLoader'
AMP_TOOLS_LOADER = 'amp_tools.loader.Loader'
class SettingsProxy(object):
    """Attribute lookup that falls back from real settings to defaults."""

    def __init__(self, settings, defaults):
        self.settings = settings
        self.defaults = defaults

    def __getattr__(self, attr):
        """Resolve *attr* on settings first, then defaults; raise if absent from both."""
        for source in (self.settings, self.defaults):
            try:
                return getattr(source, attr)
            except AttributeError:
                continue
        raise AttributeError(u'settings object has no attribute "%s"' % attr)
class defaults(object):
    """Fallback values for every AMP_TOOLS_* setting.

    The class body also derives AMP_TOOLS_TEMPLATE_LOADERS from the
    project's configured template loaders, excluding amp_tools' own
    loader (unwrapping it from a cached-loader tuple when present).
    """
    AMP_TOOLS_TEMPLATE_FOLDER = u'amp'
    AMP_TOOLS_TEMPLATE_PREFIX = ''
    AMP_TOOLS_GET_PARAMETER = 'amp-content'
    AMP_TOOLS_GET_VALUE = 'amp'
    AMP_TOOLS_ACTIVE_URLS = []
    AMP_TOOLS_TEMPLATE_LOADERS = []
    # Old Django kept loaders in TEMPLATE_LOADERS; newer versions nest
    # them under TEMPLATES[0]['OPTIONS']['loaders'].
    if django.VERSION[0] < 2 and django.VERSION[1] < 8:
        TEMPLATES = django_settings.TEMPLATE_LOADERS
    else:
        TEMPLATES = django_settings.TEMPLATES[0]['OPTIONS']['loaders']
    for loader in TEMPLATES:
        # A cached loader is a (name, [inner_loaders]) pair: keep its
        # inner loaders, except amp_tools' own.
        if isinstance(loader, (tuple, list)) and loader[0] == CACHE_LOADER_NAME:
            for cached_loader in loader[1]:
                if cached_loader != AMP_TOOLS_LOADER:
                    AMP_TOOLS_TEMPLATE_LOADERS.append(cached_loader)
        elif loader != AMP_TOOLS_LOADER:
            AMP_TOOLS_TEMPLATE_LOADERS.append(loader)
    AMP_TOOLS_TEMPLATE_LOADERS = tuple(AMP_TOOLS_TEMPLATE_LOADERS)

# Public accessor: real project settings first, the defaults as fallback.
settings = SettingsProxy(django_settings, defaults)
|
import cv2
# Open camera device index 2 (assumes a third capture device is present).
cam=cv2.VideoCapture(2)
while True:
    # tf is the read-success flag; frame is the captured BGR image.
    tf,frame=cam.read()
    cv2.imshow('frame',frame)
    key=cv2.waitKey(1)
    # Press 'k' to quit the preview loop.
    if key==ord('k'):
        break
cam.release()
cv2.destroyAllWindows()
# Copyright (C) 2020 FireEye, Inc. All Rights Reserved.
from .. import api
import speakeasy.winenv.defs.windows.com as comdefs
class ComApi(api.ApiHandler):
    """
    Implements COM interfaces (emulated stubs for the speakeasy emulator).
    """

    name = 'com_api'
    apihook = api.ApiHandler.apihook
    impdata = api.ApiHandler.impdata

    def __init__(self, emu):
        super(ComApi, self).__init__(emu)
        self.funcs = {}
        self.data = {}
        # Registers the @apihook-decorated methods with the handler base.
        super(ComApi, self).__get_hook_attrs__(self)

    # First argument (self) is not reflected in method definitions; note this increases argc by 1
    @apihook('IUnknown.QueryInterface', argc=3)
    def IUnknown_QueryInterface(self, emu, argv, ctx={}):
        """
        HRESULT QueryInterface(
          REFIID riid,
          void   **ppvObject
        );
        """
        # not implemented: report success without filling ppvObject
        return comdefs.S_OK

    @apihook('IUnknown.AddRef', argc=1)
    def IUnknown_AddRef(self, emu, argv, ctx={}):
        """
        ULONG AddRef();
        """
        # not implemented: pretend the new refcount is 1
        return 1

    @apihook('IUnknown.Release', argc=1)
    def IUnknown_Release(self, emu, argv, ctx={}):
        """
        ULONG Release();
        """
        # not implemented: pretend the object was fully released
        return 0

    @apihook('IWbemLocator.ConnectServer', argc=9)
    def IWbemLocator_ConnectServer(self, emu, argv, ctx={}):
        """
        HRESULT ConnectServer(
          const BSTR    strNetworkResource,
          const BSTR    strUser,
          const BSTR    strPassword,
          const BSTR    strLocale,
          long          lSecurityFlags,
          const BSTR    strAuthority,
          IWbemContext  *pCtx,
          IWbemServices **ppNamespace
        );
        """
        ptr, strNetworkResource, strUser, strPassword, strLocale, lSecurityFlags, strAuthority, \
            pCtx, ppNamespace = argv
        # Replace the raw pointer with its decoded string (for logging).
        argv[1] = self.read_wide_string(strNetworkResource)
        if ppNamespace:
            # Hand back a pointer to an emulated IWbemServices interface.
            ci = emu.com.get_interface(emu, emu.get_ptr_size(), 'IWbemServices')
            pNamespace = self.mem_alloc(emu.get_ptr_size(),
                                        tag='emu.COM.ppNamespace_IWbemServices')
            self.mem_write(pNamespace, ci.address.to_bytes(emu.get_ptr_size(), 'little'))
            self.mem_write(ppNamespace, pNamespace.to_bytes(emu.get_ptr_size(), 'little'))
        return comdefs.S_OK

    @apihook('IWbemServices.ExecQuery', argc=6)
    def IWbemServices_ExecQuery(self, emu, argv, ctx={}):
        """
        HRESULT ExecQuery(
          const BSTR           strQueryLanguage,
          const BSTR           strQuery,
          long                 lFlags,
          IWbemContext         *pCtx,
          IEnumWbemClassObject **ppEnum
        );
        """
        ptr, strQueryLanguage, strQuery, lFlags, pCtx, ppEnum = argv
        # Decode the query-language and query strings for logging.
        argv[1] = self.read_wide_string(strQueryLanguage)
        argv[2] = self.read_wide_string(strQuery)
        # not implemented so returning -1
        return -1
|
import urllib.request

# Fetch the page and print its body. NOTE(review): decode() with no
# argument assumes the response is UTF-8 — confirm against the server.
url = 'http://zhangyuzechn.cn'
response = urllib.request.urlopen(url = url)
print(response.read().decode())
|
#!/usr/bin/env python
import argparse
import json
import os
import re
import subprocess
import sys
import urllib
import json
# Keys used in the prh JSON config file.
SLACK_INTEGRATION_URL_KEY = "SLACK_INTEGRATION_URL"
DEFAULT_PULL_REQUEST_BODY_KEY = "DEFAULT_PULL_REQUEST_BODY"
DEFAULT_COMMIT_MESSAGE_KEY = "DEFAULT_COMMIT_MESSAGE"
PIVOTAL_API_TOKEN_KEY = "PIVOTAL_TRACKER_API_TOKEN"
GITHUB_API_TOKEN_KEY = "GITHUB_API_TOKEN"
# Template content written when no config file exists yet.
EMPTY_CONFIG_CONTENT_DIC = {GITHUB_API_TOKEN_KEY: "", PIVOTAL_API_TOKEN_KEY: "",
                            DEFAULT_COMMIT_MESSAGE_KEY: "Commit", DEFAULT_PULL_REQUEST_BODY_KEY: "",
                            SLACK_INTEGRATION_URL_KEY: ""}
REPO_PATH = ""  # for debug purposes
# PRH_CONFIG_PATH = "/usr/local/etc"
PRH_CONFIG_PATH = "config_file_path"
PRH_CONFIG_FILE_NAME = "/prh_config"
GIT_CONFIG_PATH = "/config"
GIT_FILE_PATH = ".git"
APP_VERSION = "2.4.1"
DEFAULT_COMMIT_MESSAGE = ""  # prh_config.DEFAULT_COMMIT_MESSAGE
DEFAULT_PR_BODY = ""  # prh_config.DEFAULT_PULL_REQUEST_BODY
NO_ERROR = 0
# Global flags, toggled elsewhere by command-line options (Python 2 script).
debug_is_on = 0
verbose_is_on = 0
local_only_is_on = 0
stay_is_on = 0
is_in_submodule = 0
repo_path = ""
pivotal_tracker_api_endpoint = "https://www.pivotaltracker.com/services/v5"
# Cached story JSON, filled lazily by the get_pivotal_* helpers below.
story = ""
class Service:
    """Thin HTTP wrapper around `requests` with Pivotal Tracker auth.

    NOTE: this file is Python 2 (print statements below and iteritems
    elsewhere). The get/post/put methods call the module-level log().
    """
    # Imported at class scope, so it is accessed as self.requests.
    import requests

    def __init__(self, token=False, header={}):
        # NOTE(review): mutable default `header={}` is shared across
        # instances; harmless here because it is never mutated.
        self.token = token
        if token:
            # Pivotal Tracker authenticates via this header.
            self.header = {"X-TrackerToken": self.token}
        else:
            self.header = header

    def get(self, api):
        response = self.requests.get(api, headers=self.header)
        log("--> %s" % api)
        log("<-- %s\n" % response.json())
        return response

    def post(self, api, data):
        response = self.requests.post(api, data=data, headers=self.header)
        log("--> %s" % api)
        log("<-- %s\n" % response.json())
        return response

    def put(self, api, data):
        response = self.requests.put(api, data=data, headers=self.header)
        log("--> %s" % api)
        log("<-- %s\n" % response.json())
        return response

    @staticmethod
    def log(message):
        # Echo only when the module-level verbose flag is set.
        if verbose_is_on:
            print message
def storiesResponseToMarkdownText(arrayOfDicts_storyArray, arrayofStrings_orderedFieldNames):
    """Render Pivotal story dicts as a markdown table with the given columns.

    Missing fields become empty cells; integer values are stringified and
    embedded newlines are flattened to spaces so each story stays on a
    single table row.
    """
    all_story_rows = []
    for story_dict in arrayOfDicts_storyArray:
        cells = []
        for field_name in arrayofStrings_orderedFieldNames:
            if field_name not in story_dict:
                # No value for this column: emit an empty cell.
                cells.append("")
                continue
            cell = story_dict[field_name]
            if isinstance(cell, int):
                cell = str(cell)
            cells.append(cell.replace('\n', " "))
        all_story_rows.append(cells)
    # Column names double as the table header.
    return composeMarkdownTable(arrayofStrings_orderedFieldNames, all_story_rows)
def composeMarkdownTable(arrayOfStrings_columnNames, arrayOfArrayOfString_rows):
    """Build a markdown table string: header row, '--' separator row, data rows."""
    separator_cells = ["--"] * len(arrayOfStrings_columnNames)
    table_rows = [arrayOfStrings_columnNames, separator_cells] + list(arrayOfArrayOfString_rows)
    return "\n".join(stringArrayToMarkdownTableRow(row) for row in table_rows)
def stringArrayToMarkdownTableRow(stringArray):
    """Join the cells with ' | ' to form one unbroken markdown table row."""
    return " | ".join(stringArray)
def log(message):
    # Verbose-only echo (Python 2 print statement).
    if verbose_is_on:
        print message
def get_pivotal_story(story_id):
    """Fetch a single story's JSON from the Pivotal Tracker v5 API."""
    api = "{}/stories/{}".format(pivotal_tracker_api_endpoint, story_id)
    resp = Service(read_from_config_file()[PIVOTAL_API_TOKEN_KEY]).get(api)
    story = resp.json()
    return story
# /projects/{project_id}/labels/{label_id}
def get_pivotal_label_name(project_id, label_id):
    """Fetch a Pivotal Tracker label and return its name.

    BUG FIX: the original mixed positional .format() arguments with the
    NAMED placeholders {project_id}/{label_id}, so str.format raised
    KeyError before any request was made.
    """
    api = "{}/projects/{}/labels/{}".format(pivotal_tracker_api_endpoint, project_id, label_id)
    resp = Service(read_from_config_file()[PIVOTAL_API_TOKEN_KEY]).get(api)
    label = resp.json()
    return label["name"]
def get_pivotal_stories(project_id, labels, columns):
    """Return stories matching ALL *labels*, restricted to *columns* fields.

    If a story was already cached in the global `story`, it is returned
    instead. NOTE(review): that early return yields a single story dict
    rather than a list — confirm callers expect this.
    """
    global story
    if story:
        return story
    filter_string = " and ".join(["label:{}".format(l) for l in labels])
    # Python 2: urllib.urlencode (moved to urllib.parse in Python 3).
    urlencode = urllib.urlencode(
        {'filter': filter_string, 'fields': ",".join(columns)})
    api = "{}/projects/{}/stories?{}".format(pivotal_tracker_api_endpoint, project_id, urlencode)
    resp = Service(read_from_config_file()[PIVOTAL_API_TOKEN_KEY]).get(api)
    return resp.json()
def get_pivotal_story_tasks(project_id, story_id):
    """Return the story's task list as parsed JSON. Sample response:

    [
        {
            "kind": "task",
            "id": 52555419,
            "story_id": 140104217,
            "description": "with some tasks",
            "complete": false,
            "position": 1,
            "created_at": "2017-02-16T23:52:05Z",
            "updated_at": "2017-02-16T23:52:05Z"
        },
        {
            "kind": "task",
            "id": 52555421,
            "story_id": 140104217,
            "description": "task 2",
            "complete": false,
            "position": 2,
            "created_at": "2017-02-16T23:52:09Z",
            "updated_at": "2017-02-16T23:52:09Z"
        }
    ]
    """
    api = "{}/projects/{}/stories/{}/tasks".format(pivotal_tracker_api_endpoint, project_id, story_id)
    resp = Service(read_from_config_file()[PIVOTAL_API_TOKEN_KEY]).get(api)
    return resp.json()
def mark_pivotal_story_finished(project_id, story_id):
    """Set the story's current_state to 'finished'; return the response JSON."""
    api = "{}/projects/{}/stories/{}".format(pivotal_tracker_api_endpoint, project_id, story_id)
    resp = Service(read_from_config_file()[PIVOTAL_API_TOKEN_KEY]).put(api, {"current_state": "finished"})
    return resp.json()
def post_pivotal_comment(project_id, story_id, text):
    """Attach *text* as a comment on the story; return the response JSON."""
    api = "{}/projects/{}/stories/{}/comments".format(pivotal_tracker_api_endpoint, project_id, story_id)
    resp = Service(read_from_config_file()[PIVOTAL_API_TOKEN_KEY]).post(api, {"text": text})
    return resp.json()
class ReleaseConfig:
    """Plain value object describing a GitHub release payload."""

    def __init__(self, tag_name="", target_commitish="", name="", body="", draft=False, prerelease=False):
        # Mirror every constructor argument onto the instance unchanged.
        fields = {
            "tag_name": tag_name,
            "target_commitish": target_commitish,
            "name": name,
            "body": body,
            "draft": draft,
            "prerelease": prerelease,
        }
        for attr_name, attr_value in fields.items():
            setattr(self, attr_name, attr_value)
# POST /repos/:owner/:repo/releases
def post_github_release(owner, repo, release_config):
    """Create a GitHub release described by *release_config* (a ReleaseConfig).

    Delegates the authenticated POST to github_api_post (defined
    elsewhere in this file).
    """
    github = "https://api.github.com"
    api = "{}/repos/{}/{}/releases".format(github, owner, repo)
    data = {
        "tag_name": release_config.tag_name,
        "target_commitish": release_config.target_commitish,
        "name": release_config.name,
        "body": release_config.body,
        "draft": release_config.draft,
        "prerelease": release_config.prerelease
    }
    res = github_api_post(api, data)
    return res
def get_pivotal_project_id(story_id):
    """Return the project id owning *story_id*, caching the story globally."""
    global story
    if story:
        # Reuse the previously fetched story JSON.
        return story["project_id"]
    api = "{}/stories/{}".format(pivotal_tracker_api_endpoint, story_id)
    resp = Service(read_from_config_file()[PIVOTAL_API_TOKEN_KEY]).get(api)
    story = resp.json()
    return story["project_id"]
def finish_and_post_message_to_pivotal(story_id, message):
    """Mark the story finished and attach *message* as a comment.

    Returns NO_ERROR (0) on success and 1 on any failure. NOTE(review):
    1/0 here is the inverse of boolean truthiness conventions.
    """
    project_id = get_pivotal_project_id(story_id)
    if not read_from_config_file()[PIVOTAL_API_TOKEN_KEY]:
        return 1
    if not mark_pivotal_story_finished(project_id, story_id):
        return 1
    if not post_pivotal_comment(project_id, story_id, message):
        return 1
    return NO_ERROR
def run_command_str(command, output=0):
    """Split a whitespace-delimited command string and run it.

    BUG FIX: the original discarded run_command's return value, so
    callers could never observe the command's output or exit code;
    propagate it instead.
    """
    command_list = str.split(command)
    return run_command(command_list, output)
def run_command(command, output=0):
    """
    run the given command
    :param command:
    :param output:
    :return: 0 if no error occurs or the error code
    >>> run_command(["echo","hello world"],1)
    'hello world\\n'
    >>> run_command(["echo","hello world"])
    0
    >>> run_command(["ls","buchofjunck"],1)
    1
    >>> run_command(["ls","buchofjunck"],0)
    1
    """
    # In debug mode nothing is executed; the command is only echoed.
    if debug_is_on:
        print_command(command)
        return NO_ERROR
    else:
        if verbose_is_on:
            print command
        try:
            if output:
                # output=1: capture and return stdout as a string
                run_output = subprocess.check_output(command)
            else:
                # output=0: just run; check_call returns 0 on success
                run_output = subprocess.check_call(command)
            log("\nOUTPUT:\t" + str(run_output))
            return run_output
        except subprocess.CalledProcessError as e:
            # Non-zero exit: the returncode doubles as a truthy error flag.
            return e.returncode
def print_command(command):
    # Debug-mode echo: show "<branch> >>> <command that WOULD run>".
    # Python 2 print statement; `" ".join(command)` would be the idiomatic join.
    print get_head() + " >>> " + reduce(lambda x, y: x + " " + y, command)
def checkout(branch_name):
    """Check out the given branch.

    BUG FIX: the original returned None unconditionally, so callers testing
    the result for an error (e.g. revert_all) could never detect a failure.
    """
    command = ["git", "checkout", branch_name]
    return run_command(command)
def add_files(file_paths):
    """Stage the given paths (git add <paths>); returns the command output."""
    return run_command(["git", "add"] + file_paths, 1)
def add_all():
    """Stage every change (git add -A); returns the command output."""
    return run_command(["git", "add", "-A"], 1)
def get_head(current_path=""):
    """Return the branch name HEAD points at, read from the repo's git dir."""
    with open(get_repo_git_dir(current_path) + "/HEAD") as head_file:
        ref = head_file.read()
    marker = "refs/heads/"
    idx = ref.rfind(marker)
    return ref[idx + len(marker):].strip()
def get_repo_git_dir(current_path=""):
    """Return the path of the repo's git directory.

    Inside a submodule the ``.git`` entry is a regular FILE holding a
    ``gitdir: <path>`` pointer; in that case the pointed-to path is
    returned instead (e.g. ``../.git/modules/<name>``).
    """
    if current_path and not current_path.endswith("/"):
        current_path += "/"
    git_dir_path = current_path + GIT_FILE_PATH
    # Submodule case: follow the "gitdir: ..." pointer file.
    if os.path.isfile(git_dir_path):
        with open(git_dir_path) as pointer:
            first_line = pointer.readline()
        git_dir_path = first_line.split(":")[1].strip()
    log("git dir path = " + git_dir_path)
    return git_dir_path
def get_submodule_name():
    """Return the second space-separated field of `git submodule` output.

    NOTE(review): `git submodule` lines look like " <sha1> <path> (<ref>)",
    so splitting on a single space makes element [1] the sha1, not the
    submodule path — confirm whether the sha or the name was intended.
    """
    command = ["git", "submodule"]
    output = run_command(command, True)
    return output.split(" ")[1]
def get_status():
    """Return the raw stdout of ``git status``."""
    command = ["git", "status"]
    return subprocess.check_output(command)
def launch_browser(url):
    """Open *url* via the ``open`` command (empty argument when url is falsy)."""
    run_command(["open", url or ""])
def cd(path):
    """Change this process's working directory to *path*.

    BUG FIX: the original spawned ``["cd", path]`` as a subprocess, which can
    never change the parent process's cwd (``cd`` is a shell builtin and the
    child's cwd dies with it); os.chdir performs the intended effect.
    """
    os.chdir(path)
def ask_user(question):
    """Prompt with *question*; True only when the lowered answer is exactly 'y'."""
    answer = raw_input(question)
    return str.lower(answer) == 'y'
def add_changes(is_add_all, file_paths):
    """Stage changes: everything, an explicit path list, or report failure.

    Returns whatever the underlying git command returns, or an error string
    when there is nothing to add.
    """
    if is_add_all:
        return add_all()
    if file_paths:
        return add_files(file_paths)
    return "Failed to add files"
def delete_branch(branch_name):
    """Force-delete the local branch (git branch -D); returns the output."""
    return run_command(["git", "branch", "-D", branch_name], 1)
def git_stash():
    """Stash the working tree (git stash); returns run_command's result."""
    return run_command(["git", "stash"])
def git_stash_apply():
    """Re-apply the latest stash (git stash apply); returns the command output."""
    return run_command(["git", "stash", "apply"], 1)
def git_reset_head():
    """Unstage everything (git reset HEAD); returns run_command's result."""
    return run_command(["git", "reset", "HEAD"])
def create_branch(branch_name):
    """Create and switch to *branch_name*.

    Returns a falsy value on success and an error string otherwise. When the
    branch already exists (git exit code 128) the user may opt to check it
    out instead; local changes are stashed, carried over, then unstaged.
    """
    res = run_command(["git", "checkout", "-q", "-b", branch_name], 0)
    if res != 128:
        return res
    answer = raw_input(">>> Branch '%s' already exists, would you like to check it out (y/n)?" % branch_name)
    if str.lower(answer) != 'y':
        return "Failed to create the new branch"
    add_all()
    git_stash()
    checkout(branch_name)
    # NOTE(review): git_stash_apply returns captured stdout, which may be
    # non-empty even on success — confirm this error check fires correctly.
    error = git_stash_apply()
    git_reset_head()
    if error:
        return "Error applying changes"
    # BUG FIX: the original aborted when the user answered YES to proceeding.
    if not ask_user(">>> Proceed with commiting and creating PR (y/n)?"):
        return "Aborted"
def has_git_editor_set():
    """Return git's configured core.editor output (truthy when one is set)."""
    return run_command(["git", "config", "--get", "core.editor"], 1)
def commit(user_input):
    """Commit staged changes, deriving a message from linked stories if needed.

    When no commit message was supplied, the last linked Pivotal story's name
    is used; failing that, DEFAULT_COMMIT_MESSAGE. Returns an error string on
    failure, None on success.
    """
    if not user_input.commit_message:
        for story_id in user_input.tracker_ids:
            if story_id:
                story_json = get_pivotal_story(story_id)
                user_input.commit_message = story_json["name"]
        if not user_input.commit_message:
            # BUG FIX: the original rebound `user_input` itself to the default
            # STRING, which broke every attribute access below.
            user_input.commit_message = DEFAULT_COMMIT_MESSAGE
    if has_git_editor_set():
        # -e opens the configured editor pre-filled with the message
        command = ["git", "commit", "-e", "-m", str(user_input.commit_message)]
    else:
        command = ["git", "commit", "-m", str(user_input.commit_message)]
    res = run_command(command)
    if res:
        return "Failed to commit changes"
def push(branch_name):
    """Push the branch to origin (no-op under --local); error string on failure."""
    if local_only_is_on:
        return NO_ERROR
    if run_command(["git", "push", "--set-upstream", "origin", branch_name]):
        return "Failed to push the commit to origin"
def find_existing_pr(owner, repo, head, base):
    """Return the html_url of an open PR matching head -> base, else None."""
    api = "https://api.github.com/repos/{}/{}/pulls".format(owner, repo)
    res = github_api_get(api)
    if res.status_code >= 300:
        return None
    for pr in res.json():
        if pr["head"]["ref"] == head and pr["base"]["ref"] == base:
            return pr["html_url"]
def read_pr_template():
    """Return the repo's pull-request template text when present, else None.

    Looks in .github/ first, then the repo root, mirroring GitHub's lookup.
    """
    pr_template_file_name = "PULL_REQUEST_TEMPLATE.md"
    for candidate in (".github/" + pr_template_file_name, pr_template_file_name):
        if os.path.isfile(candidate):
            with open(candidate, mode='r') as f:
                return f.read()
def create_pull_request(from_branch, to_branch, user_input):
if local_only_is_on:
return NO_ERROR
if not user_input.pr_title:
pr_title = get_head().replace("_", " ")
if not user_input.pr_body:
pr_body = DEFAULT_PR_BODY
else:
pr_body = user_input.pr_body + "\n" + DEFAULT_PR_BODY
# Add description of stories to the pr_body
for i in range(len(user_input.tracker_urls)):
story = get_pivotal_story(user_input.tracker_ids[i])
description = name = ""
if "description" in story:
description = story["description"]
if "name" in story:
name = story["name"]
pr_body = pr_body + "\n\n**Story:** [" + name + "](" + user_input.tracker_urls[i] + ")\n" + description
pr_template = read_pr_template()
if pr_template:
log("Reading from PR-Template")
pr_body = pr_body + "\n" + pr_template
setup_config_dic = read_from_setup_file()
owner = setup_config_dic["owner"]
repo = setup_config_dic["repo"]
if not owner:
print "run prh setup first"
return 1
# https://developer.github.com/v3/pulls/#create-a-pull-request
github = "https://api.github.com"
api = "{}/repos/{}/{}/pulls".format(github, owner, repo)
data = {
"title": pr_title,
"body": pr_body,
"head": from_branch,
"base": to_branch
}
res = github_api_post(api, data)
if res.status_code == 201:
pr_url = res.json()["html_url"]
print "PR created: {}".format(pr_url)
if pr_url and str(pr_url)[:4] == "http":
for i in range(len(user_input.tracker_ids)):
if user_input.tracker_ids[i]:
project_id = get_pivotal_project_id(user_input.tracker_ids[i])
if post_pivotal_comment(project_id, user_input.tracker_ids[i], "PR: " + pr_url):
print "error with pivotal, commenting pr link"
if ask_user("Mark story with id=" + user_input.tracker_ids[i] + " as finished?(y/n)"):
if mark_pivotal_story_finished(project_id, user_input.tracker_ids[i]):
print "error with pivotal, marking story as finished"
launch_browser(pr_url)
return NO_ERROR
else:
existing_pr_url = find_existing_pr(owner, repo, from_branch, to_branch)
if existing_pr_url:
print existing_pr_url
launch_browser(existing_pr_url)
return NO_ERROR
for e in res.json()["errors"]:
print "Error:", e["message"]
return "Failed to create pull-request from " + from_branch + " to " + to_branch
def github_api_post(api, data):
    """POST *data* (JSON-encoded) to the GitHub API with the stored token."""
    token = read_from_config_file()[GITHUB_API_TOKEN_KEY]
    response = Service(header={"Authorization": "token " + token}).post(api, data=json.dumps(data))
    log("--> %s" % api)
    log("<-- %s\n" % response.json())
    return response
def github_api_get(api):
    """GET from the GitHub API with the stored token; returns the response."""
    token = read_from_config_file()[GITHUB_API_TOKEN_KEY]
    response = Service(header={"Authorization": "token " + token}).get(api)
    log("--> %s" % api)
    log("<-- %s\n" % response.json())
    return response
def verify_file_paths(file_paths):
    """Return 1 (error) when any of the given paths is missing, else None."""
    for candidate in file_paths or []:
        if not os.path.exists(candidate):
            print("Make sure %s exists" % candidate)
            return 1
def verify_parent_in_origin(origin):
    """Warn when the parent branch has no local remote-tracking ref.

    NOTE(review): this always returns a falsy value (0 or None), so callers
    doing `if error: return error` never abort on it — confirm whether a
    warning-only behavior is intended.
    """
    if not os.path.exists(get_repo_git_dir() + "/refs/remotes/origin/%s" % origin):
        print "could not find the parent branch '%s' in your local remote refs, in case of error, make sure you have " \
            "pushed the parent branch" % origin
        return 0
def terminate_on_error(func, args):
    """Invoke func(args); propagate a truthy error value, otherwise None."""
    outcome = func(args)
    if outcome:
        return outcome
def parse_commit_message(raw_commit_message):
    """Extract Pivotal tracker urls/ids from a commit message.

    NOTE(review): dead code — immediately shadowed by the 3-argument
    parse_commit_message defined below. As written it is also buggy:
    `full_urls = story_ids = []` aliases a single list, and re.findall with
    one capture group yields plain strings, so url[0]/url[1] index characters
    rather than (url, id) pairs.
    """
    # re_search = re.search("http[s]?:\/\/.*pivotaltracker.*/(\d*)", commit_message)
    commit_message = raw_commit_message
    re_res = re.findall("http[s]?:\/\/.*pivotaltracker.*\/(\d*)", commit_message)
    # "https://www.pivotaltracker.com/story/show/140176051 https://www.pivotaltracker.com/story/show/139604723"
    full_urls = story_ids = []
    if re_res:
        for url in re_res:
            full_urls += url[0]
            story_ids += url[1]
            commit_message = commit_message.replace(url[0], "")
    return commit_message, full_urls, story_ids
# if re_search:
# full_url = re_search.group(0)
# story_id = re_search.group(1)
# global pivotal_tracker_story_id
# pivotal_tracker_story_id = story_id
# global pivotal_tracker_story_url
# pivotal_tracker_story_url = full_url
# commit_message = commit_message.replace(full_url, "")
def parse_commit_message(commit_message, full_urls, story_ids):
    """
    Parse the user entered commit message and extract any known urls from it

    Pivotal tracker story urls are stripped from the message and collected,
    together with their numeric story ids, until none remain.
    :param commit_message:
    :param full_urls:
    :param story_ids:
    :return: (commit_message, full_urls, story_ids)
    """
    # Iterative form of the original recursion: peel one url per pass.
    while True:
        match = re.search("http[s]?:\/\/\S*pivotaltracker.com\S*\/(\d*)", commit_message)
        if not match:
            return commit_message, full_urls, story_ids
        full_urls.append(match.group(0))
        story_ids.append(match.group(1))
        commit_message = commit_message.replace(match.group(0), "")
def process_from_child(origin, new, add_all, just_pr, file_paths, user_input):
    """Branch off *origin* into *new*, commit, push and open a PR back to origin.

    Returns the first truthy error value from the pipeline, else "Done".
    """
    err = create_branch(new)
    if err:
        return err
    if not just_pr:
        err = add_changes(add_all, file_paths)
        if err:
            return err
        err = commit(user_input)
        if err:
            return err
    err = push(new)
    if err:
        return err
    err = create_pull_request(new, origin, user_input)
    if err:
        return err
    if stay_is_on:
        err = checkout(origin)
        if err:
            return err
    return "Done"
def process_to_parent(origin, parent, add_all, just_pr, file_paths, user_input):
    """Commit and push the current branch, then open a PR into *parent*.

    Returns the first truthy error value from the pipeline, else "Done".
    """
    if not just_pr:
        err = add_changes(add_all, file_paths)
        if err:
            return err
        err = commit(user_input)
        if err:
            return err
    err = push(origin)
    if err:
        return err
    err = create_pull_request(origin, parent, user_input)
    if err:
        return err
    return "Done"
def revert_all(branch_origin, branch_child, branch_parent, is_add_all, file_paths):
    """Best-effort rollback: return to the original branch.

    NOTE(review): only branch_origin is used; the remaining parameters look
    like a planned fuller rollback that was never implemented — confirm.
    """
    if checkout(branch_origin):
        return "Failed to check out original branch"
class UserInput:
    """Mutable holder for the user-supplied commit/PR data.

    BUG FIX: the original used mutable default arguments ([]), so every
    ``UserInput()`` shared the same tracker_urls/tracker_ids lists and data
    leaked between instances.
    """

    def __init__(self, commit_message="", tracker_urls=None, tracker_ids=None, pr_title="", pr_body=""):
        self.pr_title = pr_title
        self.pr_body = pr_body
        # None sentinels keep the signature backward-compatible while giving
        # each instance its own fresh lists.
        self.tracker_ids = tracker_ids if tracker_ids is not None else []
        self.tracker_urls = tracker_urls if tracker_urls is not None else []
        self.commit_message = commit_message
def release(release_story_id, owner, repo, tag_name):
    """Publish a GitHub release described by a Pivotal 'release story'.

    The story's labels select all stories for the release; their details are
    rendered to markdown and posted as the release body.
    """
    if not release_story_id:
        print("Have to provide a pivotal tracker label for this release")
        return
    release_story = get_pivotal_story(release_story_id)
    labels = [label["name"] for label in release_story["labels"]]
    columns = ["id", "name", "description", "story_type", "url"]
    # Fetch every story carrying the release label(s) in the same project.
    stories = get_pivotal_stories(release_story["project_id"], labels, columns)
    config = ReleaseConfig(tag_name=tag_name, target_commitish="master",
                           name="v%s" % release_story["name"],
                           body=storiesResponseToMarkdownText(stories, columns))
    post_github_release(owner, repo, config)
def parse_args(args):
    """Drive the whole PR flow from the parsed CLI namespace.

    Returns False on argument errors, an error string on failures, otherwise
    None (setup / successful dispatch print their own results).
    """
    # there is a syntax error in arguments
    if not args:
        return False
    # Release mode short-circuits everything else.
    if args.release:
        re_res = re.findall("http[s]?:\/\/.*pivotaltracker.*\/(\d*)", args.release)
        setup_config_dic = read_from_setup_file()
        owner = setup_config_dic["owner"]
        repo = setup_config_dic["repo"]
        if args.tag and (args.repo or repo) and (args.owner or owner):
            release(re_res[0], owner=(args.owner or owner), repo=(args.repo or repo), tag_name=args.tag)
        else:
            print("parameter is missing, have to provide all of: owner, repo, tag")
        return False
    file_paths = []
    branch_child = branch_parent = pr_title = pr_body = is_add_all = is_just_pr = commit_message = ""
    need_to_confirm_empty = need_to_confirm_add_all = ""
    # get main branch name
    branch_origin = get_head()
    working_path = ""
    user_input = UserInput()
    if args.setup:
        setup()
        return
    if args.path:
        working_path = args.path
        if working_path[-1] != "/":
            working_path += "/"
    # Mode flags are stored in module globals consumed by run_command & co.
    if args.debug:
        global debug_is_on
        debug_is_on = 1
    if args.verbose:
        global verbose_is_on
        verbose_is_on = 1
    if args.stay_on:
        global stay_is_on
        stay_is_on = 1
    if args.branch:
        branch_child = args.branch
    if args.sub_branch:
        # -sb derives the child name from the current branch.
        branch_child = branch_origin + "_" + args.sub_branch
    if args.pr_body:
        user_input.pr_body = args.pr_body
    if args.pr_title:
        user_input.pr_title = args.pr_title
    if args.add:
        # -a exists
        for p in args.add:
            file_paths.append(working_path + p)
        # no path to add
        if not file_paths:
            need_to_confirm_empty = 1
    else:
        # no -a
        need_to_confirm_add_all = 1
    if args.empty:
        is_just_pr = True
        is_add_all = False
        need_to_confirm_add_all = False
    if args.upto:
        branch_parent = args.upto
    if args.sub:
        # Recurse into each configured submodule with -sub cleared.
        # NOTE(review): `submodule_args["sub"] = 0` indexes an argparse
        # Namespace like a dict — confirm this path was ever exercised.
        setup_file = read_from_setup_file()
        for pair in setup_file["submodules"]:
            if os.path.exists(pair):
                cd(pair)
                submodule_args = args
                submodule_args["sub"] = 0
                parse_args(submodule_args)
    if args.message:
        commit_message, full_urls, story_ids = parse_commit_message(args.message, [], [])
        user_input.tracker_urls = full_urls
        user_input.tracker_ids = story_ids
        user_input.commit_message = commit_message
    if args.local:
        global local_only_is_on
        local_only_is_on = 1
    # Verification
    error = verify_file_paths(file_paths)
    if error:
        return error
    error = verify_parent_in_origin(branch_parent if branch_parent else branch_origin)
    if error:
        return error
    if need_to_confirm_add_all:
        # Dry-run add (-n) to show what WOULD be staged before confirming.
        list_of_changes = str(run_command(["git", "add", "-A", "-n"], 1)).strip()
        if not list_of_changes:
            # list of changes is empty
            need_to_confirm_empty = True
        else:
            print("\n" + list_of_changes)
            if ask_user(">>> Would you like to apply above changes (y/n)? "):
                is_add_all = True
            else:
                return "Either add files using -a or add all the changes"
    if need_to_confirm_empty:
        if ask_user(">>> No file has been added, would you like to continue creating PR (y/n)? "):
            is_just_pr = True
        else:
            return "Either add files using -a or add all the changes"
    # Dispatch: child-branch flow (-b/-sb) vs parent-branch flow (-upto).
    if branch_child and not branch_parent:
        print process_from_child(branch_origin, branch_child, is_add_all, is_just_pr, file_paths, user_input)
    elif branch_parent and not branch_child:
        print process_to_parent(branch_origin, branch_parent, is_add_all, is_just_pr, file_paths, user_input)
    else:
        return
def missing_global_config():
    """True when the user-level config lacks a GitHub API token."""
    return not read_from_config_file()[GITHUB_API_TOKEN_KEY]
def missing_local_config():
    """True when the repo-scoped .prh setup file is absent or empty."""
    return not read_from_setup_file()
def setup():
    """Interactive first-run configuration.

    Collects API tokens into the user-scoped config, then derives owner/repo
    from the repo's .git/config remotes and writes the repo-scoped setup file.
    """
    print "Running setup"
    prh_config = read_from_config_file()
    github_token = prh_config[GITHUB_API_TOKEN_KEY]
    pivotal_token = prh_config[PIVOTAL_API_TOKEN_KEY]
    # global setup
    config_changed = 0
    if not github_token:
        github_token = raw_input("Please enter your Github API token: ")
        if github_token:
            config_changed = 1
    if not pivotal_token:
        pivotal_token = raw_input("Please enter your PivotalTracker API token: ")
        if pivotal_token:
            config_changed = 1
    if config_changed:
        # Persist tokens together with the default message/body/slack keys.
        write_to_config_file({GITHUB_API_TOKEN_KEY: github_token, PIVOTAL_API_TOKEN_KEY: pivotal_token,
                              DEFAULT_COMMIT_MESSAGE_KEY: "Commit", DEFAULT_PULL_REQUEST_BODY_KEY: "",
                              SLACK_INTEGRATION_URL_KEY: ""})
    # local setup
    git_dir = get_repo_git_dir()
    if os.path.isdir(git_dir):
        with open(git_dir + GIT_CONFIG_PATH) as git_config:
            config_string = git_config.read()
            # Remotes of the form: [remote "<name>"] ... url = git...git
            remotes = re.findall('\[remote "(.*)"\].*\n.*url = (git.*\.git).*', config_string)
            # submodules = re.findall('\[submodule "(.*)"\]\n.*url = (.*).*', config_string)
    else:
        print "You should run prh from a git repository directory"
        return
    if not remotes:
        print "Could not find origin url in the .git/config file.\nYour origin URL should be in form of git.*\.git"
        return
    write_to_setup_file(remotes)
def run_popen(command):
    """Run *command* through Popen and return its first stdout line, newline stripped.

    FIX: removed the unreachable ``return '.'`` that followed the return
    inside the with-block. NOTE(review): ``shell=True`` combined with a list
    argument only executes the first element on POSIX — confirm callers pass
    a single command string.
    """
    popen = subprocess.Popen(command, stdin=subprocess.PIPE, shell=True, stdout=subprocess.PIPE)
    with popen.stdout as output:
        return output.readline().replace("\n", "")
def get_owner(git_url):
    """Return the owner segment of a git remote url.

    >>> get_owner("git@github.com:doximity/Android.git")
    'doximity'
    """
    tail = git_url.split(":")[-1]
    return tail.split("/")[0]
def get_repo(git_url):
    """Return the repository name segment of a git remote url.

    >>> get_repo("git@github.com:doximity/Android.git")
    'Android'
    """
    tail = git_url.split(":")[-1]
    return tail.split("/")[1].split(".")[0]
def write_to_setup_file(remotes):
    """Pick a git remote (prompting when several) and persist owner/repo plus
    an empty submodule map to <repo>/.prh as JSON.

    NOTE(review): the interactive picker only appears for MORE than two
    remotes; with exactly two the first is used silently — confirm the
    ``> 2`` threshold (``> 1`` looks intended).
    """
    # git@github.com:doximity/Android.git
    index = 1
    selected_remote_index = '1'
    if len(remotes) > 2:
        for i, j in remotes:
            print "%d : %s = %s" % (index, i, j)
            index += 1
        selected_remote_index = raw_input("Which remote to use (enter line number)?")
    owner = get_owner(remotes[int(selected_remote_index) - 1][1])
    repo = get_repo(remotes[int(selected_remote_index) - 1][1])
    submodules_dic = {}
    # for submodule in submodules:
    #     submodules_dic[submodule[0]] = submodule[1]
    # command = ["git rev-parse --show-toplevel"]
    # repo_root_path = run_popen(command)
    out = json.dumps({"owner": owner, "repo": repo, "submodules": submodules_dic})
    with open(repo_path + '/.prh', 'w') as f:
        f.write(out)
def read_from_setup_file():
    """
    Read the REPO Scoped config file of PRH
    :return: the parsed dict, or None when <repo>/.prh does not exist
    """
    setup_path = repo_path + '/.prh'
    if not os.path.exists(setup_path):
        return None
    with open(setup_path, 'r') as setup_file:
        return json.load(setup_file)
def parse_arguments():
    """Build the CLI parser and return the parsed namespace.

    Returns False (after printing help) when none of the flow-triggering
    options (-b, -sb, -upto, setup) were provided.
    NOTE: ``ArgumentParser(version=...)`` is the Python 2 argparse API.
    """
    parser = argparse.ArgumentParser(version=APP_VERSION)
    parser.add_argument("--verbose", help="run in verbose mode", const=True, nargs='?')
    parser.add_argument("-d", "--debug", help="run in debug mode", const=True, nargs='?')
    parser.add_argument("-s", "--stay_on", help="come back to current branch after all is done", const=True, nargs='?')
    parser.add_argument("-b", "--branch", help="Name of child branch", nargs='?')
    parser.add_argument("-sb", "--sub_branch", help="Name of child branch appended to the name of parent branch",
                        nargs='?')
    parser.add_argument("-pb", "--pr_body", help="Overwrite PullRequest Body text", nargs='?')
    parser.add_argument("-pt", "--pr_title", help="OverWrite PullRequest Title text", nargs='?')
    parser.add_argument("-a", "--add",
                        help="Add files with given path, not using the option will add all files",
                        nargs='*')
    parser.add_argument("-e", "--empty",
                        help="not making any commits or adds, just creates the PR",
                        const=True,
                        nargs='?')
    parser.add_argument("-upto", "--upto", help="Name of the parent branch that this PR should point to", nargs='?')
    parser.add_argument("-sub", "--sub", help="WIP", const=True, nargs='?')
    parser.add_argument("-m", "--message",
                        help="Overwrite commit message or add a Pivotal Tracker "
                             "story link to fetch all the details from the story",
                        nargs='?')
    parser.add_argument("-l", "--local",
                        help="Do not push any changes or create a PR, only create the branch and make the commit",
                        const=True, nargs='?')
    parser.add_argument("setup", help="Setup the pull-request helper", const=True, nargs='?')
    parser.add_argument("release",
                        help="URL of the release story on Pivotal tracker that has matching label to all the stories "
                             "in that release. For example, the release story might have '1.2.3' as a label, then all "
                             "the stories that come up from searching the '1.2.3' lable will be included in the "
                             "release",
                        const=True, nargs='?')
    parser.add_argument("-p", "--path")
    parser.add_argument("--owner", help="Repository owner. ex: for doximity/android the owner is doximity")
    parser.add_argument("--repo", help="Repository name. ex: for doximity/android the owner is android")
    parser.add_argument("--tag", help="tag name for the release")
    args = parser.parse_args()
    # Nothing actionable was requested -> show usage instead of proceeding.
    if not args.branch and not args.upto and not args.sub_branch and not args.setup:
        parser.print_help()
        return False
    return args
def write_to_config_file(dic, to_path=PRH_CONFIG_PATH + PRH_CONFIG_FILE_NAME + ".json"):
    """
    write to the user scoped config file for PRH

    FIX: serializes with json.dumps instead of hand-assembling JSON text,
    which relied on Python 2's indexable dict.keys()/values() and produced
    invalid JSON for values containing quotes. Output stays compatible with
    read_from_config_file's json.load.
    :return:
    """
    with open(to_path, mode='w') as f:
        f.write(json.dumps(dic))
def read_from_config_file(file_path=PRH_CONFIG_PATH + PRH_CONFIG_FILE_NAME + ".json"):
    """
    read from the user scoped config file for PRH; on first use a default
    config is written and then re-read.
    """
    if os.path.isfile(file_path):
        with open(file_path, mode='r') as f:
            return json.load(f)
    # First run: seed the default config, then read it back.
    write_to_config_file({
        GITHUB_API_TOKEN_KEY: "",
        PIVOTAL_API_TOKEN_KEY: "",
        DEFAULT_COMMIT_MESSAGE_KEY: "Commit",
        DEFAULT_PULL_REQUEST_BODY_KEY: "",
        SLACK_INTEGRATION_URL_KEY: ""
    })
    return read_from_config_file()
def migrate_config_file(from_path=PRH_CONFIG_PATH + PRH_CONFIG_FILE_NAME + ".py",
                        to_path=PRH_CONFIG_PATH + PRH_CONFIG_FILE_NAME + ".json"):
    """Convert the legacy ``key = "value"`` .py config to JSON, then delete it.

    No-op when the legacy file does not exist.
    """
    if not os.path.isfile(from_path):
        return
    legacy = {}
    with open(from_path, "r") as conf:
        for line in conf.readlines():
            key, value = line.split("=")
            legacy[key.strip()] = value.strip('" \n')
    write_to_config_file(legacy, to_path)
    os.remove(from_path)
def main():
    """Entry point: migrate legacy config, resolve the repo path, ensure
    configuration exists, then run the argument-driven flow and exit with
    its result."""
    migrate_config_file()
    if REPO_PATH:
        global repo_path
        repo_path = REPO_PATH
    else:
        # get current working dir
        # global repo_path
        # (the `global` declaration above applies to the whole function, so
        # this branch also assigns the module-level repo_path)
        repo_path = os.getcwd()
    if missing_global_config() or missing_local_config():
        setup()
    sys.exit(parse_args(parse_arguments()))
# Entry-point guard: allow importing this module without running the CLI.
if __name__ == "__main__":
    main()
|
"""
Created by Alex Wang on 2017-06-23
"""
import tensorflow as tf
def add(var_one):
    """Return a TF op computing var_one + var_one (doubles the tensor)."""
    return tf.add(var_one, var_one)
def test_reuse_variables():
    """TF1 summary experiment: repeatedly double a constant and log a scalar.

    NOTE(review): the scalar summary is bound to the ORIGINAL `var_one`
    constant; rebinding `var_one` inside the loop does not rebind the
    summary op, so every logged summary value is the same — confirm intent.
    """
    sess = tf.Session()
    summary_writer = tf.summary.FileWriter('E://temp/tensorflow/log')
    var_one = tf.constant(1)
    var_summary = tf.summary.scalar(var_one.op.name ,tensor=var_one)
    summary_op = tf.summary.merge_all()
    for i in range(10):
        # Graph grows by one add op per iteration.
        var_one = add(var_one)
        summary_str = sess .run( summary_op)
        summary_writer.add_summary (summary_str, i)
        var_value = sess.run([var_one])
        print(var_value)
    sess.close()
    summary_writer.close()
def test_learning_rate():
    """Print an exponential decay (factor 0.995 per 1000 steps) every 1000 steps.

    NOTE(review): on Python 2 ``step / decay_step`` is integer division
    (staircase decay); on Python 3 it decays smoothly — confirm which is
    intended before porting.
    """
    decay_step = 1000
    for step in range(1000000):
        decayed_learning_rate = 1.0 * 0.995 ** (step / decay_step)
        if step % 1000 == 0:
            print(decayed_learning_rate)
if __name__ == "__main__":
# test_reuse_variables()
test_learning_rate() |
from django.contrib import admin
from .models import LaunchSite, OperationalStatus, OrbitalStatus, Source, CatalogEntry, TLE
# Register every satellite-catalog model with the default admin site.
for model in (LaunchSite, OperationalStatus, OrbitalStatus, Source, CatalogEntry, TLE):
    admin.site.register(model)
# coding: utf-8
import json
import random
from os import path
from .config import EVERY_REPLY_SEND_COUNT, LOG_TEMPLATE_SEARCH_COUNT, LOG_TEMPLATE_TOO_FEW_SKIP
from .logger import logger
from .session import asession_get
from .assets.fabiaoqing.updater import tags_file_name, tag_url_template
# Tag metadata produced by assets/fabiaoqing/updater, loaded once at import
# time; each entry is consulted by get_tag_from_fabiaoqing below.
fabiaoqing_tags = []
fabiaoqing_tags_file_path = path.join('forklift/assets/fabiaoqing', tags_file_name)
with open(fabiaoqing_tags_file_path) as f:
    fabiaoqing_tags = json.load(f)
def get_tag_from_fabiaoqing(query):
    """Return the cached fabiaoqing tag whose name equals *query*, else None."""
    matches = (tag for tag in fabiaoqing_tags if tag['name'] == query)
    return next(matches, None)
async def get_sticker_urls_by_fabiaoqing_tag(tag, filetype):
    """Fetch sticker image urls from a random page of a fabiaoqing tag.

    Optionally filters by file extension; gives up (returns []) when fewer
    than 10 urls were found, otherwise returns a random sample.
    """
    page_count = tag['page_count']
    page = random.randint(1, page_count - 1) if page_count > 2 else 1
    r = await asession_get(tag_url_template.format(tag['id'], page))
    urls = [el.attrs.get('data-original') for el in r.html.find('.tagbqppdiv .image')]
    if filetype:
        suffix = '.{}'.format(filetype)
        urls = [u for u in urls if u.endswith(suffix)]
    logger.debug(LOG_TEMPLATE_SEARCH_COUNT.format(r.url, len(urls)))
    if len(urls) < 10:
        logger.debug(LOG_TEMPLATE_TOO_FEW_SKIP)
        return []
    return random.sample(urls, EVERY_REPLY_SEND_COUNT)
async def get_sticker_urls_from_google(query, filetype):
    """Search Google Images for '<query> 表情包' and return sampled sticker urls.

    BUG FIX: ``parse_qs(...).get('imgurl')`` returns None when the result
    link has no 'imgurl' parameter, and the original then crashed on
    ``len(None)``; such results are now skipped.
    """
    from urllib import parse
    url = 'https://www.google.com/search'
    params = {
        'q': '{} 表情包'.format(query),
        'hl': 'zh-CN',
        'gws_rd': 'cr',
        'tbm': 'isch',
        'tbs': 'ift:{}'.format(filetype) if filetype else None
    }
    r = await asession_get(url, params=params)
    tags = r.html.find('#images .image')
    logger.debug(LOG_TEMPLATE_SEARCH_COUNT.format(r.url, len(tags)))
    sticker_urls = []
    for tag in tags:
        href = tag.attrs.get('href')
        query_params = parse.parse_qs(parse.urlsplit(href).query)
        img_urls = query_params.get('imgurl')
        if img_urls:
            sticker_urls.append(img_urls[0])
    sticker_urls = sticker_urls[:15]
    # NOTE(review): random.sample raises ValueError when fewer than
    # EVERY_REPLY_SEND_COUNT urls were found — confirm callers tolerate this.
    sticker_urls = random.sample(sticker_urls, EVERY_REPLY_SEND_COUNT)
    return sticker_urls
async def get_sticker_urls(query, filetype=None):
    """Resolve sticker urls for *query*: fabiaoqing first, Google as fallback."""
    urls = []
    tag = get_tag_from_fabiaoqing(query)
    if tag:
        try:
            urls = await get_sticker_urls_by_fabiaoqing_tag(tag, filetype)
        except Exception as exc:
            # Best effort: log and fall through to the Google path.
            logger.error(exc)
    if not urls:
        urls = await get_sticker_urls_from_google(query, filetype)
    return urls
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 7 17:04:47 2021
@author: troyd
"""
import praw #PRAW provides Reddit API functionality
import datetime #Provides capability to easily formate timestamps of message postings
import time
import os
#Reddit API Credentials
# All secrets come from environment variables; nothing is hard-coded here.
reddit = praw.Reddit(user_agent = os.environ.get('user_agent_redditapi'),
                     client_id = os.environ.get('client_id_redditapi'), client_secret = os.environ.get('client_secret_redditapi'),
                     username = os.environ.get('username_reddit'), password = os.environ.get('password_reddit'))
#Change this variable to indicate what subreddit you want to collect
#Find the subreddit manually on Reddit
#Then change the subreddit name here to be exactly the same
#No white spaces! A multi-word subreddit will have underscores, e.g., "three_word_subreddit"
subreddits = ["pelotoncycle", "celebrity", "entertainment", "Supplements", "Millenialshumor", "outdoors", "hiking", "fitness30plus", "keto", "nutrition", "homefitness", "exercise", "weightlifting", "bodybuilding", "bodyweightfitness", "progresspics"]
#File gets written to the same directory this Python script is located. The file will be called "output.csv"
# NOTE(review): the output file is never closed or flushed explicitly.
f = open('CIS509_data_' + str(datetime.date.today()) + '.csv','w', encoding='utf8') #'CIS509_data_' + str(datetime.date.today()) + '.csv'
#In this next line we print out column headers
f.write("'" + 'Subreddit' + "','" + 'MsgID'+ "','" + 'Timestamp'+ "','" + 'Author'+ "','" + 'ThreadID'+ "','" + 'ThreadTitle'+ "','" + 'MsgBody'+ "','" + 'ReplyTo'+ "','" + 'Permalink' +"'\n")
#Begin streaming user-generated comments from the focal subreddit specified in the 'subreddit' variable earlier in this code
count = 1
# NOTE(review): `now` is captured but never read afterwards.
now = time.perf_counter()
for subreddit in subreddits:
    # Live comment stream; the inner loop is left after 100 messages (below).
    for comment in reddit.subreddit(subreddit).stream.comments():
        #Refer to the documentation for PRAW to see what API commands are available
        commentID = str(comment.id) #Every Reddit post has an identification number. Here we extract it
        author = str(comment.author).replace(";", "").replace("'","").replace(",","").replace("\"","").replace("\n", " ").replace("\r"," ") #Name of message author
        timestamp = str(datetime.datetime.fromtimestamp(comment.created)) #Timestamp of when message was posted
        replyTo = "" #Whether the collected message was a direct reply to another existing message.
        if not comment.is_root: #If it is indeed a reply, this column contains the message ID of the parent message. If it is not a reply, a '-' is written to this column
            replyTo = str(comment.parent().id)
        else:
            replyTo = "-"
        threadID = str(comment.submission.id) # The ID of the thread the message was posted in
        threadTitle = str(comment.submission.title).replace(";", "").replace("'","").replace(",","").replace("\"","").replace("\n", " ").replace("\r"," ") #The title of the thread the message was posted in
        msgBody = str(comment.body).replace(";", "").replace("'","").replace(",","").replace("\"","").replace("\n", " ").replace("\r"," ") #The message itself
        permalink = str(comment.permalink).replace(";", "").replace("'","").replace(",","").replace("\"","").replace("\n", " ").replace("\r"," ") #A URL you can follow directly to the message
        #Print all collected message data to console
        print("-------------------------------------------------------")
        print("Subrredit: " + str(subreddit))
        print("Comment ID: " + str(comment.id))
        print("Comment Author: "+ str(comment.author))
        print("Timestamp: "+str(datetime.datetime.fromtimestamp(comment.created)))
        if not comment.is_root:
            print("Comment is a reply to: " + str(comment.parent().id))
        else:
            print("Comment is a reply to: -")
        print("Comment Thread ID: " + str(comment.submission.id))
        print("Comment Thread Title: " + str(comment.submission.title))
        print("Comment Body: " + str(comment.body))
        print("Comment Permalink: " + str(comment.permalink))
        #Write everything to a file (outpost.csv specified earlier)
        f.write("'"+subreddit+"','"+commentID+"','"+timestamp+"','"+author+"','"+threadID+"','"+threadTitle+"','"+msgBody+"','"+replyTo+"','"+permalink+"'\n")
        print("Total messages collected from /r/"+subreddit+": " + str(count))
        count += 1
        # After 100 comments, reset the counter and move to the next subreddit.
        if count > 100:
            #now = time.perf_counter()
            count = 1
            break
        else:
            continue
|
from bs4 import BeautifulSoup
import requests
import time
from itertools import zip_longest
import pandas as pd, numpy as np
import json
import gspread
from googleapiclient import discovery
from oauth2client.service_account import ServiceAccountCredentials
from gspread_dataframe import get_as_dataframe, set_with_dataframe
# Google Sheets auth: service-account credentials scoped for Sheets + Drive,
# then open the target spreadsheet's first worksheet.
scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name('tidal-digit-276902-efcc36a80aec.json', scope)
gc = gspread.authorize(credentials)
SPREADSHEET_KEY = '1DxPdf4JAcQp5LmKAEmBX2wuYW1j4ckva53Tnwm_byBg'
worksheet = gc.open_by_key(SPREADSHEET_KEY)
wb = worksheet.sheet1
# Download and parse the GYAO anime category page.
url = 'https://gyao.yahoo.co.jp/ct/anime/'
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
# Section header captions and the carousel containers they label.
section1 = soup.find_all('h2', class_ = 'section-header-title')
# NOTE(review): these sleeps run AFTER the single download, so they do not
# throttle any request — confirm they are still needed.
time.sleep(3)
sections = soup.find_all(class_='item-carousel-container')
time.sleep(3)
section_urls = soup.find_all('a')
# Section header captions, in page order.
header_titles = [header.text for header in section1]
# Pタグではなく、sectionsからtextを取得する?が、うまくいかない
# anime_titles = []
# for section in sections:
# section_ = section.find_all('p')
# anime_titles.append(section_)
# 別の方法でタイトルを取得するlenは22
# <p> tags (anime titles) of every carousel section.
anime_titles = [carousel.find_all('p') for carousel in soup.find_all(class_='item-carousel-container')]
# Per-section title counts and their running totals, used further down to
# slice the flat title/url lists back into their sections.
anime_titles_len = [len(tags) for tags in anime_titles]
cumulative = np.cumsum(anime_titles_len)
# Flattened title texts across all sections.
anime_titles_text = [p_tag.text for tags in anime_titles for p_tag in tags]
# Flattened list of link hrefs across all carousel sections.
url_lists = [anchor.get('href')
             for carousel in soup.find_all(class_='item-carousel-container')
             for anchor in carousel.find_all('a')]
# url_lists = []
# for section in sections:
# section_ = section.find_all('a')
# url_lists.append(section_)
# まとめてイッキ見! 一挙配信中のアニメ
# Titles of the second section with the batch-streaming marker stripped.
anime1 = [title.split('【一挙配信】')[1]
          for title in anime_titles_text[cumulative[1]:cumulative[2]]]
# anime_titles1 = []
# for section in sections:
# anime_titles.append(section.text)
# anime_titles2 = []
# for i in anime_titles[2]:
# a = i.split('【一挙配信】')[0]
# anime_titles2.append(a)
# 現在テレビ放送中の春アニメ
# anime2 = []
# for i in anime_titles[3]:
# title = i.string
# anime2.append(title)
# time.sleep(3)
# まとめてイッキ見! 一挙配信中のアニメ
# Anime_URL1 = []
# for i in url_lists[2]:
# url = i.get('href')
# Anime_URL1.append(url)
# Assemble "section title" -> titles / urls columns. Slices use the running
# per-section counts; header indices 3 and 4 are not emitted (no matching
# slice appears here — confirm the gap is intentional), and URL keys get
# numeric suffixes to keep the dict keys unique.
mydict = {
    header_titles[0]:anime1, 'URL':url_lists[cumulative[1]:cumulative[2]],
    header_titles[1]:anime_titles_text[cumulative[2]:cumulative[3]], 'URL1':url_lists[cumulative[2]:cumulative[3]],
    header_titles[2]:anime_titles_text[cumulative[3]:cumulative[4]], 'URL2':url_lists[cumulative[3]:cumulative[4]],
    header_titles[5]:anime_titles_text[cumulative[6]:cumulative[7]], 'URL3':url_lists[cumulative[6]:cumulative[7]],
    header_titles[6]:anime_titles_text[cumulative[7]:cumulative[8]], 'URL4':url_lists[cumulative[7]:cumulative[8]],
    header_titles[7]:anime_titles_text[cumulative[8]:cumulative[9]], 'URL5':url_lists[cumulative[8]:cumulative[9]],
    header_titles[8]:anime_titles_text[cumulative[9]:cumulative[10]], 'URL6':url_lists[cumulative[9]:cumulative[10]],
    header_titles[9]:anime_titles_text[cumulative[10]:cumulative[11]], 'URL7':url_lists[cumulative[10]:cumulative[11]],
    header_titles[10]:anime_titles_text[cumulative[11]:cumulative[12]], 'URL8':url_lists[cumulative[11]:cumulative[12]],
    header_titles[11]:anime_titles_text[cumulative[12]:cumulative[13]], 'URL9':url_lists[cumulative[12]:cumulative[13]],
    header_titles[12]:anime_titles_text[cumulative[13]:cumulative[14]], 'URL10':url_lists[cumulative[13]:cumulative[14]],
    header_titles[13]:anime_titles_text[cumulative[14]:cumulative[15]], 'URL11':url_lists[cumulative[14]:cumulative[15]],
    header_titles[14]:anime_titles_text[cumulative[15]:cumulative[16]], 'URL12':url_lists[cumulative[15]:cumulative[16]],
    header_titles[15]:anime_titles_text[cumulative[16]:cumulative[17]], 'URL13':url_lists[cumulative[16]:cumulative[17]],
    header_titles[16]:anime_titles_text[cumulative[17]:cumulative[18]], 'URL14':url_lists[cumulative[17]:cumulative[18]],
    header_titles[17]:anime_titles_text[cumulative[18]:cumulative[19]], 'URL15':url_lists[cumulative[18]:cumulative[19]],
    header_titles[18]:anime_titles_text[cumulative[19]:cumulative[20]], 'URL16':url_lists[cumulative[19]:cumulative[20]],
    header_titles[19]:anime_titles_text[cumulative[20]:cumulative[21]], 'URL17':url_lists[cumulative[20]:cumulative[21]],
    }
# pd.Series pads unequal column lengths with NaN instead of raising.
dict_df = pd.DataFrame({key:pd.Series(value) for key, value in mydict.items()})
sh = gc.open_by_key('1DxPdf4JAcQp5LmKAEmBX2wuYW1j4ckva53Tnwm_byBg').worksheet('シート1')
set_with_dataframe(sh, dict_df)
# resize=False, include_index=False
# rows = []
# for i in range(2, 20):
# rows.append(i)
# # for row, title in zip(rows, anime):
# # wb.update_cell(row, column, title)
# for row, a1,a2,a3,a4 in zip_longest(rows, anime1, anime2, anime3, anime4, fillvalue=20):
# wb.update('A:G'+str(row), [[a1,'' , a2, '', a3, '', a4]])
# anime_title_function(3, 3)
# time.sleep(3)
# def anime_url_function(list_num, column):
# Anime_URL = []
# for i in url_lists[list_num]:
# url_ = i.get('href')
# Anime_URL.append(url_)
# rows = []
# for i in range(2,len(Anime_URL)+2):
# rows.append(i)
# for row,url in zip(rows,Anime_URL):
# wb.update_cell(row, column, url)
# anime_url_function(3, 4)
# def anime_title_function(list_num,anime_title):
# # rows = []
# # for i in range(0,20):
# # rows.append(i)
|
#!/usr/bin/env python3
"""RSA CTF challenge generator.

Generates a 2048-bit RSA key, asks ``magic`` (from the secret module) for
two (a, b) / (c, d) pairs with a^2 + b^2 == n and c^2 + d^2 == n, prints the
four values, then prints the textbook-RSA ciphertext of the flag.
"""
from Crypto.PublicKey import RSA
from Crypto.Util.number import bytes_to_long
from secret import flag, magic
# Keep generating keys until magic() succeeds for the generated modulus.
while True:
    try:
        key = RSA.generate(2048)
        a,b,c,d = magic(key)
        break
    except Exception:
        # BUGFIX: narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit can still abort the retry loop; the retry-on-failure
        # intent of magic() is preserved.
        pass
# Sanity checks: both pairs must decompose n as a sum of two squares.
assert a**2 + b**2 == key.n
assert c**2 + d**2 == key.n
for value in [a,b,c,d]:
    print(value)
# Textbook RSA: cipher = flag^e mod n.
cipher = pow(bytes_to_long(flag), key.e, key.n)
print(cipher)
|
# Django URL configuration for the project.
from django.contrib import admin
from django.urls import path
from django.http import HttpResponse  # NOTE(review): unused in this file — confirm before removing
from root.views import getQuery,routeToUrl
# Order matters: the empty pattern serves the root view, and the catch-all
# slug pattern must stay last so it does not shadow other routes.
urlpatterns = [
    path('admin/', admin.site.urls),
    # Root URL, handled by getQuery.
    path('',getQuery),
    # Any single slug segment is passed as `key` to routeToUrl —
    # presumably resolved to a stored URL; see root.views.routeToUrl.
    path('<slug:key>/',routeToUrl)
]
import numpy  # NOTE(review): unused in this snippet — confirm before removing
import glob
# Glob pattern for all per-log input directories in tracker format.
path_input_root = "../../argodataset_trackerformat/*"
path_inputs = glob.glob(path_input_root)
path_output = "../../tracker_output"
# Maps each log id (directory name, a UUID) to a city code; the tracker is
# invoked with --city_name taken from this table. Codes seen here:
# "MIA", "PIT", "DTW" — presumably Miami / Pittsburgh / Detroit
# (TODO confirm against the tracker's map data).
logid_to_city_dict = {
    "089813dd-b5df-30ea-aaa7-fd5a9acf5302": "MIA",
    "38a7c63f-3304-3e76-9481-8aa1c745d18c": "MIA",
    "10b3a1d8-e56c-38be-aaf7-ef2f862a5c4e": "MIA",
    "c9d6ebeb-be15-3df8-b6f1-5575bea8e6b9": "MIA",
    "bae67a44-0f30-30c1-8999-06fc1c7ab80a": "MIA",
    "9aea22e0-70d3-34e6-a0b2-b7f6afdaaa27": "DTW",
    "5c251c22-11b2-3278-835c-0cf3cdee3f44": "MIA",
    "38b2c7ef-069b-3d9d-bbeb-8847b8c89fb6": "PIT",
    "da3d8357-54b1-321f-8efa-a0332668096f": "PIT",
    "e07be70a-db1f-3731-b912-b0439065f766": "MIA",
    "76a9f363-bdc5-330b-94d5-05c6e8f29bf6": "MIA",
    "e4adb13f-ec05-373d-ae9a-cd3c683eb869": "MIA",
    "84c35ea7-1a99-3a0c-a3ea-c5915d68acbc": "MIA",
    "e7403a92-aabf-3354-af97-b20bea479d7d": "MIA",
    "577ea60d-7cc0-34a4-a8ff-0401e5ab9c62": "MIA",
    "693c4b41-2df6-3961-851b-3c2ddf5ea227": "MIA",
    "efb48719-7c42-31da-b203-0dd4eed633dc": "MIA",
    "609bd5f8-28d2-3965-a583-14e0f9752aaa": "MIA",
    "649750f3-0163-34eb-a102-7aaf5384eaec": "MIA",
    "033669d3-3d6b-3d3d-bd93-7985d86653ea": "PIT",
    "c28501ef-cf11-3def-b358-ecd98d1284ae": "PIT",
    "df30a5c0-b251-3546-8da3-7ae4503b0ab1": "MIA",
    "aeb73d7a-8257-3225-972e-99307b3a5cb0": "MIA",
    "a2139885-9169-3ac8-a4ca-337e1c9bf8f4": "MIA",
    "f5ced2e6-de7a-3167-8ae6-174b7d311ac7": "MIA",
    "0d2ee2db-4061-36b2-a330-8301bdce3fe8": "PIT",
    "ff5c497e-767a-3b1a-961d-a40e69cd122e": "PIT",
    "6162d72f-2990-3a30-9bba-19bbd882985c": "MIA",
    "6db21fda-80cd-3f85-b4a7-0aadeb14724d": "MIA",
    "b955891c-5a6e-32fe-9bca-4edbcc3e5000": "PIT",
    "313b45e6-ef2e-37ce-aa26-f4b03fe685f4": "MIA",
    "9da4ca63-f524-3b38-8c8b-624f17518574": "MIA",
    "dd6ab742-d656-36ad-876c-fa1449710926": "MIA",
    "6593dead-ead2-31d2-b6d8-5d7975c82d2b": "PIT",
    "3ced8dba-62d0-3930-8f60-ebeea2feabb8": "MIA",
    "a2b55686-4b10-383d-9f83-2f5b29d89c67": "PIT",
    "e3dacee8-2840-3a97-92bc-497c1fea2a42": "PIT",
}
# Example of the command line this script generates:
# sudo python3.6 run_tracking_multi_KF_nogt.py
# --path_input=../../argodataset_trackerformat/033669d3-3d6b-3d3d-bd93-7985d86653ea --path_output=../../tracker_output/
# --dbscan_eps=1.2 --min_point_num=50 --ground_removal_th=0.4 --city_name='PIT' --ground_level=0.4 --use_map_roi=True
# ('_roi_%d_lane_%d_fixbbox_%d') %(use_map_roi,use_map_lane,fix_bbox_size )
use_map_roi = False
use_map_lane = False
fix_bbox_size = True
# Write one tracker command per input log, in reverse directory order, into
# a shell script named after the chosen flags.  The context manager ensures
# the file is closed (and flushed) even if the loop raises.
with open(
    "run_tracker_roi_%d_lane_%d_fixbbox_%d_inverse.sh"
    % (use_map_roi, use_map_lane, fix_bbox_size),
    "w",
) as text_file:
    # BUGFIX: range(len - 1, 0, -1) stopped at index 1 and silently skipped
    # path_inputs[0]; the stop value must be -1 to include the first entry.
    for i in range(len(path_inputs) - 1, -1, -1):
        print(path_inputs[i])
        # The directory basename is the log id used to look up the city.
        log_id = path_inputs[i].split("/")[-1]
        if log_id in logid_to_city_dict:
            city_name = logid_to_city_dict[log_id]
        else:
            print("city name not found!")
            continue
        command = (
            "sudo python3.6 run_tracking_multi_KF_nogt.py --path_input=%s --path_output=%s --dbscan_eps=1.2 --min_point_num=50 --ground_removal_th=0.4 --city_name=%s --ground_level=0.4 "
            % (path_inputs[i], path_output, city_name)
        )
        # Optional feature flags are only appended when enabled.
        if use_map_roi:
            command += " --use_map_roi=True "
        if use_map_lane:
            command += " --use_map_lane=True "
        if fix_bbox_size:
            command += " --fix_bbox_size=True "
        print(command)
        text_file.write(command + "\n")
|
# Read two integers as raw strings first (e.g. "2" and "5" are entered),
# then convert explicitly at use time to demonstrate type casting.
num1 = input("첫 번째 정수 : ")
num2 = input("두 번째 정수 : ")
first, second = int(num1), int(num2)
print(first + second)
print(first - second)
print(first * second)
# Second approach: convert to int immediately when reading the input.
num1 = int(input("첫 번째 정수 : "))
num2 = int(input("두 번째 정수 : "))
print(num1 + num2)
|
import twitter
import sys
def oauth_login():
    """Create an authenticated ``twitter.Twitter`` API client.

    Credentials are read from the environment when present
    (TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET, TWITTER_OAUTH_TOKEN,
    TWITTER_OAUTH_TOKEN_SECRET), falling back to the historical hard-coded
    values so existing callers keep working.

    SECURITY: the hard-coded fallback credentials below are committed to
    source control; they should be revoked and removed, and are kept only
    for backward compatibility.  Create app credentials at
    http://twitter.com/apps/new (see https://dev.twitter.com/docs/auth/oauth).

    :return: an authenticated ``twitter.Twitter`` instance.
    """
    import os  # local import keeps this function self-contained
    CONSUMER_KEY = os.environ.get('TWITTER_CONSUMER_KEY', 'mhF9p0bY1rj3S5YTtkIIoBNEx')
    CONSUMER_SECRET = os.environ.get('TWITTER_CONSUMER_SECRET', 'YxZGFGIbKLQ5TdzgMTbu7JjYkATCiyrvYQ4EtTO8GDrw5cNRd3')
    OAUTH_TOKEN = os.environ.get('TWITTER_OAUTH_TOKEN', '2492696552-qplePWNJQ7tERmhU8SbzUFyEbACV5hZRmRBrt2M')
    OAUTH_TOKEN_SECRET = os.environ.get('TWITTER_OAUTH_TOKEN_SECRET', 'FwOmtWJS6xxa2w0uuhlob9OjKjgczq38E5sQKIC8QWkha')
    auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET,
                               CONSUMER_KEY, CONSUMER_SECRET)
    twitter_api = twitter.Twitter(auth=auth)
    return twitter_api
# Search term(s) to track on the public stream (comma-separated list).
track_query = 'Modi'
print('Filtering the public timeline for track=', track_query, file=sys.stderr)
# Authenticate once, then reuse the OAuth handler for the streaming endpoint.
api = oauth_login()
streaming_client = twitter.TwitterStream(auth=api.auth)
# See https://dev.twitter.com/docs/streaming-apis
live_stream = streaming_client.statuses.filter(track=track_query)
# Print the text of every matching tweet as it arrives; when all else
# fails, a popular term is sure to turn up results on Twitter.
for tweet in live_stream:
    print(tweet['text'])
|
import re  # kept: may be used elsewhere in the file
# Count occurrences of each lowercase ASCII letter in the input line and
# print the 26 counts separated by single spaces (trailing space, no newline).
text = input()
counts = [text.count(chr(code)) for code in range(ord('a'), ord('z') + 1)]
for count in counts:
    print(count, end=' ')
|
"""Aligned PER decoder.

Only :func:`decode` is public; decoding of each field is driven by the spec
object's ``create_field_list`` hook.  The numeric clause comments throughout
(e.g. ``# 13.1``) appear to reference ITU-T X.691 section numbers — TODO
confirm the edition.
"""
import binascii
import logging
import codecs
from collections import deque
from .encoder import bit_field_size, get_bit_field_size, integer_range, get_range_octet_len, \
    is_constrained_whole_number, is_semi_constrained_whole_number, is_constant
__all__ = ['decode']
# NOTE(review): "asn1perser" spelling kept as-is; renaming the logger would
# break any logging configuration filtering on this name.
logger = logging.getLogger("asn1perser.decoder")
class Offset(object):
    """Read pointer into a PER bit stream, measured as (octets, bits).

    Keeps both the position *before* the most recent advance (``prev_*``)
    and the current position (``curr_*``).  ``PerBytes.__getitem__`` relies
    on this: because ``__add__`` mutates the object in place and returns
    ``self``, a slice's start and stop are the same object, so the consumed
    bit range is prev (start) .. curr (stop).
    """
    def __init__(self, octets=0, bits=0):
        # Position before the most recent advance.
        self.prev_octets = octets
        self.prev_bits = bits
        # Current (live) position.
        self.curr_octets = octets
        self.curr_bits = bits
        logger.debug("Bit pointer initialized. Current position: (%s octets, %s bits)", self.curr_octets, self.curr_bits)
    def __add__(self, other):
        # Advance by `other` (anything exposing .octets/.bits).
        # NOTE: mutates self in place and returns it — it does NOT build a
        # new Offset.  Unusual for __add__, but load-bearing for PerBytes.
        self.prev_octets = self.curr_octets
        self.prev_bits = self.curr_bits
        self.curr_octets += other.octets
        self.curr_bits += other.bits
        # Normalize so curr_bits always stays in [0, 8).
        if self.curr_bits >= 8:
            self.curr_octets += self.curr_bits // 8
            self.curr_bits = self.curr_bits - 8 * (self.curr_bits // 8)
        logger.debug("Bit pointer position: (%s octets, %s bits)", self.curr_octets, self.curr_bits)
        return self
    def align(self):
        # Advance to the next octet boundary; a no-op when already aligned.
        bits_to_align = ((1 + self.curr_bits // 8) * 8 - self.curr_bits) if self.curr_bits > 0 else 0
        if bits_to_align:
            self.prev_octets = self.curr_octets
            # NOTE(review): prev_bits is reset to 0 here rather than to the
            # old curr_bits — confirm this is intentional for the slicing
            # performed by PerBytes.__getitem__.
            self.prev_bits = 0
            # curr_bits is < 8 after __add__ normalization, so the modulo
            # branch looks defensive — TODO confirm it can ever be taken.
            self.curr_octets += (self.curr_bits // 8 if self.curr_bits % 8 == 0 else (1 + self.curr_bits // 8))
            self.curr_bits = 0
            logger.debug("Aligned %s bits. Current bit pointer position: (%s octets, %s bits)",
                         bits_to_align, self.curr_octets, self.curr_bits)
class PerBytes(object):
    """Bit-level reader over a PER-encoded payload.

    ``byte_string`` is the hex representation of the payload (as produced
    by ``binascii.hexlify``); it is expanded to a string of '0'/'1'
    characters so arbitrary bit runs can be sliced out.
    """
    def __init__(self, byte_string):
        # Shared, mutating read pointer; advanced by every next() call.
        self.offset = Offset()
        # zfill preserves leading zero bits that int()/bin() would drop
        # (each hex digit contributes 4 bits).
        self.binary_string = bin(int(byte_string, 16))[2:].zfill(len(byte_string) * 4)
    def next(self, size=bit_field_size(octets=1, bits=0), octet_align=False):
        # Consume `size` bits (optionally after aligning to an octet
        # boundary) and return them as an unsigned int.
        # NOTE: the default `size` is evaluated once at definition time;
        # it is shared across calls but never mutated here.
        if octet_align:
            self.offset.align()
        # self.offset and (self.offset + size) are the SAME mutated object:
        # __getitem__ reads from its prev_* (pre-advance) position up to
        # its curr_* (post-advance) position.
        bits = self[self.offset:self.offset + size]
        logger.debug("Bits taken: '%s'", 'none' if (size.octets == 0 and size.bits == 0) else "0b" + bits)
        val = int(bits, 2)
        return val
    def __getitem__(self, item):
        # Slice endpoints are Offset objects (see next()).  A zero-width
        # range decodes as '0' so int(bits, 2) still succeeds.
        bits_start = (item.start.prev_octets * 8) + item.start.prev_bits
        bits_stop = (item.stop.curr_octets * 8) + item.stop.curr_bits
        if bits_start == bits_stop:
            return '0'
        return self.binary_string[bits_start:bits_stop]
    def __len__(self):
        # Length of the underlying payload in whole octets.
        return int(len(self.binary_string) / 8)
def decode(per_stream, asn1Spec, **kwargs):
    """Decode PER-encoded bytes *per_stream* against the *asn1Spec* object.

    Builds a bit-level view of the payload and delegates to the spec's
    ``create_field_list`` hook; an empty-string result is normalised to an
    empty instance of the spec's class.
    """
    stream = PerBytes(binascii.hexlify(per_stream))
    logger.debug("Decoding of bytes: '%s' started. Binary representation: '%s'",
                 codecs.encode(per_stream, "hex_codec"), stream.binary_string)
    result = asn1Spec.create_field_list(stream)
    return asn1Spec.__class__('') if result == '' else result
def decode_boolean(boolean, per_bytes):  # X.691 clause 12
    """Decode a BOOLEAN from a single bit of the PER stream and return it
    as a new instance of ``boolean``'s class."""
    cls_name = boolean.__class__.__name__
    logger.info("Decoding boolean: %s", cls_name)
    raw_bit = per_bytes.next(size=bit_field_size(octets=0, bits=1))
    logger.info("%s: value: %s", cls_name, raw_bit)
    logger.info("%s: decoding finished.", cls_name)
    return boolean.__class__(bool(raw_bit))
def decode_integer(integer, per_bytes):
    """Decode an INTEGER (X.691 clause 13) and return it as an instance of
    ``integer``'s class.

    The encoding form is selected from the spec's ``subtypeSpec``:
    extension marker, constrained / semi-constrained / unconstrained
    endpoints.

    :raises NotImplementedError: for constant (fixed-value) integer specs.
    """
    logger.info("Decoding integer: %s", integer.__class__.__name__)
    if integer.subtypeSpec.extensionMarker:  # 13.1
        logger.info("%s: extension marker should be present", integer.__class__.__name__)
        decoded_value_not_within_extension_root_bit = per_bytes.next(size=bit_field_size(octets=0, bits=1))
        logger.info("%s: is value within extension range: %s", integer.__class__.__name__, decoded_value_not_within_extension_root_bit)
        if decoded_value_not_within_extension_root_bit:
            # Outside the extension root: encoded as an unconstrained
            # whole number.
            unconstrained_whole_number = decode_unconstrained_whole_number(per_bytes)
            logger.info("%s: value: %s", integer.__class__.__name__, unconstrained_whole_number)
            logger.info("%s: decoding finished.", integer.__class__.__name__)
            return integer.__class__(unconstrained_whole_number)
        else:
            constrained_whole_number = decode_constrained_whole_number_from_field_list(
                per_bytes=per_bytes,
                lowerEndpoint=integer.subtypeSpec.lowerEndpoint,
                upperEndpoint=integer.subtypeSpec.upperEndpoint)
            logger.info("%s: value: %s", integer.__class__.__name__, constrained_whole_number)
            logger.info("%s: decoding finished.", integer.__class__.__name__)
            return integer.__class__(constrained_whole_number)
    else:
        if is_constant(integer):
            # BUGFIX: the format string previously had no argument (the
            # logging module would report a formatting error) and
            # ``raise NotImplemented`` raised TypeError, since
            # NotImplemented is not an exception class.
            logger.error("%s: constant integer is not supported", integer.__class__.__name__)
            raise NotImplementedError("constant integer is not supported")
        elif is_constrained_whole_number(integer):
            if integer.subtypeSpec.lowerEndpoint == integer.subtypeSpec.upperEndpoint:  # 13.2.1
                # Single-value constraint: nothing is encoded, the value is
                # already known from the spec.
                logger.info("%s: is contrained to single value: %s", integer.__class__.__name__, integer.subtypeSpec.lowerEndpoint)
                logger.info("%s: decoding finished.", integer.__class__.__name__)
                return (integer.__class__(integer.subtypeSpec.lowerEndpoint))
            else:  # 13.2.2
                logger.info("%s: is contrained between %s and %s. Decoding value...", integer.__class__.__name__, integer.subtypeSpec.lowerEndpoint, integer.subtypeSpec.upperEndpoint)
                constrained_whole_number = decode_constrained_whole_number_from_field_list(
                    per_bytes=per_bytes,
                    lowerEndpoint=integer.subtypeSpec.lowerEndpoint,
                    upperEndpoint=integer.subtypeSpec.upperEndpoint)
                logger.info("%s: value: %s", integer.__class__.__name__, constrained_whole_number)
                logger.info("%s: decoding finished.", integer.__class__.__name__)
                return integer.__class__(constrained_whole_number)
        elif is_semi_constrained_whole_number(integer):
            logger.info("%s: is semi contrained - minimum value: %s", integer.__class__.__name__, integer.subtypeSpec.lowerEndpoint)
            semi_constrained_whole_number = decode_semi_constrained_whole_number_from_field_list(
                per_bytes=per_bytes,
                lowerBound=integer.subtypeSpec.lowerEndpoint)
            logger.info("%s: value: %s", integer.__class__.__name__, semi_constrained_whole_number)
            logger.info("%s: decoding finished.", integer.__class__.__name__)
            return integer.__class__(semi_constrained_whole_number)
        else:  # 13.2.4
            logger.info("%s: is unconstrained whole number. Decoding value...", integer.__class__.__name__)
            unconstrained_whole_number = decode_unconstrained_whole_number(per_bytes)
            logger.info("%s: value: %s", integer.__class__.__name__, unconstrained_whole_number)
            logger.info("%s: decoding finished.", integer.__class__.__name__)
            return integer.__class__(unconstrained_whole_number)
def decode_enumerated(enumerated, per_bytes):
    """Decode an ENUMERATED value (clause 14) and return it as an instance
    of ``enumerated``'s class, constructed from the resolved value name."""
    logger.info("Decoding enumerated: %s", enumerated.__class__.__name__)
    if not enumerated.subtypeSpec.extensionMarker:  # 14.2
        # The index is a constrained whole number over all named values.
        enumeration_index = decode_constrained_whole_number(per_bytes, lowerBound=0, upperBound=len(list(enumerated.namedValues)) - 1)
        logger.info("%s: index: %s", enumerated.__class__.__name__, enumeration_index)
        logger.info("%s: decoding finished.", enumerated.__class__.__name__)
        return enumerated.__class__(enumerated.namedValues.getName(enumeration_index))
    else:  # 14.3
        logger.info("%s: extension marker should be present", enumerated.__class__.__name__)
        # One leading bit says whether the value is an extension addition.
        value_is_extension_addition = per_bytes.next(size=bit_field_size(octets=0, bits=1))
        logger.info("%s: is index within extension range: %s", enumerated.__class__.__name__, value_is_extension_addition)
        if not value_is_extension_addition:
            # Root value: index constrained to the enumeration root only.
            enumeration_index = decode_constrained_whole_number(per_bytes, lowerBound=0,
                                                               upperBound=len(list(enumerated.enumerationRoot)) - 1)
            logger.info("%s: index: %s", enumerated.__class__.__name__, enumeration_index)
            logger.info("%s: decoding finished.", enumerated.__class__.__name__)
            return enumerated.__class__(enumerated.namedValues.getName(enumeration_index))
        else:
            # Extension addition: a normally-small number, offset past the
            # root so it indexes into the full namedValues list.
            enumeration_index = decode_normally_small_non_negative_whole_number(per_bytes, lower_bound=0)
            enumeration_index += len(enumerated.enumerationRoot)
            logger.info("%s: index: %s", enumerated.__class__.__name__, enumeration_index)
            logger.info("%s: decoding finished.", enumerated.__class__.__name__)
            return enumerated.__class__(enumerated.namedValues.getName(enumeration_index))
def decode_bitstring(bitstring, per_bytes):
    """Decode a BIT STRING (X.691 clause 16) and return it as an instance
    of ``bitstring``'s class, built from a '0'/'1' character string.

    :raises NotImplementedError: for fixed sizes >= 65536 bits.
    """
    logger.info("Decoding bitstring: %s", bitstring.__class__.__name__)
    if bitstring.subtypeSpec.extensionMarker:  # 16.6
        logger.info("%s: extension marker should be present", bitstring.__class__.__name__)
        extension_bit_field = per_bytes.next(size=bit_field_size(octets=0, bits=1))
        logger.info("%s: is bitstring value extened: %s", bitstring.__class__.__name__, extension_bit_field)
        if not extension_bit_field:
            # Value lies inside the extension root: fall through to the
            # constrained paths below.
            pass
        else:
            # Extended value: octet-align, then an unconstrained length
            # determinant followed by the raw bits.
            per_bytes.next(size=bit_field_size(octets=0, bits=0), octet_align=True)
            bitstring_length_determinant = decode_length_determinant(normally_small_length=False,
                                                                     constrained=False,
                                                                     n=per_bytes,
                                                                     lowerBound=0,
                                                                     upperBound=None)
            bitstring_val = per_bytes.next(size=bit_field_size(octets=0, bits=bitstring_length_determinant))
            per_bytes.next(size=bit_field_size(octets=0, bits=0), octet_align=True)
            # zfill restores leading zero bits dropped by bin().
            bitstring_val = bin(bitstring_val)[2:].zfill(bitstring_length_determinant)
            logger.info("%s: value: %s", bitstring.__class__.__name__, bitstring_val)
            logger.info("%s: decoding finished.", bitstring.__class__.__name__)
            return bitstring.__class__(bitstring_val)
    if bitstring.subtypeSpec.upperEndpoint == 0:  # 16.8
        # NOTE(review): returns a plain str here rather than
        # bitstring.__class__('') — confirm callers accept this.
        logger.info("%s: upperEndpoint is 0 so value is empty string: ''", bitstring.__class__.__name__)
        logger.info("%s: decoding finished.", bitstring.__class__.__name__)
        return ''
    if (bitstring.subtypeSpec.lowerEndpoint is not None) and (bitstring.subtypeSpec.lowerEndpoint == bitstring.subtypeSpec.upperEndpoint):
        logger.info("%s: lowerEndpoint == upperEndpoint == %s", bitstring.__class__.__name__, bitstring.subtypeSpec.lowerEndpoint)
        if bitstring.subtypeSpec.lowerEndpoint <= 16:  # 16.9
            # Small fixed size: no length determinant, no alignment.
            bitstring_val = per_bytes.next(size=bit_field_size(octets=0, bits=bitstring.subtypeSpec.lowerEndpoint))
            bitstring_val = bin(bitstring_val)[2:].zfill(bitstring.subtypeSpec.lowerEndpoint)
            logger.info("%s: value: %s", bitstring.__class__.__name__, bitstring_val)
            logger.info("%s: decoding finished.", bitstring.__class__.__name__)
            return bitstring.__class__(bitstring_val)
        elif 16 < bitstring.subtypeSpec.lowerEndpoint < 65536:  # 16.10
            per_bytes.next(size=bit_field_size(octets=0, bits=0), octet_align=True)
            bitstring_val = per_bytes.next(size=bit_field_size(octets=0, bits=bitstring.subtypeSpec.lowerEndpoint))
            bitstring_val = bin(bitstring_val)[2:].zfill(bitstring.subtypeSpec.lowerEndpoint)[:bitstring.subtypeSpec.lowerEndpoint]
            logger.info("%s: value: %s", bitstring.__class__.__name__, bitstring_val)
            logger.info("%s: decoding finished.", bitstring.__class__.__name__)
            return bitstring.__class__(bitstring_val)
        else:
            # BUGFIX: ``raise NotImplemented`` raised TypeError because
            # NotImplemented is not an exception class.
            logger.error("%s: lowerEndpoint >= 65536 not supported", bitstring.__class__.__name__)
            raise NotImplementedError("BIT STRING fixed size >= 65536 is not supported")
    else:  # 16.11
        if bitstring.subtypeSpec.upperEndpoint and bitstring.subtypeSpec.upperEndpoint < 65536:
            logger.info("%s: lowerEndpoint (%s) and upperEdnpoint (%s) < 65536", bitstring.__class__.__name__,
                        bitstring.subtypeSpec.lowerEndpoint, bitstring.subtypeSpec.upperEndpoint)
            bitstring_length_determinant = decode_length_determinant(normally_small_length=False,
                                                                     constrained=True,
                                                                     n=per_bytes,
                                                                     lowerBound=bitstring.subtypeSpec.lowerEndpoint,
                                                                     upperBound=bitstring.subtypeSpec.upperEndpoint)
            bitstring_val = per_bytes.next(size=bit_field_size(octets=0, bits=bitstring_length_determinant), octet_align=True)
            bitstring_val = bin(bitstring_val)[2:].zfill(bitstring_length_determinant)
            logger.info("%s: value: %s", bitstring.__class__.__name__, bitstring_val)
            logger.info("%s: decoding finished.", bitstring.__class__.__name__)
            return bitstring.__class__(bitstring_val)
        else:
            logger.info("%s: lowerEndpoint = %s; upperEndpoint = %s", bitstring.__class__.__name__,
                        bitstring.subtypeSpec.lowerEndpoint, bitstring.subtypeSpec.upperEndpoint)
            bitstring_length_determinant = decode_length_determinant(normally_small_length=False,
                                                                     constrained=False,
                                                                     n=per_bytes,
                                                                     lowerBound=0,
                                                                     upperBound=None)
            bitstring_val = per_bytes.next(size=bit_field_size(octets=0, bits=bitstring_length_determinant), octet_align=True)
            bitstring_val = bin(bitstring_val)[2:].zfill(bitstring_length_determinant)
            logger.info("%s: value: %s", bitstring.__class__.__name__, bitstring_val)
            logger.info("%s: decoding finished.", bitstring.__class__.__name__)
            return bitstring.__class__(bitstring_val)
def decode_octetstring(octetstring, per_bytes):
    """Decode an OCTET STRING (X.691 clause 17) and return it as an
    instance of ``octetstring``'s class built via ``fromHexString``.

    :raises NotImplementedError: for fixed sizes >= 65536 octets.
    """
    logger.info("Decoding octetstring: %s", octetstring.__class__.__name__)
    if octetstring.subtypeSpec.extensionMarker:  # 17.3
        logger.info("%s: extension marker should be present", octetstring.__class__.__name__)
        extension_bit_field = per_bytes.next(size=bit_field_size(octets=0, bits=1))
        logger.info("%s: is octetstring value extened: %s", octetstring.__class__.__name__, extension_bit_field)
        if not extension_bit_field:
            # Value lies inside the extension root: fall through below.
            pass
        else:
            # Extended value: octet-align, then an unconstrained length
            # determinant followed by that many octets.
            per_bytes.next(size=bit_field_size(octets=0, bits=0), octet_align=True)
            octetstring_length_determinant = decode_length_determinant(normally_small_length=False,
                                                                       constrained=False,
                                                                       n=per_bytes,
                                                                       lowerBound=0,
                                                                       upperBound=None)
            if octetstring_length_determinant == 0:
                logger.info("%s: length determinant is 0 so value is empty string: ''", octetstring.__class__.__name__)
                logger.info("%s: decoding finished.", octetstring.__class__.__name__)
                return octetstring.__class__(octetstring.__class__.fromHexString(''))
            octetstring_val = per_bytes.next(size=bit_field_size(octets=octetstring_length_determinant, bits=0))
            # zfill restores leading zero octets dropped by hex().
            octetstring_val = hex(octetstring_val)[2:].rstrip("L").zfill(octetstring_length_determinant * 2)
            logger.info("%s: value: %s", octetstring.__class__.__name__, octetstring_val)
            logger.info("%s: decoding finished.", octetstring.__class__.__name__)
            return octetstring.__class__(octetstring.__class__.fromHexString(octetstring_val))
    if octetstring.subtypeSpec.upperEndpoint == 0:  # 17.5
        # NOTE(review): returns a plain str here rather than an
        # octetstring.__class__ instance — confirm callers accept this.
        logger.info("%s: upperEndpoint is 0 so value is empty string: ''", octetstring.__class__.__name__)
        logger.info("%s: decoding finished.", octetstring.__class__.__name__)
        return ''
    if (octetstring.subtypeSpec.lowerEndpoint is not None) and (octetstring.subtypeSpec.lowerEndpoint == octetstring.subtypeSpec.upperEndpoint):
        logger.info("%s: lowerEndpoint == upperEndpoint == %s", octetstring.__class__.__name__,
                    octetstring.subtypeSpec.lowerEndpoint)
        if octetstring.subtypeSpec.lowerEndpoint <= 2:  # 17.6
            # Very small fixed size: no length determinant, no alignment.
            octetstring_val = per_bytes.next(size=bit_field_size(octets=octetstring.subtypeSpec.lowerEndpoint, bits=0))
            octetstring_val = hex(octetstring_val)[2:].rstrip("L").zfill(octetstring.subtypeSpec.lowerEndpoint * 2)
            logger.info("%s: value: %s", octetstring.__class__.__name__, octetstring_val)
            logger.info("%s: decoding finished.", octetstring.__class__.__name__)
            return octetstring.__class__(octetstring.__class__.fromHexString(octetstring_val))
        elif 2 < octetstring.subtypeSpec.lowerEndpoint < 65536:  # 17.7
            per_bytes.next(size=bit_field_size(octets=0, bits=0), octet_align=True)
            octetstring_val = per_bytes.next(size=bit_field_size(octets=octetstring.subtypeSpec.lowerEndpoint, bits=0))
            octetstring_val = hex(octetstring_val)[2:].rstrip("L").zfill(octetstring.subtypeSpec.lowerEndpoint * 2)
            logger.info("%s: value: %s", octetstring.__class__.__name__, octetstring_val)
            logger.info("%s: decoding finished.", octetstring.__class__.__name__)
            return octetstring.__class__(octetstring.__class__.fromHexString(octetstring_val))
        else:
            # BUGFIX: ``raise NotImplemented`` raised TypeError because
            # NotImplemented is not an exception class.
            logger.error("%s: lowerEndpoint >= 65536 not supported", octetstring.__class__.__name__)
            raise NotImplementedError("OCTET STRING fixed size >= 65536 is not supported")
    else:  # 17.8
        logger.info("%s: lowerEndpoint = %s; upperEndpoint = %s", octetstring.__class__.__name__,
                    octetstring.subtypeSpec.lowerEndpoint, octetstring.subtypeSpec.upperEndpoint)
        if octetstring.subtypeSpec.upperEndpoint:
            octetstring_length_determinant = decode_length_determinant(normally_small_length=False,
                                                                       constrained=True,
                                                                       n=per_bytes,
                                                                       lowerBound=octetstring.subtypeSpec.lowerEndpoint,
                                                                       upperBound=octetstring.subtypeSpec.upperEndpoint)
            if octetstring_length_determinant == 0:
                logger.info("%s: length determinant is 0 so value is empty string: ''", octetstring.__class__.__name__)
                logger.info("%s: decoding finished.", octetstring.__class__.__name__)
                return octetstring.__class__(octetstring.__class__.fromHexString(''))
            octetstring_val = per_bytes.next(size=bit_field_size(octets=octetstring_length_determinant, bits=0), octet_align=True)
            # BUGFIX: this path was the only one missing the zero-padding,
            # so leading 0x00 octets were silently dropped (and the hex
            # string could come out odd-length). Pad like the other paths.
            octetstring_val = hex(octetstring_val)[2:].rstrip("L").zfill(octetstring_length_determinant * 2)
            logger.info("%s: value: %s", octetstring.__class__.__name__, octetstring_val)
            logger.info("%s: decoding finished.", octetstring.__class__.__name__)
            return octetstring.__class__(octetstring.__class__.fromHexString(octetstring_val))
        else:
            per_bytes.next(size=bit_field_size(octets=0, bits=0), octet_align=True)
            octetstring_length_determinant = decode_length_determinant(normally_small_length=False,
                                                                       constrained=False,
                                                                       n=per_bytes,
                                                                       lowerBound=0,
                                                                       upperBound=None)
            octetstring_val = per_bytes.next(size=bit_field_size(octets=octetstring_length_determinant, bits=0))
            octetstring_val = hex(octetstring_val)[2:].rstrip("L").zfill(octetstring_length_determinant * 2)
            logger.info("%s: value: %s", octetstring.__class__.__name__, octetstring_val)
            logger.info("%s: decoding finished.", octetstring.__class__.__name__)
            return octetstring.__class__(octetstring.__class__.fromHexString(octetstring_val))
def decode_sequence(sequence, per_bytes):
    """Decode a SEQUENCE (X.691 clause 19) into a new instance of its class.

    Reads the optional/default presence bit-map, decodes every root
    component in spec order, and — when the extension bit is set — decodes
    extension additions and extension addition groups as open-type fields.

    :raises NotImplementedError: if the optional/default presence bit-field
        length is >= 65536 (fragmented bit-maps are not supported).
    """
    logger.info("Decoding sequence: %s", sequence.__class__.__name__)
    new_seq = sequence.__class__()
    extension_bit = 0
    optional_default_bit_field_len = 0
    optional_default_bit_field_value = 0
    if sequence.subtypeSpec.extensionMarker:  # 19.1
        logger.info("%s: extension marker should be present", sequence.__class__.__name__)
        extension_bit = per_bytes.next(size=bit_field_size(octets=0, bits=1))
        logger.info("%s: extension bit: %s", sequence.__class__.__name__, extension_bit)
    # One presence bit is encoded per OPTIONAL/DEFAULT root component.
    for namedType in sequence.rootComponent.namedTypes:  # 19.2
        if namedType.isOptional or namedType.isDefaulted:
            optional_default_bit_field_len += 1
    logger.info("%s: number of optional fields: %s", sequence.__class__.__name__, optional_default_bit_field_len)
    if optional_default_bit_field_len:
        if optional_default_bit_field_len < 65536:  # 19.3
            optional_default_bit_field_value = per_bytes.next(size=bit_field_size(octets=0, bits=optional_default_bit_field_len))
            optional_default_bit_field_value = bin(optional_default_bit_field_value)[2:].zfill(optional_default_bit_field_len)
            logger.info("%s: optional fields bits: %s", sequence.__class__.__name__, optional_default_bit_field_value)
            # deque so each optional/default component pops its own bit.
            optional_default_bit_field_value = deque(optional_default_bit_field_value)
        else:
            # BUGFIX: the log call was missing its %s argument and
            # ``raise NotImplemented`` raised TypeError (NotImplemented is
            # not an exception class).
            logger.error("%s: optional_default_bit_field_len >= 65536 not supported.", sequence.__class__.__name__)
            raise NotImplementedError("optional/default bit-field length >= 65536 is not supported")
    for namedType in sequence.rootComponent.namedTypes:
        if namedType.isOptional:
            is_present = int(optional_default_bit_field_value.popleft())
            if is_present:
                logger.info("%s: decoding optional component: '%s' of type '%s' ...", sequence.__class__.__name__,
                            namedType.name, namedType.asn1Object.__class__.__name__)
                componentType = sequence.getComponentByName(namedType.name)
                decoded = componentType.create_field_list(per_bytes)
                logger.info("%s: decoded component: '%s'", sequence.__class__.__name__, namedType.name)
                new_seq[namedType.name] = decoded
            else:
                logger.info("%s: optional component '%s' not present", sequence.__class__.__name__, namedType.name)
                continue
        elif namedType.isDefaulted:  # CANONICAL-PER
            is_present = int(optional_default_bit_field_value.popleft())
            if is_present:
                logger.info("%s: decoding default component: '%s' of type '%s' ...", sequence.__class__.__name__,
                            namedType.name, namedType.asn1Object.__class__.__name__)
                componentType = sequence.getComponentByName(namedType.name)
                decoded = componentType.create_field_list(per_bytes)
                logger.info("%s: decoded component: '%s'", sequence.__class__.__name__, namedType.name)
                new_seq[namedType.name] = decoded
            else:
                logger.info("%s: default component '%s' not present", sequence.__class__.__name__, namedType.name)
                continue
        else:
            # Mandatory root component: always present in the stream.
            logger.info("%s: decoding component: '%s' of type '%s' ...", sequence.__class__.__name__,
                        namedType.name, namedType.asn1Object.__class__.__name__)
            componentType = sequence.getComponentByName(namedType.name)
            decoded = componentType.create_field_list(per_bytes)
            logger.info("%s: decoded component: '%s'", sequence.__class__.__name__, namedType.name)
            new_seq[namedType.name] = decoded
    if extension_bit:  # 19.7
        extension_addition_len = len(sequence.extensionAddition.namedTypes) if sequence.extensionAddition else 0
        extension_addition_group_len = len(sequence.extensionAdditionGroups)
        extension_addition_bit_field_len = extension_addition_len + extension_addition_group_len
        extension_addition_length_determinant = decode_length_determinant(normally_small_length=True,
                                                                          constrained=False,
                                                                          n=per_bytes,
                                                                          lowerBound=None,
                                                                          upperBound=None)
        extension_addition_bit_field_value = per_bytes.next(size=bit_field_size(octets=0, bits=extension_addition_length_determinant))
        extension_addition_bit_field_bits = bin(extension_addition_bit_field_value)[2:].zfill(extension_addition_bit_field_len)
        logger.info("%s: extension bits: '%s'; within that: extension addition bits: '%s'; extension addition group bits: '%s'",
                    sequence.__class__.__name__,
                    extension_addition_bit_field_bits,
                    extension_addition_bit_field_bits[:extension_addition_len],
                    extension_addition_bit_field_bits[extension_addition_len:])
        # Extension additions are octet-aligned open-type fields.
        per_bytes.next(bit_field_size(octets=0, bits=0), octet_align=True)
        if sequence.extensionAddition:
            for extensionNamedType, is_present_bit in zip(sequence.extensionAddition.namedTypes,
                                                          extension_addition_bit_field_bits[:extension_addition_len]):
                if int(is_present_bit):
                    logger.info("%s: extension addition component '%s' present. Decoding...", sequence.__class__.__name__,
                                extensionNamedType.name)
                    componentType = sequence.getComponentByName(extensionNamedType.name)
                    decoded = decode_open_type_field(componentType, per_bytes)
                    logger.info("%s: decoded extension addition component: '%s'", sequence.__class__.__name__, extensionNamedType.name)
                    new_seq[extensionNamedType.name] = decoded
                else:
                    logger.info("%s: extension addition component '%s' not present.", sequence.__class__.__name__,
                                extensionNamedType.name)
        if sequence.extensionAdditionGroups:
            for group_index, (extensionAdditionGroup, is_present_bit) in enumerate(zip(sequence.extensionAdditionGroups,
                                                                                       extension_addition_bit_field_bits[extension_addition_len:]), 1):
                if int(is_present_bit):
                    logger.info("%s: extension addition group number %s present. Decoding as separate sequence with name 'INNER_SEQUENCE' ...",
                                sequence.__class__.__name__, group_index)
                    group_sequence = create_new_fake_empty_sequence(rootComponent=extensionAdditionGroup)
                    decoded = decode_open_type_field(group_sequence, per_bytes)
                    logger.info("%s: extension addition group decoded; Components: '%s'",
                                sequence.__class__.__name__,
                                ', '.join([type_name for type_name in decoded.named_types.keys()]))
                    # Flatten the group's components into the parent sequence.
                    for type_name, type_value in decoded.named_types.items():
                        new_seq[type_name] = type_value
                else:
                    logger.info("%s: extension addition group number %s not present",
                                sequence.__class__.__name__, group_index)
    logger.info("%s: decoding finished.", sequence.__class__.__name__)
    return new_seq
def decode_sequence_of(sequence_of, per_bytes):
    """Decode a SEQUENCE OF (X.691 clause 20) into a new instance of its
    class.

    The component count comes from a length determinant (or from the size
    constraint itself when lower == upper < 65536); each component is then
    decoded in order via its ``create_field_list`` hook.
    """
    logger.info("Decoding sequence of: %s", sequence_of.__class__.__name__)
    new_seq_of = sequence_of.__class__()
    component_type = sequence_of.getComponentType()
    if sequence_of.subtypeSpec.extensionMarker:  # 20.4
        logger.info("%s: extension marker should be present", sequence_of.__class__.__name__)
        extension_bit_field = per_bytes.next(size=bit_field_size(octets=0, bits=1), octet_align=False)
        logger.info("%s: extension bit: %s", sequence_of.__class__.__name__, extension_bit_field)
        if not extension_bit_field:
            # Count lies inside the extension root: constrained determinant.
            length_determinant = decode_length_determinant(normally_small_length=False,
                                                           constrained=True,
                                                           n=per_bytes,
                                                           lowerBound=sequence_of.subtypeSpec.lowerEndpoint,
                                                           upperBound=sequence_of.subtypeSpec.upperEndpoint)
            logger.info("%s: decoding %s component(s) of type: '%s'", sequence_of.__class__.__name__,
                        length_determinant, component_type.__class__.__name__)
            for index in range(length_determinant):
                val = component_type.create_field_list(per_bytes)
                logger.info("%s: decoding of component '%s' number %s finished", sequence_of.__class__.__name__,
                            component_type.__class__.__name__, index + 1)
                new_seq_of.extend([val])
            logger.info("%s: decoding finished.", sequence_of.__class__.__name__)
            return new_seq_of
        else:
            # Count outside the extension root: octet-align, then an
            # unconstrained determinant.
            per_bytes.next(size=bit_field_size(octets=0, bits=0), octet_align=True)
            length_determinant = decode_length_determinant(normally_small_length=False,
                                                           constrained=False,
                                                           n=per_bytes,
                                                           lowerBound=0,
                                                           upperBound=None)
            logger.info("%s: decoding %s component(s) of type: '%s'", sequence_of.__class__.__name__,
                        length_determinant, component_type.__class__.__name__)
            for index in range(length_determinant):
                val = component_type.create_field_list(per_bytes)
                logger.info("%s: decoding of component '%s' number %s finished", sequence_of.__class__.__name__,
                            component_type.__class__.__name__, index + 1)
                new_seq_of.extend([val])
            logger.info("%s: decoding finished.", sequence_of.__class__.__name__)
            return new_seq_of
    elif (sequence_of.subtypeSpec.upperEndpoint is not None) and \
            (sequence_of.subtypeSpec.lowerEndpoint == sequence_of.subtypeSpec.upperEndpoint) and \
            (sequence_of.subtypeSpec.upperEndpoint < 65536):  # 20.5
        # BUGFIX (robustness): guard upperEndpoint against None — with no
        # size constraint at all, `None == None` was True and
        # `None < 65536` raised TypeError on Python 3. The guard mirrors
        # the `is not None` checks used by the sibling decoders.
        # BUGFIX: the log call below was missing its %s argument.
        logger.info("%s: lowerEndpoint == upperEndpoint and upperEndpoint < 65536", sequence_of.__class__.__name__)
        logger.info("%s: decoding %s component(s) of type: '%s'", sequence_of.__class__.__name__,
                    sequence_of.subtypeSpec.lowerEndpoint, component_type.__class__.__name__)
        for index in range(sequence_of.subtypeSpec.lowerEndpoint):
            val = component_type.create_field_list(per_bytes)
            logger.info("%s: decoding of component '%s' number %s finished", sequence_of.__class__.__name__,
                        component_type.__class__.__name__, index + 1)
            new_seq_of.extend([val])
        logger.info("%s: decoding finished.", sequence_of.__class__.__name__)
        return new_seq_of
    else:  # 20.6
        logger.info("%s: lowerEndpoint = %s and upperEndpoint = %s", sequence_of.__class__.__name__,
                    sequence_of.subtypeSpec.lowerEndpoint, sequence_of.subtypeSpec.upperEndpoint)
        if sequence_of.subtypeSpec.upperEndpoint:
            length_determinant = decode_length_determinant(normally_small_length=False,
                                                           constrained=True,
                                                           n=per_bytes,
                                                           lowerBound=sequence_of.subtypeSpec.lowerEndpoint,
                                                           upperBound=sequence_of.subtypeSpec.upperEndpoint)
            logger.info("%s: decoding %s component(s) of type: '%s'", sequence_of.__class__.__name__,
                        length_determinant, component_type.__class__.__name__)
            for index in range(length_determinant):
                val = component_type.create_field_list(per_bytes)
                logger.info("%s: decoding of component '%s' number %s finished", sequence_of.__class__.__name__,
                            component_type.__class__.__name__, index + 1)
                new_seq_of.extend([val])
            logger.info("%s: decoding finished.", sequence_of.__class__.__name__)
            return new_seq_of
        else:
            length_determinant = decode_length_determinant(normally_small_length=False,
                                                           constrained=False,
                                                           n=per_bytes,
                                                           lowerBound=sequence_of.subtypeSpec.lowerEndpoint,
                                                           upperBound=None)
            logger.info("%s: decoding %s component(s) of type: '%s'", sequence_of.__class__.__name__,
                        length_determinant, component_type.__class__.__name__)
            for index in range(length_determinant):
                val = component_type.create_field_list(per_bytes)
                logger.info("%s: decoding of component '%s' number %s finished", sequence_of.__class__.__name__,
                            component_type.__class__.__name__, index + 1)
                new_seq_of.extend([val])
            logger.info("%s: decoding finished.", sequence_of.__class__.__name__)
            return new_seq_of
def decode_choice(choice, per_bytes):  # 23
    """Decode an ASN.1 CHOICE value from an aligned-PER byte stream (X.691 clause 23).

    :param choice: template CHOICE object describing the alternatives
    :param per_bytes: PER bit/byte reader positioned at the CHOICE encoding
    :return: a new instance of ``choice``'s class with the selected
        alternative decoded and assigned
    :raises NotImplementedError: if the extension bit is set but the type
        declares no extension additions
    """
    logger.info("Decoding choice: %s", choice.__class__.__name__)
    new_choice = choice.__class__()
    n = len(choice.rootComponent.namedTypes) - 1  # largest root-alternative index
    if not choice.subtypeSpec.extensionMarker:  # 23.6
        if len(choice.rootComponent.namedTypes) == 1:  # 23.4: single alternative, no index encoded
            logger.info("%s: length of choice list is 1", choice.__class__.__name__)
            selected_type = choice.rootComponent.namedTypes[0]
            selected_component = choice.getComponentByName(selected_type.name)
            logger.info("%s: chosen component: '%s' of type '%s' has index 0. Decoding...", choice.__class__.__name__,
                        selected_type.name, selected_component.__class__.__name__)
            val = selected_component.create_field_list(per_bytes)
            logger.info("%s: decoding of chosen component '%s' finished", choice.__class__.__name__, selected_type.name)
            new_choice[selected_type.name] = val
            logger.info("%s: decoding finished.", choice.__class__.__name__)
            return new_choice
        else:  # 23.6: index encoded as a constrained whole number in [0, n]
            index = decode_constrained_whole_number_from_field_list(per_bytes, lowerEndpoint=0, upperEndpoint=n)
            namedType = choice.rootComponent.namedTypes[index]
            selected_component = choice.getComponentByPosition(index)
            logger.info("%s: chosen component: '%s' of type '%s' has index %s. Decoding...", choice.__class__.__name__,
                        namedType.name, selected_component.__class__.__name__, index)
            val = selected_component.create_field_list(per_bytes)
            logger.info("%s: decoding of chosen component '%s' finished", choice.__class__.__name__, namedType.name)
            new_choice[namedType.name] = val
            logger.info("%s: decoding finished.", choice.__class__.__name__)
            return new_choice
    else:  # 23.5: extensible CHOICE — read the extension bit first
        logger.info("%s: extension marker should be present", choice.__class__.__name__)
        extension_bit = per_bytes.next(size=bit_field_size(octets=0, bits=1), octet_align=False)
        logger.info("%s: extension bit: %s", choice.__class__.__name__, extension_bit)
        if not extension_bit:  # 23.7: chosen alternative is in the root
            if len(choice.rootComponent.namedTypes) == 1:  # 23.4
                logger.info("%s: length of choice list is 1", choice.__class__.__name__)
                selected_type = choice.rootComponent.namedTypes[0]
                selected_component = choice.getComponentByName(selected_type.name)
                logger.info("%s: chosen component: '%s' of type '%s' has index 0. Decoding...",
                            choice.__class__.__name__,
                            selected_type.name, selected_component.__class__.__name__)
                val = selected_component.create_field_list(per_bytes)
                logger.info("%s: decoding of chosen component '%s' finished", choice.__class__.__name__,
                            selected_type.name)
                new_choice[selected_type.name] = val
                logger.info("%s: decoding finished.", choice.__class__.__name__)
                return new_choice
            else:
                index = decode_constrained_whole_number_from_field_list(per_bytes, lowerEndpoint=0, upperEndpoint=n)
                namedType = choice.rootComponent.namedTypes[index]
                selected_component = choice.getComponentByPosition(index)
                logger.info("%s: chosen component: '%s' of type '%s' has index %s. Decoding...",
                            choice.__class__.__name__,
                            namedType.name, selected_component.__class__.__name__, index)
                val = selected_component.create_field_list(per_bytes)
                logger.info("%s: decoding of chosen component '%s' finished", choice.__class__.__name__, namedType.name)
                new_choice[namedType.name] = val
                logger.info("%s: decoding finished.", choice.__class__.__name__)
                return new_choice
        else:  # 23.8: chosen alternative is in the extension
            index = decode_normally_small_non_negative_whole_number(per_bytes, lower_bound=0)
            if choice.extensionAddition and not choice.extensionAdditionGroups:
                logger.info("%s: has only extension addition", choice.__class__.__name__)
                choice_extension = choice.extensionAddition
            elif not choice.extensionAddition and choice.extensionAdditionGroups:
                logger.info("%s: has only extension addition groups", choice.__class__.__name__)
                # Flatten all extension addition groups into a single NamedTypes-like collection.
                choice_extension = choice.extensionAdditionGroups[0].__class__(*[named_type for additive_named_types in choice.extensionAdditionGroups for
                                                                                named_type in additive_named_types._NamedTypes__namedTypes])
            elif choice.extensionAddition and choice.extensionAdditionGroups:
                logger.info("%s: has extension addition and extension addition groups", choice.__class__.__name__)
                # NOTE(review): concatenating extensionAddition with the raw group list
                # looks type-inconsistent with the branch above — verify when this path is hit.
                choice_extension = choice.extensionAddition + choice.extensionAdditionGroups
            else:  # is this a valid case?
                logger.error("%s: extension bit is present but neither extension addition nor extension addition groups are.",
                             choice.__class__.__name__)
                # Fixed: 'raise NotImplemented' raises TypeError in Python 3 because
                # NotImplemented is a singleton, not an exception class.
                raise NotImplementedError("extension bit set but no extension additions declared")
            namedType = choice_extension.namedTypes[index]
            selected_component = choice.getComponentByName(namedType.name)
            logger.info("%s: chosen component: '%s' of type '%s' has index %s (indexed from extension choices). Decoding...",
                        choice.__class__.__name__,
                        namedType.name, selected_component.__class__.__name__, index)
            val = decode_open_type_field(selected_component, per_bytes)
            logger.info("%s: decoding of chosen component '%s' finished", choice.__class__.__name__, namedType.name)
            new_choice[namedType.name] = val
            logger.info("%s: decoding finished.", choice.__class__.__name__)
            return new_choice
def decode_constrained_whole_number_from_field_list(per_bytes, lowerEndpoint, upperEndpoint):
    """Decode a constrained whole number, indirecting through a length
    determinant when the range exceeds 64K (X.691 13.2.5 / 13.2.6)."""
    logger.debug("function: decode_constrained_whole_number_from_field_list; params: per_bytes: (hidden), "
                 "lowerEndpoint: %s, upperEndpoint: %s", lowerEndpoint, upperEndpoint)
    if integer_range(lowerEndpoint, upperEndpoint) <= 65536:  # 13.2.5: direct encoding
        return decode_constrained_whole_number(per_bytes, lowerBound=lowerEndpoint, upperBound=upperEndpoint)
    # 13.2.6: a constrained length determinant gives the octet count of the value field.
    max_octets = get_range_octet_len(upperEndpoint - lowerEndpoint)
    octet_count = decode_length_determinant(normally_small_length=False,
                                            constrained=True,
                                            n=per_bytes,
                                            lowerBound=1,
                                            upperBound=max_octets)
    offset = per_bytes.next(bit_field_size(octets=octet_count, bits=0), octet_align=True)
    return lowerEndpoint + offset
def decode_semi_constrained_whole_number_from_field_list(per_bytes, lowerBound):
    """Decode a semi-constrained whole number: an unconstrained length
    determinant followed by that many octets of offset above *lowerBound*."""
    logger.debug("function: decode_semi_constrained_whole_number_from_field_list; params: per_bytes: (hidden), lowerBound: %s",
                 lowerBound)
    octet_count = decode_length_determinant(normally_small_length=False,
                                            constrained=False,
                                            n=per_bytes,
                                            lowerBound=1,
                                            upperBound=None)
    logger.debug("semi constranied length determinant: %s", octet_count)
    offset = per_bytes.next(bit_field_size(octets=octet_count, bits=0))
    logger.debug("semi constrained whole number value: %s", offset)
    return lowerBound + offset
def decode_open_type_field(type, per_bytes):
    """Decode an open-type field: an unconstrained length determinant, then
    the contained value re-parsed from a fresh PER byte stream."""
    octet_count = decode_length_determinant(normally_small_length=False,
                                            constrained=False,
                                            n=per_bytes,
                                            lowerBound=None,
                                            upperBound=None)
    raw = per_bytes.next(size=bit_field_size(octets=octet_count, bits=0), octet_align=True)
    # Re-hexify the contained octets (zero-padded to the full length) so the
    # inner type can be decoded from its own PerBytes stream.
    hex_payload = hex(raw)[2:].rstrip("L").zfill(octet_count * 2)
    return type.create_field_list(PerBytes(hex_payload))
def decode_constrained_whole_number(per_bytes, lowerBound, upperBound):
    """Decode a constrained whole number (X.691 clause 13).

    :param per_bytes: PER bit/byte reader
    :param lowerBound: inclusive lower endpoint of the constraint
    :param upperBound: inclusive upper endpoint of the constraint
    :return: the decoded whole number
    """
    logger.debug("function: decode_constrained_whole_number; params: per_bytes: (hidden), lowerBound: %s, "
                 "upperBound: %s", lowerBound, upperBound)
    # Renamed from 'range', which shadowed the builtin.
    value_range = integer_range(lowerBound, upperBound)
    field_size = get_bit_field_size(value_range)
    logger.debug("constrained whole number: range: %s; size: %s", value_range, field_size)
    if value_range == 1:
        # NOTE(review): per X.691 13.2.1 a one-value range encodes nothing and the
        # value is lowerBound; this returns 0 instead — verify callers expect that.
        return 0
    elif value_range <= 255:
        # Unaligned bit field (13.2.2).
        non_negative_binary_integer = decode_non_negative_binary_integer(per_bytes, field_size, octet_align=False)
    elif value_range <= 65536:
        # One- or two-octet field, octet-aligned (13.2.3 / 13.2.4); the two
        # original branches for 256 and 257..65536 were identical and are merged.
        per_bytes.next(size=bit_field_size(octets=0, bits=0), octet_align=True)
        non_negative_binary_integer = decode_non_negative_binary_integer(per_bytes, field_size, octet_align=True)
    else:  # value_range > 65536 (13.2.6)
        non_negative_binary_integer = decode_non_negative_binary_integer(per_bytes, field_size,
                                                                         octet_align=True,
                                                                         minimum_number_of_octets=True)
        # NOTE(review): unlike the branches above, lowerBound is not added here.
        # Currently unreachable in practice: minimum_number_of_octets raises
        # NotImplementedError downstream. Verify against X.691 13.2.6 if enabled.
        return non_negative_binary_integer
    return lowerBound + non_negative_binary_integer
def decode_normally_small_non_negative_whole_number(per_bytes, lower_bound):  # 11.6
    """Decode a normally-small non-negative whole number (X.691 11.6): one
    flag bit selects a 6-bit field (11.6.1, n <= 63) or an octet-aligned
    semi-constrained number (11.6.2)."""
    logger.debug("function: decode_normally_small_non_negative_whole_number; params: per_bytes: (hidden), lowerBound: %s",
                 lower_bound)
    is_large = per_bytes.next(size=bit_field_size(octets=0, bits=1))
    if is_large:  # 11.6.2
        per_bytes.next(size=bit_field_size(octets=0, bits=0), octet_align=True)
        return decode_semi_constrained_whole_number_from_field_list(per_bytes, lowerBound=lower_bound)
    # 11.6.1: the value fits in six bits.
    return decode_non_negative_binary_integer(per_bytes=per_bytes,
                                              field_size=bit_field_size(octets=0, bits=6),
                                              octet_align=False)
def decode_unconstrained_whole_number(per_bytes):
    """Decode an unconstrained whole number: a one-octet length followed by
    that many octets interpreted as a two's-complement signed integer."""
    logger.debug("function: decode_unconstrained_whole_number; params: per_bytes: (hidden)")
    octet_count = per_bytes.next(octet_align=True)
    logger.debug("unconstarined length determinant: %s", octet_count)
    raw = per_bytes.next(bit_field_size(octets=octet_count, bits=0))
    value = twos_comp(per_bytes=raw, bits=8 * octet_count)
    logger.debug("unconstarined whole number: %s", value)
    return value
def decode_non_negative_binary_integer(per_bytes, field_size, octet_align, minimum_number_of_octets=False):
    """Read *field_size* bits from *per_bytes* as a non-negative binary integer.

    :param octet_align: when True, consume padding up to the next octet
        boundary after the value has been read
    :param minimum_number_of_octets: minimum-octet encoding is not
        implemented
    :raises NotImplementedError: if minimum_number_of_octets is True
    """
    logger.debug("function: decode_non_negative_binary_integer; params: per_bytes: (hidden), field_size: %s, octet_align: %s, "
                 "minimum_number_of_octets: %s", field_size, octet_align, minimum_number_of_octets)
    if minimum_number_of_octets:
        # Fixed: message typo ("no supported"), and 'raise NotImplemented' raised a
        # TypeError because NotImplemented is not an exception class.
        logger.error("Parameter 'minimum_number_of_octets' not supported.")
        raise NotImplementedError("minimum_number_of_octets is not supported")
    decoded = per_bytes.next(field_size)
    logger.debug("Non-negative binary integer value: '%s'", decoded)
    if octet_align:
        per_bytes.next(size=bit_field_size(octets=0, bits=0), octet_align=octet_align)
    return decoded
def twos_comp(per_bytes, bits):
    """Interpret *per_bytes* as a two's-complement signed integer of width *bits*."""
    sign_bit = 1 << (bits - 1)
    # If the sign bit is set, shift the unsigned value down into the negative range.
    return per_bytes - (1 << bits) if per_bytes & sign_bit else per_bytes
def decode_length_determinant(normally_small_length, constrained, n, lowerBound, upperBound):
    """Decode a PER length determinant (X.691 clause 11.9).

    :param normally_small_length: decode as a normally-small length (11.9.3.4)
    :param constrained: decode as a constrained whole number in
        [lowerBound, upperBound]
    :param n: the PER bit/byte reader (named 'n' for interface compatibility)
    :return: the decoded length
    :raises NotImplementedError: for normally-small lengths > 64 and for
        fragmented lengths (11.9.3.8)
    """
    logger.debug("function: decode_length_determinant; params: normally_small_length: %s, constrained: %s, "
                 "n (per_bytes): (hidden), lowerBound: %s, upperBound: %s", normally_small_length, constrained, lowerBound,
                 upperBound)
    if normally_small_length:  # 11.9.3.4
        single_bit_field = n.next(size=bit_field_size(octets=0, bits=1))
        if not single_bit_field:  # n <= 64
            normally_small_length_determinant_size = decode_non_negative_binary_integer(per_bytes=n,
                                                                                        field_size=bit_field_size(octets=0, bits=6),
                                                                                        octet_align=False)
            # The 6-bit field encodes length-1.
            return normally_small_length_determinant_size + 1
        else:  # n > 64
            # Fixed: 'raise NotImplemented' raises TypeError in Python 3.
            raise NotImplementedError("normally-small length determinants > 64 are not supported")
    elif constrained:
        constrained_length_determinant = decode_constrained_whole_number(per_bytes=n, lowerBound=lowerBound, upperBound=upperBound)
        return constrained_length_determinant
    else:
        n.next(size=bit_field_size(octets=0, bits=0), octet_align=True)
        first_bit = n.next(bit_field_size(octets=0, bits=1))
        if not first_bit:  # 11.9.3.6: single-octet form, length in 7 bits
            unconstrained_length_determinant = decode_non_negative_binary_integer(per_bytes=n,
                                                                                 field_size=bit_field_size(octets=0, bits=7),
                                                                                 octet_align=True)
            return unconstrained_length_determinant
        else:
            second_bit = n.next(bit_field_size(octets=0, bits=1))
            if not second_bit:  # 11.9.3.7: two-octet form, length in 14 bits
                unconstrained_length_determinant = decode_non_negative_binary_integer(per_bytes=n,
                                                                                     field_size=bit_field_size(
                                                                                         octets=1, bits=6),
                                                                                     octet_align=True)
                return unconstrained_length_determinant
            else:
                # Fixed: 'raise NotImplemented' raises TypeError in Python 3.
                raise NotImplementedError("fragmented length determinants (11.9.3.8) are not supported")
def create_new_fake_empty_sequence(rootComponent):
    """Wrap *rootComponent* in a minimal, unconstrained SEQUENCE-like object
    that can be fed back through decode_sequence()."""
    class _Constraint(object):
        pass

    # Unconstrained, non-extensible subtype spec.
    constraint = _Constraint()
    constraint.extensionMarker = False
    constraint.lowerEndpoint = None
    constraint.upperEndpoint = None

    class INNER_SEQUENCE(object):
        subtypeSpec = constraint

        def __init__(self):
            # Decoded members keyed by component name.
            self.named_types = {}

        def create_field_list(self, per_bytes):
            # Delegate to the generic sequence decoder.
            return decode_sequence(self, per_bytes)

        def getComponentByName(self, name):
            position = self.rootComponent.getPositionByName(name)
            return self.rootComponent.getTypeByPosition(position)

        def __setitem__(self, key, value):
            self.named_types[key] = value

    INNER_SEQUENCE.rootComponent = rootComponent
    INNER_SEQUENCE.componentType = rootComponent
    INNER_SEQUENCE.extensionAddition = None
    INNER_SEQUENCE.extensionAdditionGroups = []
    return INNER_SEQUENCE()
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import math
import torch.utils.model_zoo as model_zoo
import time
import torch.nn.functional as F
import faiss
import json
import os
import os.path
from collections import OrderedDict
def robust_norm(var):
    '''
    :param var: Variable of BxCxHxW
    :return: p-norm of BxCxW
    '''
    # Epsilon keeps the sqrt differentiable at zero (avoids NaN gradients).
    squared_sum = (var ** 2).sum(dim=2)
    return (squared_sum + 1e-8).sqrt()
class CrossEntropyLossSeg(nn.Module):
    """Per-point cross-entropy for segmentation scores laid out as BxCxN."""

    def __init__(self, weight=None, size_average=True):
        super(CrossEntropyLossSeg, self).__init__()
        self.nll_loss = nn.NLLLoss(weight, size_average)

    def forward(self, inputs, targets):
        '''
        :param inputs: BxclassxN
        :param targets: BxN
        :return:
        '''
        # Add a trailing singleton dim so NLLLoss treats the data as BxCxNx1 / BxNx1.
        log_probs = F.log_softmax(inputs.unsqueeze(3), dim=1)
        return self.nll_loss(log_probs, targets.unsqueeze(2))
def visualize_pc_seg(score, seg, label, visualizer, opt, input_pc, batch_num):
    """Render one point cloud colored by predicted and ground-truth part labels.

    :param score: CxN per-point class scores for a single instance
    :param seg: N ground-truth part labels
    :param label: unused here (kept for interface compatibility)
    :param visualizer: object providing display_current_results()
    :param opt: options object; opt.dataroot must contain part_color_mapping.json
    :param input_pc: 3xN point cloud tensor
    :param batch_num: unused here (kept for interface compatibility)
    """
    # display only one instance of pc/img
    input_pc_np = input_pc.cpu().numpy().transpose()  # Nx3
    pc_color_np = np.ones(input_pc_np.shape, dtype=int)  # Nx3
    gt_pc_color_np = np.ones(input_pc_np.shape, dtype=int)  # Nx3
    # construct color map
    _, predicted_seg = torch.max(score, dim=0, keepdim=False)  # 50xN -> N
    predicted_seg_np = predicted_seg.cpu().numpy()  # N
    gt_seg_np = seg.cpu().numpy()  # N
    color_map_file = os.path.join(opt.dataroot, 'part_color_mapping.json')
    # Fixed: the original json.load(open(...)) leaked the file handle.
    with open(color_map_file, 'r') as f:
        color_map = json.load(f)
    color_map_np = np.fabs((np.asarray(color_map) * 255)).astype(int)  # 50x3
    for i in range(input_pc_np.shape[0]):
        pc_color_np[i] = color_map_np[predicted_seg_np[i]]
        gt_pc_color_np[i] = color_map_np[gt_seg_np[i]]
        if gt_seg_np[i] == 49:
            # Label 49 is rendered near-black (presumably "unlabeled" — TODO confirm).
            gt_pc_color_np[i] = np.asarray([1, 1, 1]).astype(int)
    # Renamed from 'dict', which shadowed the builtin.
    visuals = OrderedDict([('pc_colored_predicted', [input_pc_np, pc_color_np]),
                           ('pc_colored_gt', [input_pc_np, gt_pc_color_np])])
    visualizer.display_current_results(visuals, 1, 1)
def compute_iou_np_array(score, seg, label, visualizer, opt, input_pc):
    """Per-instance mean part-IoU for a batch of segmentation predictions.

    :param score: BxCxN per-point class scores
    :param seg: BxN ground-truth part labels
    :param label: length-B category indices into the ShapeNet part table
    :param visualizer, opt, input_pc: unused (kept for interface compatibility)
    :return: numpy array of B per-instance mean IoUs
    """
    # ShapeNet-part: part-id ranges per object category.
    part_label = [
        [0, 1, 2, 3],
        [4, 5],
        [6, 7],
        [8, 9, 10, 11],
        [12, 13, 14, 15],
        [16, 17, 18],
        [19, 20, 21],
        [22, 23],
        [24, 25, 26, 27],
        [28, 29],
        [30, 31, 32, 33, 34, 35],
        [36, 37],
        [38, 39, 40],
        [41, 42, 43],
        [44, 45, 46],
        [47, 48, 49]
    ]
    _, seg_predicted = torch.max(score, dim=1)  # BxN
    iou_batch = []
    for i in range(score.size()[0]):
        iou_pc = []
        for part in part_label[label[i]]:
            gt = seg[i] == part
            predict = seg_predicted[i] == part
            # Fixed: '(gt + predict) == 2' silently breaks on modern torch, where
            # '+' on bool tensors is logical-or; use explicit bitwise and/or.
            intersection = gt & predict
            union = gt | predict
            if union.sum() == 0:
                # Part absent from both prediction and ground truth: perfect score.
                iou_part = 1.0
            else:
                iou_part = intersection.int().sum().item() / (union.int().sum().item() + 0.0001)
            iou_pc.append(iou_part)
        iou_batch.append(np.asarray(iou_pc).mean())
    iou_np = np.asarray(iou_batch)
    return iou_np
def compute_iou(score, seg, label, visualizer, opt, input_pc):
    '''
    Batch-mean part-IoU for segmentation predictions.

    :param score: BxCxN tensor of per-point class scores
    :param seg: BxN tensor of ground-truth part labels
    :param label: length-B category indices into the ShapeNet part table
    :param visualizer, opt, input_pc: unused (kept for interface compatibility)
    :return: scalar mean IoU over the batch
    '''
    # ShapeNet-part: part-id ranges per object category.
    part_label = [
        [0, 1, 2, 3],
        [4, 5],
        [6, 7],
        [8, 9, 10, 11],
        [12, 13, 14, 15],
        [16, 17, 18],
        [19, 20, 21],
        [22, 23],
        [24, 25, 26, 27],
        [28, 29],
        [30, 31, 32, 33, 34, 35],
        [36, 37],
        [38, 39, 40],
        [41, 42, 43],
        [44, 45, 46],
        [47, 48, 49]
    ]
    _, seg_predicted = torch.max(score, dim=1)  # BxN
    iou_batch = []
    for i in range(score.size()[0]):
        iou_pc = []
        for part in part_label[label[i]]:
            gt = seg[i] == part
            predict = seg_predicted[i] == part
            # Fixed: '(gt + predict) == 2' silently breaks on modern torch, where
            # '+' on bool tensors is logical-or; use explicit bitwise and/or.
            # (Dead commented-out debug machinery from the original was removed.)
            intersection = gt & predict
            union = gt | predict
            if union.sum() == 0:
                # Part absent from both prediction and ground truth: perfect score.
                iou_part = 1.0
            else:
                iou_part = intersection.int().sum().item() / (union.int().sum().item() + 0.0001)
            iou_pc.append(iou_part)
        iou_batch.append(np.asarray(iou_pc).mean())
    iou = np.asarray(iou_batch).mean()
    return iou
class ChamferLoss(nn.Module):
    """Chamfer distance between two point clouds.

    Nearest neighbours are found with Faiss (GPU flat-L2 index) in both
    directions; the loss is the mean robust norm of each point's offset to
    its nearest neighbour in the other cloud.
    """

    def __init__(self, opt):
        # opt must provide gpu_id and device (presumably an argparse-style
        # options object — TODO confirm against caller).
        super(ChamferLoss, self).__init__()
        self.opt = opt
        self.dimension = 3  # points are 3-D
        self.k = 1          # number of nearest neighbours per query point
        # we need only a StandardGpuResources per GPU
        self.res = faiss.StandardGpuResources()
        #self.res.setTempMemoryFraction(0.1)
        self.flat_config = faiss.GpuIndexFlatConfig()
        self.flat_config.device = opt.gpu_id
        # place holder
        self.forward_loss = torch.FloatTensor([0])
        self.backward_loss = torch.FloatTensor([0])

    def build_nn_index(self, database):
        '''
        Build a flat-L2 index over *database* and move it onto the GPU.

        :param database: numpy array of Nx3
        :return: Faiss index copied to GPU (the original docstring said CPU,
            but index_cpu_to_gpu is applied below)
        '''
        # index = faiss.GpuIndexFlatL2(self.res, self.dimension, self.flat_config) # dimension is 3
        index_cpu = faiss.IndexFlatL2(self.dimension)
        index = faiss.index_cpu_to_gpu(self.res, self.opt.gpu_id, index_cpu)
        index.add(database)
        return index

    def search_nn(self, index, query, k):
        '''
        :param index: Faiss index
        :param query: numpy array of Nx3
        :return: D: Variable of Nxk, type FloatTensor, in GPU
                 I: Variable of Nxk, type LongTensor, in GPU
        '''
        D, I = index.search(query, k)
        # Faiss returns numpy; convert to tensors (contiguous copies) and
        # move to the configured device when running on GPU.
        D_var = torch.from_numpy(np.ascontiguousarray(D))
        I_var = torch.from_numpy(np.ascontiguousarray(I).astype(np.int64))
        if self.opt.gpu_id >= 0:
            D_var = D_var.to(self.opt.device)
            I_var = I_var.to(self.opt.device)
        return D_var, I_var

    def forward(self, predict_pc, gt_pc):
        '''
        :param predict_pc: Bx3xM Variable in GPU
        :param gt_pc: Bx3xN Variable in GPU
        :return: scalar forward + backward chamfer loss
        '''
        predict_pc_size = predict_pc.size()
        gt_pc_size = gt_pc.size()
        # 1 2 3 4 5 6
        # Faiss needs contiguous NxD numpy arrays, so transpose to BxMx3 / BxNx3.
        predict_pc_np = np.ascontiguousarray(torch.transpose(predict_pc.data.clone(), 1, 2).cpu().numpy())  # BxMx3
        gt_pc_np = np.ascontiguousarray(torch.transpose(gt_pc.data.clone(), 1, 2).cpu().numpy())  # BxNx3
        # selected_gt: Bxkx3xM
        selected_gt_by_predict = torch.FloatTensor(predict_pc_size[0], self.k, predict_pc_size[1], predict_pc_size[2])
        # selected_predict: Bxkx3xN
        selected_predict_by_gt = torch.FloatTensor(gt_pc_size[0], self.k, gt_pc_size[1], gt_pc_size[2])
        if self.opt.gpu_id >= 0:
            selected_gt_by_predict = selected_gt_by_predict.to(self.opt.device)
            selected_predict_by_gt = selected_predict_by_gt.to(self.opt.device)
        # process each batch independently.
        for i in range(predict_pc_np.shape[0]):
            index_predict = self.build_nn_index(predict_pc_np[i])
            index_gt = self.build_nn_index(gt_pc_np[i])
            # database is gt_pc, predict_pc -> gt_pc -----------------------------------------------------------
            _, I_var = self.search_nn(index_gt, predict_pc_np[i], self.k)
            # process nearest k neighbors
            for k in range(self.k):
                # Gather, for each predicted point, its nearest gt point (from the
                # original differentiable tensor, not the numpy copy).
                selected_gt_by_predict[i, k, ...] = gt_pc[i].index_select(1, I_var[:, k])
            # database is predict_pc, gt_pc -> predict_pc -------------------------------------------------------
            _, I_var = self.search_nn(index_predict, gt_pc_np[i], self.k)
            # process nearest k neighbors
            for k in range(self.k):
                selected_predict_by_gt[i, k, ...] = predict_pc[i].index_select(1, I_var[:, k])
        # compute loss ===================================================
        # selected_gt(Bxkx3xM) vs predict_pc(Bx3xM)
        forward_loss_element = robust_norm(selected_gt_by_predict - predict_pc.unsqueeze(1).expand_as(selected_gt_by_predict))
        self.forward_loss = forward_loss_element.mean()
        # Per-instance loss, kept for external inspection.
        self.forward_loss_array = forward_loss_element.mean(dim=1).mean(dim=1)
        # selected_predict(Bxkx3xN) vs gt_pc(Bx3xN)
        backward_loss_element = robust_norm(selected_predict_by_gt - gt_pc.unsqueeze(1).expand_as(selected_predict_by_gt))  # BxkxN
        self.backward_loss = backward_loss_element.mean()
        self.backward_loss_array = backward_loss_element.mean(dim=1).mean(dim=1)
        self.loss_array = self.forward_loss_array + self.backward_loss_array
        return self.forward_loss + self.backward_loss  # + self.sparsity_loss

    def __call__(self, predict_pc, gt_pc):
        # NOTE(review): overriding __call__ bypasses nn.Module hooks; it simply
        # delegates to forward() here.
        # start_time = time.time()
        loss = self.forward(predict_pc, gt_pc)
        # print(time.time()-start_time)
        return loss
|
import unittest
import nest_msa
import particle
class NestMSATestCase(unittest.TestCase):
    """Unit tests for the nest_msa particle-swarm multiple-sequence-alignment
    module and its Particle helper class."""

    # --- peer-matrix construction -------------------------------------------
    def test_create_peer_matrix_0(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        correct_output = [['a', 'a', 'a', 'a'],
                          ['b', 'c', 'b', 'b'],
                          ['c', 'b', 'c', 'c'],
                          ['b', 'c', 'h', 'b'],
                          ['c', 'f', 'i', 'c'],
                          ['d', 'g', 'm', 'j'],
                          ['e', None, 'n', 'k'],
                          ['m', None, None, 'm']]
        self.assertEqual(nest_msa.create_peer_matrix(sequences), correct_output)

    def test_create_peer_matrix_1(self):
        sequences = ["abbc", "abcd", "bbcd", "cddd", "ddde"]
        correct_output = [['a', 'a', 'b', 'c', 'd'],
                          ['b', 'b', 'b', 'd', 'd'],
                          ['b', 'c', 'c', 'd', 'd'],
                          ['c', 'd', 'd', 'd', 'e']]
        self.assertEqual(nest_msa.create_peer_matrix(sequences), correct_output)

    def test_create_peer_matrix_2(self):
        # Empty sequences should pad with None.
        sequences = ["abc", "", "def"]
        correct_output = [['a', None, 'd'],
                          ['b', None, 'e'],
                          ['c', None, 'f']]
        self.assertEqual(nest_msa.create_peer_matrix(sequences), correct_output)

    def test_create_peer_matrix_3(self):
        # Gap characters ('-') already present in the input are preserved.
        sequences = ["a-bcbcdem", "acbcfg", "a-bchimn", "a-bcbcjkm"]
        correct_output = [['a', 'a', 'a', 'a'],
                          ['-', 'c', '-', '-'],
                          ['b', 'b', 'b', 'b'],
                          ['c', 'c', 'c', 'c'],
                          ['b', 'f', 'h', 'b'],
                          ['c', 'g', 'i', 'c'],
                          ['d', None, 'm', 'j'],
                          ['e', None, 'n', 'k'],
                          ['m', None, None, 'm']]
        self.assertEqual(nest_msa.create_peer_matrix(sequences), correct_output)

    # --- row weight and objective function ----------------------------------
    def test_weight_0(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.weight(matrix[0]), 1.0)

    def test_weight_1(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.weight(matrix[1]), 0.1875)

    def test_objective_0(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.objective(matrix, 1), 2.625)

    def test_objective_1(self):
        sequences = ["abcdef", "aaccee", "bbddff", "abaded", "abccdd", "abcdefg"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.objective(matrix, 1), 6.25)

    def test_objective_2(self):
        sequences = ["The quick brown fox jumped over the lazy dog", "The quick brn fox jumed oer te laxy dogg", "Thje quicc brownn foxy jnmped oevr the lazzy do"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertAlmostEqual(nest_msa.objective(matrix, 10), 11.0)

    def test_objective_3(self):
        sequences = ["The quick brown fox jumped over the lazy dog", "The quick brn fox jumed oer te laxy dogg", "Thje quicc brownn foxy jnmped oevr the lazzy do"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertAlmostEqual(nest_msa.objective(matrix, 34), 3.5)

    # --- per-row statistics ---------------------------------------------------
    def test_mostfrequent_0(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.mostfrequent(matrix[1]), (3, 'b'))

    def test_mostfrequent_1(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.mostfrequent(matrix[2]), (3, 'c'))

    def test_mostfrequent_2(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.mostfrequent(matrix[3]), (2, 'b'))

    # --- particle stopping criteria ------------------------------------------
    def test_criteria2_0(self):
        p = particle.Particle(0, 0)
        p.updated = 3
        self.assertEqual(nest_msa.criteria2(p, 2), True)

    def test_criteria2_1(self):
        p = particle.Particle(0, 0)
        p.updated = 3
        self.assertEqual(nest_msa.criteria2(p, 3), False)

    def test_criteria2_2(self):
        p = particle.Particle(0, 0)
        p.updated = 3
        self.assertEqual(nest_msa.criteria2(p, 4), False)

    def test_criteria3_0(self):
        M = [['a', 'a', 'b', 'c', 'd'],
             ['b', 'b', 'b', 'd', 'd'],
             ['b', 'c', 'c', 'd', 'd'],
             ['c', 'd', 'd', 'd', 'e']]
        p = particle.Particle('b', [0, [2]])
        self.assertEqual(nest_msa.criteria3(p, 1, M), True)

    def test_criteria3_1(self):
        M = [['a', 'a', 'b', 'c', 'd'],
             ['b', 'b', 'b', 'd', 'd'],
             ['b', 'c', 'c', 'd', 'd'],
             ['c', 'd', 'd', 'd', 'e']]
        p = particle.Particle('b', [0, [0, 1, 2]])
        self.assertEqual(nest_msa.criteria3(p, 1, M), False)

    def test_criteria3_2(self):
        M = [['a', 'a', 'b', 'c', 'd'],
             ['b', 'b', 'b', 'd', 'd'],
             ['b', 'c', 'c', 'd', 'd'],
             ['c', 'd', 'd', 'd', 'e']]
        p = nest_msa.getposition('d', 2, M)
        self.assertEqual(nest_msa.criteria3(p, 3, M), True)

    def test_stopcriteria_0(self):
        M = [['a', 'a', 'b', 'c', 'd'],
             ['b', 'b', 'b', 'd', 'd'],
             ['b', 'c', 'c', 'd', 'd'],
             ['c', 'd', 'd', 'd', 'e']]
        p = particle.Particle('b', [0, [0, 1, 2]])
        p.updated = 5
        self.assertEqual(nest_msa.stopcriteria(p, 1, M, 4), False)

    def test_stopcriteria_1(self):
        M = [['a', 'a', 'b', 'c', 'd'],
             ['b', 'b', 'b', 'd', 'd'],
             ['b', 'c', 'c', 'd', 'd'],
             ['c', 'd', 'd', 'd', 'e']]
        p = particle.Particle('b', [0, [2]])
        p.updated = 4
        self.assertEqual(nest_msa.stopcriteria(p, 1, M), False)

    def test_stopcriteria_2(self):
        M = [['a', 'a', 'b', 'c', 'd'],
             ['b', 'b', 'b', 'd', 'd'],
             ['b', 'c', 'c', 'd', 'd'],
             ['c', 'd', 'd', 'd', 'e']]
        p = particle.Particle('b', [0, [2]])
        p.updated = 6
        self.assertEqual(nest_msa.stopcriteria(p, 1, M), True)

    # --- alignment steps -------------------------------------------------------
    def test_row_alignment_0(self):
        M = [['a', 'a', 'b', 'c', 'd'],
             ['b', 'b', 'b', 'b', 'b'],
             ['b', 'c', 'c', 'd', 'd'],
             ['c', 'd', 'd', 'd', 'e']]
        self.assertEqual(nest_msa.row_alignment(1, M), None)

    def test_row_alignment_1(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        M = nest_msa.create_peer_matrix(sequences)
        correctp = particle.Particle('c', (1, [1]))
        correctp.best = (2, [0, 1, 2, 3])
        correctp.best_value = 9.0
        self.assertEqual(nest_msa.row_alignment(1, M), correctp)

    def test_row_alignment_2(self):
        M = [['a', 'a', 'a', '-', 'a'],
             ['b', 'b', 'b', 'b', 'b'],
             ['c', 'c', 'c', 'c', 'c'],
             ['d', 'd', 'd', 'd', 'd']]
        self.assertEqual(nest_msa.row_alignment(0, M), None)

    def test_nest_msa_main(self):
        # End-to-end: the full algorithm on a small four-sequence example.
        correctM = [['a', 'a', 'a', 'a'],
                    ['b', '-', 'b', 'b'],
                    ['c', 'c', 'c', 'c'],
                    ['b', 'b', '-', 'b'],
                    ['c', 'c', '-', 'c'],
                    ['d', 'f', 'h', 'j'],
                    ['e', 'g', 'i', 'k'],
                    ['m', None, 'm', 'm'],
                    [None, None, 'n', None]]
        M = nest_msa.create_peer_matrix(['abcbcdem', 'acbcfg', 'abchimn', 'abcbcjkm'])
        self.assertEqual(nest_msa.nest_msa_main(M), correctM)

    # --- row predicates ---------------------------------------------------------
    def test_full_row_1(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.full_row(matrix[0]), True)

    def test_full_row_2(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.full_row(matrix[1]), False)

    def test_full_row_3(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.full_row(matrix[2]), False)

    def test_full_row_4(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.full_row(matrix[3]), False)

    def test_remove_missing_rows_1(self):
        sequences = ["abcbcde", "acbcfg", "abchimn", "abcbcjkm"]
        matrix1 = nest_msa.create_peer_matrix(sequences)
        sequences = ["abcbcde", "acbcfg", "abchimn", "abcbcjkm"]
        matrix2 = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.remove_missing_rows(matrix1), matrix2)

    def test_remove_missing_rows_2(self):
        sequences = ["abcbcde", "acbcfg", "abchimn", "abcbcjkm-"]
        matrix1 = nest_msa.create_peer_matrix(sequences)
        sequences = ["abcbcde", "acbcfg", "abchimn", "abcbcjkm"]
        matrix2 = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.remove_missing_rows(matrix1), matrix2)

    def test_remove_missing_rows_3(self):
        sequences = ["abcbcde", "acbcfg", "abchimn---", "abcbcjkm--"]
        matrix1 = nest_msa.create_peer_matrix(sequences)
        sequences = ["abcbcde", "acbcfg", "abchimn-", "abcbcjkm"]
        matrix2 = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.remove_missing_rows(matrix1), matrix2)

    # --- particle positions and movement ----------------------------------------
    def test_get_position_1(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual((nest_msa.getposition('b', 1, matrix)).pos, (1, [0, 2, 3]))

    def test_get_position_2(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual((nest_msa.getposition('c', 1, matrix)).pos, (1, [1]))

    def test_get_position_3(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual((nest_msa.getposition('b', 0, matrix)).pos, (0, []))

    def test_fly_down_1(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        p = particle.Particle('b', (1, [0, 2, 3]))
        matrix_expected = nest_msa.create_peer_matrix(["a-bcbcdem", "acbcfg", "a-bchimn", "a-bcbcjkm"])
        self.assertEqual(nest_msa.fly_down(p, matrix), matrix_expected)

    def test_fly_down_2(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        p = particle.Particle('b', (1, [0, 2, 3]))
        matrix_expected = nest_msa.create_peer_matrix(["a---bcbcdem", "acbcfg", "a---bchimn", "a---bcbcjkm"])
        self.assertEqual(nest_msa.fly_down(p, matrix, stride=3), matrix_expected)

    # --- column and row helpers --------------------------------------------------
    def test_column_1(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.column(matrix, 0), ["a", "b", "c", "b", "c", "d", "e", "m"])

    def test_column_2(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.column(matrix, 1), ["a", "c", "b", "c", "f", "g", None, None])

    def test_aligned_1(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.aligned(matrix[0]), True)

    def test_aligned_2(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.aligned(matrix[7]), True)

    def test_aligned_3(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.aligned(matrix[6]), False)

    # --- swarm creation ------------------------------------------------------------
    def test_create_swarm_1(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.create_swarm(0, matrix), [particle.Particle('a', (0, [0, 1, 2, 3]))])

    def test_create_swarm_2(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.create_swarm(1, matrix), [particle.Particle('b', (1, [0, 2, 3])), particle.Particle('c', (1, [1]))])

    def test_create_swarm_3(self):
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.create_swarm(7, matrix), [particle.Particle('m', (7, [0, 3]))])
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
|
# The purpose of this program is to understand and demonstrate web scraping
# in Python. This project scrapes an Amazon product's price and sends me an
# email when the price of the item goes down!
# scraper.py
# By Pranav Rao
import requests # Installed requests and BS4, requests help get the information
from bs4 import BeautifulSoup # from a website and BS4 is used to extract that information
import smtplib # This is built in import that allows us to send/received email
import time # This will run
# Product page to monitor; swap in the URL of any desired item.
URL = 'https://www.amazon.com/Face-Mask-Pack-of-50/dp/B086KMYNSS/ref=sr_1_5?dchild=1&keywords=mask&qid=1593704243&sr' \
      '=8-5 '
# Desktop-browser User-Agent so Amazon serves the normal product page
# (find your own by searching "my user agent" on Google).
header = {
    "User-Agent": 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/83.0.4103.116 Safari/537.36'}
# This is one module/function
def check_price():
    """Fetch the Amazon product page and email an alert when the price
    drops below the $29.00 threshold.

    Uses the module-level URL and header constants; performs one HTTP
    GET per call (network I/O).
    """
    page = requests.get(URL, headers=header)  # fetch the raw product page
    soup = BeautifulSoup(page.content, 'html.parser')  # parse the HTML
    title = soup.find(id="productTitle").get_text()  # extract product title
    price_text = soup.find(id="priceblock_ourprice").get_text()  # e.g. "$29.99"
    converted_price = _parse_price(price_text)
    print(converted_price)  # console output for monitoring
    print(title.strip())
    # Single threshold check.  The original called send_mail() twice
    # (once for "< 29.00", then again for "> 28.00"), so prices between
    # 28 and 29 triggered TWO emails; alert exactly once on a drop.
    if converted_price < 29.00:
        send_mail()


def _parse_price(price_text):
    """Convert a price string such as "$1,299.99" to a float.

    The previous implementation used float(price[1:3]), which silently
    dropped the cents and broke for any price of $100 or more.
    """
    return float(price_text.strip().lstrip('$').replace(',', ''))
# This function is sending email in python
def send_mail():
server = smtplib.SMTP('smtp.gmail.com', 587) # gmails Server
server.ehlo() # Command sent by an email server to identify itself
server.starttls() # starts the server
server.ehlo() # Command sent by an email server to identify itself
server.login('raop7495@gmail.com', 'ILkobe!!') # Got to put in login information
subject = 'Price is Down!' # Subject of the Email
body = 'Check the amazon link https://www.amazon.com/Face-Mask-Pack-of-50/dp/B086KMYNSS/ref=sr_1_5?dchild=1' \
'&keywords=mask&qid=1593704243&sr=8-5 ' # This is the body where the link to the product is provide
msg = f"Subject: {subject}\n\n{body}" # Sending the message in proper subject
server.sendmail( # Sending from and to email addresses
'prao1524@gmail.com',
'raop7495@gmail.com',
msg # Includes the proper messages
)
print('HEY EMAIL HAS BEEN SENT') # Just for the console/knowing that I was able to send mail
server.quit() # This stop the signal of the server
# Entry point: poll the price once per day, forever.
while True:
    check_price()  # scrape the page and maybe send the alert
    time.sleep(86400)  # 86400 seconds = 24 hours between checks
|
import sys
import os

# Redirect stdin to a local test-input file (offline AtCoder testing setup).
f = open("C:/Users/user/Documents/python/atcoder/ABC093/import.txt","r")
sys.stdin = f
# -*- coding: utf-8 -*-

# ABC093 A: the input is a 3-character string; answer "Yes" iff it is a
# permutation of "abc" (sorting the characters normalises the order).
a = list(input())
a.sort()
a = "".join(a)
if a == "abc":
    print("Yes")
else:
    print("No")
|
import json
import sqlite3
import time
import shelve
import os
from constants import *
# SQL Stuff
# SQL statements.  Table names cannot be bound as parameters, so they are
# interpolated with str.format; embedded double quotes are escaped at the
# call sites before formatting.
LIST_TABLES = """
SELECT name FROM sqlite_master WHERE type='table';
"""
CREATE_TABLE = '''
CREATE TABLE "{}"
(ID TEXT PRIMARY KEY NOT NULL,
num TEXT NOT NULL);
'''
INSERT = """
INSERT OR IGNORE INTO "{}" (ID, num)
VALUES (?, ?);
"""
SELECT = """
SELECT * FROM "{}"
WHERE ID = ?;
"""


class SqlMap(object):
    """A dict-like string-to-string map backed by a SQLite table.

    Writes use INSERT OR IGNORE, so an existing key is never overwritten.
    Call save() to commit pending writes and close() when done.
    """

    def __init__(self, name, url_path):
        self._name = name
        self._conn = sqlite3.connect(url_path)
        self._cur = self._conn.cursor()
        # Create the backing table on first use.
        self._cur.execute(LIST_TABLES)
        tables = self._cur.fetchall()
        if name not in [val[0] for val in tables]:
            self._conn.execute(CREATE_TABLE.format(self._name.replace('"', '""')))

    def __setitem__(self, index, edges):
        try:
            self._conn.execute(INSERT.format(self._name.replace('"', '""')), (index, edges))
        except sqlite3.Error as e:  # narrowed from a blanket Exception catch
            print(e)
            print("Update Failed")

    def __getitem__(self, index):
        """Return the stored value for *index*, or None when absent.

        Uses fetchone() instead of the original fetchall()[0][1], which
        materialised the whole result set and relied on a swallowed
        IndexError for the missing-key case.
        """
        self._cur.execute(SELECT.format(self._name.replace('"', '""')), (index,))
        row = self._cur.fetchone()
        return row[1] if row is not None else None

    def save(self):
        """Commit all pending writes."""
        self._conn.commit()

    def close(self):
        """Close the connection (uncommitted writes are lost)."""
        self._conn.close()
"""
vec = Vector("yoav_table", 0, EDGES_VECTOR_PATH)
print(vec[0])
vec[0] = "yo"
print(vec[0])
vec.save()
"""
|
import pygame
from engine.gameobject import GameObject
from game.display import Display
class Permanent(GameObject):
    """A card-sized game object that pops up a Display of its original
    (unscaled) sprite when clicked."""

    def __init__(self, sprite, grid_pos, parent, app):
        # Fixed on-screen card size; grid cells are padded by 2 px.
        self.size = (111, 155)
        cell_w = self.size[0] + 2
        cell_h = self.size[1] + 2
        position = (cell_w * grid_pos[0], cell_h * grid_pos[1])
        GameObject.__init__(self, sprite, position, parent, app)
        self.old_sprite = self.sprite  # keep the unscaled original for pop-up
        self.sprite = pygame.transform.scale(self.sprite, self.size)
        self.rec = self.sprite.get_rect()

    def update(self, delta_time):
        GameObject.update(self, delta_time)
        mouse_x, mouse_y = self.app.mouse
        within_y = self.rec.top < mouse_y < self.rec.bottom
        within_x = self.rec.left < mouse_x < self.rec.right
        if within_y and within_x and self.app.mouse_click:
            # Clicked inside the card: show the full-resolution sprite.
            Display(self.old_sprite, self.app)

    def draw(self, screen):
        GameObject.draw(self, screen)
|
# Copyright (C) 2014 Yellow Feather Ltd
#
# The following terms apply to all files associated
# with the software unless explicitly disclaimed in individual files.
#
# The authors hereby grant permission to use, copy, modify, distribute,
# and license this software and its documentation for any purpose, provided
# that existing copyright notices are retained in all copies and that this
# notice is included verbatim in any distributions. No written agreement,
# license, or royalty fee is required for any of the authorized uses.
# Modifications to this software may be copyrighted by their authors
# and need not follow the licensing terms described here, provided that
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
import aviosys
import sys
dev = aviosys.Aviosys8800()
dev.open()
dev.init()
loop = 1
while loop <= 1:
print " "
print "interface is ",
status = dev.getStatus()
if status == True:
print "ON"
else:
print "OFF"
print " "
print "command: ( + - ): ",
command = sys.stdin.readline()
if len(command) <= 1:
dev.close()
print "--USB switch closed"
exit(0)
if command.find("+") >= 0:
print "--switching ON"
dev.turnOn()
if command.find("-") >= 0:
print "--switching OFF"
dev.turnOff()
continue
|
import numpy as np
import pyjacob # no need to specify location, this is done in the main script file (thus selecting the pyjacob.so created with the correct mechanism )
import scipy.linalg as LA
import cantera as ct
import pdb
import matplotlib.pyplot as plt
import csv
import pandas as pd
def csv_append(line, path):
    """Write *line* as a single CSV row to *path*.

    NOTE(review): despite the name, mode 'wb' TRUNCATES the file, so
    every call overwrites previous contents and the file always holds
    exactly one row.  solve_eig_track_no_update() reads it back with
    ravel() and assumes a single row, so switching to append mode would
    break that reader -- confirm intent before renaming or changing the
    mode.  (Binary-mode csv writing also marks this file as Python 2.)
    """
    with open(path, 'wb') as csv_file:
        writer = csv.writer(csv_file, delimiter=',')
        writer.writerow(line)
def solve_eig_gas(gas):
    """Eigen-decompose the chemical Jacobian of a cantera gas state.

    Returns (eigenvalues, left eigenvectors, right eigenvectors), real
    parts only.  Zero eigenvalues are intentionally NOT removed here --
    deleting them before computing explosive indices would desynchronise
    the eigenvector columns.
    """
    temperature = gas.T
    pressure = gas.P
    # pyjac state vector: temperature first, then all mass fractions
    # except the last species.
    state = np.zeros(gas.n_species)
    state[0] = temperature
    state[1:] = gas.Y[:-1]
    # pyjac needs a dy/dt evaluation before the Jacobian call.
    dstate = np.zeros_like(state)
    pyjacob.py_dydt(0, pressure, state, dstate)
    # Evaluate the Jacobian as a flat vector, then reshape to a matrix.
    jac = np.zeros(gas.n_species * gas.n_species)
    pyjacob.py_eval_jacobian(0, pressure, state, jac)
    jac = jac.reshape(gas.n_species, gas.n_species)
    # Full eigenvalue problem with both left and right eigenvectors.
    D, vl, vr = LA.eig(jac, left=True)
    return D.real, vl.real, vr.real
def highest_val_excl_0(vect, N_val):
    """Return the N_val largest entries of *vect* in ascending order,
    ignoring entries that are exactly zero."""
    nonzero = np.delete(vect, np.where(vect == 0.0))
    return np.sort(nonzero)[-N_val:]
def EI(D, l, r, k):
    """Explosive Index of eigenmode k.

    D is the 1D eigenvalue array matching the l(eft) and r(ight)
    eigenvector matrices (columns are eigenvectors); D itself is kept in
    the signature for interface compatibility.  Returns the normalised
    vector of |left_j * right_j| contributions for mode k.
    """
    # Vectorised |a_j * b_j| instead of an explicit component loop.
    contributions = np.abs(r[:, k] * l[:, k])
    return contributions / contributions.sum()
# def PI()
def check_alignment(alignment, eig2track, loc, ei_previous):
    """Flag grid points where the eigenvector match is ambiguous.

    Returns 1 when the two best alignment scores are within 1% of each
    other (a "hard" point where the mode follower may pick the wrong
    eigenvector), else 0.

    Fix: the original only returned 1 in the ambiguous branch and fell
    off the end (returning None) otherwise; callers assign the result
    into a float numpy array, which cannot hold None.

    eig2track, loc and ei_previous are unused but kept for interface
    compatibility (they fed the commented-out diagnostics).
    """
    best = np.amax(alignment)
    runner_up = np.sort(alignment)[-2]
    if abs(best - runner_up) / best < 0.01:
        return 1
    return 0
def solve_eig_flame(f, gas, fitting, eig2track=-1):
    """Track one explosive eigenmode (CEM) across a 1D cantera flame.

    f         -- cantera 1D flame object (grid, T, Y, P)
    gas       -- cantera gas object for the same mechanism
    fitting   -- 'mac' or 'cos': eigenvector-matching criterion between
                 neighbouring grid points
    eig2track -- sorted-eigenvalue index of the mode to follow
                 (-1 = most positive eigenvalue)

    Returns (eig_CEM, global_expl_indices, track_specs, start_loc,
    eigenvalues, hard_points).  NOTE(review): Python 2 file (trailing
    print statement; map() here returns a list -- under Python 3 it
    would be a lazy iterator).
    """
    N_eig = 9  # number of eigenvalues stored per flame location
    N_EI = 3   # number of top-EI species to track
    T = f.T    # 1D array of temperatures
    Y = f.Y    # species x grid-point matrix of mass fractions
    P = f.P    # single value
    n_species = gas.n_species
    grid_pts = len(f.grid)
    # N_eig largest eigenvalues at every grid point
    eigenvalues = np.zeros([N_eig, grid_pts])
    hard_points = np.zeros([grid_pts])
    # species indices to track (highest EI); list -> np.array via union1d
    track_specs = []
    # explosive indices along the whole flame
    global_expl_indices = np.zeros([n_species, grid_pts])
    # eigenvalue of the followed mode at each point
    eig_CEM = np.empty(grid_pts)
    # PASS 1: eigenvalue spectrum at every grid point
    for loc in range(grid_pts):
        y = np.zeros(n_species)
        y[0] = T[loc]
        # pyjac state vector drops N2
        N2_idx = gas.species_index('N2')
        y_massfr = np.concatenate([Y[0:N2_idx, loc], Y[N2_idx + 1:, loc]])
        y[1:] = y_massfr
        jac = create_jacobian(T, P, y)
        D, L, R = LA.eig(jac, left=True)
        D = D.real
        eigenvalues[:, loc] = D[np.argsort(D)[-N_eig:]]
        # (commented-out eigenvector plotting diagnostics removed; see
        # version history)
    # Start following from the global maximum eigenvalue position.
    start_loc = np.argmax(eigenvalues[-1, :])
    # FORWARD FOLLOWING from start_loc to the end of the domain
    for loc in range(start_loc, grid_pts):
        y = np.zeros(n_species)
        y[0] = T[loc]
        # careful: N2 must precede AR/HE in the mechanism species order
        N2_idx = gas.species_index('N2')
        y_massfr = np.concatenate([Y[0:N2_idx, loc], Y[N2_idx + 1:, loc]])
        y[1:] = y_massfr
        jac = create_jacobian(T, P, y)
        D, L, R = LA.eig(jac, left=True)
        D = D.real
        if loc == start_loc:
            # seed the follower with the EI of the requested mode
            start_eig_idx = np.argsort(D)[eig2track]
            ei_previous = EI(D, L, R, start_eig_idx)  # false previous
        alignment = np.zeros(len(D))
        mac = np.zeros(len(D))
        for idx in range(len(D)):
            ei_tested = EI(D, L, R, idx)
            alignment[idx] = parallelism(ei_tested, ei_previous)
            mac[idx] = MAC(ei_tested, ei_previous)
        # flag ambiguous matches (two near-equal alignment scores)
        hard_points[loc] = check_alignment(alignment, eig2track, loc, ei_previous)
        if fitting == 'mac':
            best_fit_idx = np.argmax(mac)
        elif fitting == 'cos':
            best_fit_idx = np.argmax(alignment)
        ei_current = EI(D, L, R, best_fit_idx)
        main_species_local = np.argsort(ei_current)[-N_EI:]
        track_specs = np.union1d(main_species_local, track_specs)
        # store followed EI eigenvalue (CEM) and EI
        global_expl_indices[:, loc] = ei_current
        eig_CEM[loc] = D[best_fit_idx]
        ei_previous = ei_current
    # BACKWARDS FOLLOWING from start_loc down to the cold boundary
    for loc in range(start_loc, -1, -1):
        y = np.zeros(n_species)
        y[0] = T[loc]
        N2_idx = gas.species_index('N2')
        y_massfr = np.concatenate([Y[0:N2_idx, loc], Y[N2_idx + 1:, loc]])
        y[1:] = y_massfr
        jac = create_jacobian(T, P, y)
        D, L, R = LA.eig(jac, left=True)
        D = D.real
        if loc == start_loc:
            if eig2track != -1:
                # otherwise reuse start_eig_idx from the forward pass
                start_eig_idx = np.argsort(D)[eig2track]
            ei_previous = EI(D, L, R, start_eig_idx)  # false previous
        alignment = np.zeros(len(D))
        mac = np.zeros(len(D))
        for idx in range(len(D)):
            ei_tested = EI(D, L, R, idx)
            alignment[idx] = parallelism(ei_tested, ei_previous)
            mac[idx] = MAC(ei_tested, ei_previous)
        hard_points[loc] = check_alignment(alignment, eig2track, loc, ei_previous)
        if fitting == 'mac':
            best_fit_idx = np.argmax(mac)
        elif fitting == 'cos':
            best_fit_idx = np.argmax(alignment)
        ei_current = EI(D, L, R, best_fit_idx)
        main_species_local = np.argsort(ei_current)[-N_EI:]
        track_specs = np.union1d(main_species_local, track_specs)
        # store followed EI eigenvalue (CEM) and EI
        global_expl_indices[:, loc] = ei_current
        eig_CEM[loc] = D[best_fit_idx]
        ei_previous = ei_current
    track_specs = map(int, track_specs)  # Python 2: returns a list
    print start_loc, 'was start loc '
    return eig_CEM, global_expl_indices, track_specs, start_loc, eigenvalues, hard_points
def solve_eig_flame_OLD(f, gas, switch_x1, switch_x2):
    """Older CEM extraction with hand-patched mode switching.

    Instead of following eigenvectors, the sorted-eigenvalue index
    (`order`) of the tracked mode is switched by hand at grid abscissas
    switch_x1 and switch_x2 (mechanism-specific patching, e.g. methane
    gri30.cti).  Returns (eigenvalues, global_expl_indices, track_specs,
    eig_CEM).

    NOTE(review): when loc equals a switch index exactly, none of the
    three conditions fires and `order` keeps its previous value (or is
    unbound on the first iteration) -- confirm the grid never hits the
    boundary exactly.
    """
    N_eig = 8  # number of eigenvalues stored per flame location
    N_EI = 1   # number of top-EI species to track
    T = f.T    # 1D array of temperatures
    Y = f.Y    # species x grid-point matrix of mass fractions
    P = f.P    # single value
    n_species = gas.n_species
    grid_pts = len(f.grid)
    eigenvalues = np.zeros([N_eig, grid_pts])
    track_specs = []  # list -> np.array via np.union1d
    global_expl_indices = np.zeros([n_species, grid_pts])
    eig_CEM = []
    # iterate over the x-span of the 1D flame domain
    for loc in range(grid_pts):
        y = np.zeros(n_species)
        y[0] = T[loc]
        # pyjac state vector drops N2
        N2_idx = gas.species_index('N2')
        y_massfr = np.concatenate([Y[0:N2_idx, loc], Y[N2_idx + 1:, loc]])
        y[1:] = y_massfr
        jac = create_jacobian(T, P, y)
        D, vl, vr = LA.eig(jac, left=True)
        D = D.real
        # the N most positive eigenvalues
        eigenvalues_loc = D[np.argsort(D)[-N_eig:]]
        # hand-patching of the tracked mode per domain zone
        switch_idx1 = np.where(f.grid > switch_x1)
        switch_idx2 = np.where(f.grid > switch_x2)
        # `order` = sorted-eigenvalue index of the mode tracked in this zone
        if loc < switch_idx1[0][0]:
            order = -1
        if loc > switch_idx1[0][0] and loc < switch_idx2[0][0]:
            order = -6
        if loc > switch_idx2[0][0]:
            order = -7
        ei_idx = np.argsort(D)[order]
        # explosive indices of the chemical explosive mode at this x
        expl_indices = EI(D, vl, vr, ei_idx)
        # N_EI most important species (pyjac index order -- convert with
        # reorder_species before using as cantera indices)
        main_species_local = np.argsort(expl_indices)[-N_EI:]
        # union with the species already tracked
        track_specs = np.union1d(main_species_local, track_specs)
        global_expl_indices[:, loc] = expl_indices
        eigenvalues[:, loc] = eigenvalues_loc
        # store the patched CEM eigenvalue
        eig_CEM.append(D[np.argsort(D)[order]])
    track_specs = track_specs.astype(np.int64)
    return eigenvalues, global_expl_indices, track_specs, eig_CEM
# def
# EI_keys = ['']*gas.n_species
# EI_keys[0] = 'T'
# for i in range(1,gas.n_species):
# EI_keys[i] = gas.species_name(i-1)
# print gas.species_name(i)
# print EI_keys
def reorder_species(pyjac_indices, N2_idx):
    """Convert pyjac state-vector indices back to cantera species indices.

    pyjac's state vector is [T, species... minus N2], so every species
    index at or below the original N2 position shifts down by one, and
    index 0 (temperature) maps to the sentinel -1.
    Example: cantera species [O H N2 CH3 CH4] -> pyjac vector
    [T O H CH3 CH4]; pyjac indices <= N2 position need "-1".

    Fix: the original aliased the input (cantera_idx = pyjac_indices)
    and mutated it in place, corrupting the caller's pyjac-notation list
    (the old in-code warning "passed as reference and modified" flagged
    exactly this).  We now work on a copy and leave the argument intact.
    """
    cantera_idx = list(pyjac_indices)  # copy: never mutate the caller's list
    for i in range(len(cantera_idx)):
        if cantera_idx[i] == 0:
            # temperature showed up among the highest explosive indices
            print("Temperature is EI --> check that functionality works")
            cantera_idx[i] = -1
        elif cantera_idx[i] <= N2_idx:
            cantera_idx[i] -= 1
    return cantera_idx
def list_spec_names(cantera_order, gas):
    """Translate cantera species indices to species names.

    The sentinel -1 denotes temperature and maps to 'T'; every other
    index is resolved through gas.species_name().
    """
    return ['T' if idx == -1 else gas.species_name(idx)
            for idx in cantera_order]
def get_species_names(tracked_species_idx, gas):
    """Build a {species_name: pyjac_index} dictionary for tracked species.

    tracked_species_idx is in pyjac notation (slot 0 is temperature, N2
    removed from the species list).

    NOTE(review): verify that reorder_species() does not mutate
    tracked_species_idx before the zip below -- as historically written
    it aliased its argument, so the "pyjac" values zipped into the dict
    may already have been shifted.
    """
    N2_idx = gas.species_index('N2')
    # revert pyjac ordering back to cantera species indices
    cantera_species_idx = reorder_species(tracked_species_idx, N2_idx)
    # species names corresponding to cantera_species_idx
    species_names = list_spec_names(cantera_species_idx, gas)
    # names (keys) mapped to pyjac indices (values)
    dictionary = dict(zip(species_names, tracked_species_idx))
    # temperature is always slot 0 in pyjac notation
    dictionary['T'] = 0
    return dictionary
def get_names(gas):
    """Names of the pyjac state-vector entries: 'T' first, then every
    species in the mechanism except N2 (which pyjac drops)."""
    return ['T'] + [gas.species_name(i)
                    for i in range(gas.n_species)
                    if gas.species_name(i) != 'N2']
def create_jacobian(T, P, y):
    """Evaluate the pyjac chemical Jacobian for state vector y at
    pressure P, returned as an (n, n) matrix.

    T is unused but kept in the signature for interface compatibility.
    """
    n = len(y)
    # pyjac requires a dy/dt evaluation before the Jacobian call
    rates = np.zeros_like(y)
    pyjacob.py_dydt(0, P, y, rates)
    # flat Jacobian vector, reshaped to a square matrix
    flat_jac = np.zeros(n * n)
    pyjacob.py_eval_jacobian(0, P, y, flat_jac)
    return flat_jac.reshape(n, n)
def setSolutionProperties(gas, Z, press=1):
    """Set `gas` to a linear blend (in mixture fraction Z) of a fixed
    vitiated oxidiser (Z=0) and a fresh C2H4 jet at phi=0.8 (Z=1).

    press is in atm.  Returns (gas, phi) with phi the local equivalence
    ratio.  Python 2 (print statements).
    NOTE(review): the Z=0 composition is hard-coded to values that
    get_propane_vitiated_gas() computes -- confirm they stay in sync.
    """
    # Z0 (oxidiser) side mixture
    T_Z0 = 1500
    C2H4_Z0 = 0.0
    CO2_Z0 = 0.15988194019
    O2_Z0 = 0.0289580664922
    N2_Z0 = 0.723951662304
    H2O_Z0 = 0.087208331013
    phi_ZO = 0
    # Z1 (fuel) side mixture
    Z1compo = getEthyleneJetCompo(.8)  # phi_j = 0.8, 1.0, 1.2
    T_Z1 = 300
    C2H4_Z1 = Z1compo['C2H4']
    CO2_Z1 = 0.0
    O2_Z1 = Z1compo['O2']
    N2_Z1 = Z1compo['N2']
    H2O_Z1 = 0.0
    # mass stoichiometric ratio for C2H4 is 28/96 (1 C2H4 : 3 O2);
    # methane was 1/4, C3H8 is 44/160
    phi_Z1 = C2H4_Z1 / O2_Z1 * 96 / 28
    print phi_Z1, 'phi of the jet'
    # linear interpolation in Z between the two end states
    # (uniform-Cp approximation for the temperature)
    Tempi = ((T_Z0 - T_Z1) / (0.0 - 1.0)) * Z + T_Z0
    yc2h4 = ((C2H4_Z0 - C2H4_Z1) / (0.0 - 1.0)) * Z + C2H4_Z0
    yco2 = ((CO2_Z0 - CO2_Z1) / (0.0 - 1.0)) * Z + CO2_Z0
    yo2 = ((O2_Z0 - O2_Z1) / (0.0 - 1.0)) * Z + O2_Z0
    yn2 = ((N2_Z0 - N2_Z1) / (0.0 - 1.0)) * Z + N2_Z0
    yh2o = ((H2O_Z0 - H2O_Z1) / (0.0 - 1.0)) * Z + H2O_Z0
    phi = yc2h4 / yo2 * 96 / 28
    # composition string in cantera TPY format
    compo = "C2H4:" + str(yc2h4) + " O2:" + str(yo2) + " N2:" + str(yn2) + " CO2:" + str(yco2) + " H2O:" + str(yh2o)
    print " - C2H4 mass fraction: " + str(yc2h4)
    print " - CO2 mass fraction: " + str(yco2)
    print " - O2 mass fraction : " + str(yo2)
    print " - N2 mass fraction : " + str(yn2)
    print " - H2O mass fraction: " + str(yh2o)
    print " - sum mass fraction: " + str(yc2h4 + yo2 + yn2 + yco2 + yh2o), "\n \n"
    print(Tempi)
    gas.TPY = Tempi, press * 1.01325e5, compo
    return gas, phi
def getEthyleneJetCompo(phi):
    """Mass-fraction composition of a fresh C2H4/air jet at equivalence
    ratio phi.

    Stoichiometry: C2H4 + 3 O2 -> 2 CO2 + 2 H2O, with dry air taken as
    0.21 O2 / 0.78 N2 by mole (the remainder neglected).  Everything is
    computed in moles, then converted to mass via molar masses.

    Returns a dict with mass fractions for 'O2', 'N2' and 'C2H4'.
    """
    # molar masses [g/mol]
    M_C2H4 = 2 * 12.0 + 4 * 1.0
    M_O2 = 2 * 16.0
    M_N2 = 2 * 14.0
    # stoichiometric combustion: 1 mole C2H4 per 3 moles O2
    fuel_per_O2 = 1 / 3.0
    # moles of fresh gases, with fuel as the reference (1 mole)
    moles_fuel = 1
    moles_O2 = 1 / fuel_per_O2 / phi
    moles_N2 = 0.78 / 0.21 * moles_O2
    # convert to masses
    mass_fuel = moles_fuel * M_C2H4
    mass_O2 = moles_O2 * M_O2
    mass_N2 = moles_N2 * M_N2
    total_mass = mass_N2 + mass_O2 + mass_fuel
    # normalise to mass fractions
    return {'O2': mass_O2 / total_mass,
            'N2': mass_N2 / total_mass,
            'C2H4': mass_fuel / total_mass}
def parallelism(v1, v2):
    """Cosine similarity between v1 and v2, in [-1, 1]; 1 means the
    vectors are parallel."""
    return np.dot(v1, v2) / (LA.norm(v1) * LA.norm(v2))
def MAC(v1, v2):
    """Modal Assurance Criterion: (v1.v2)^2 / (|v1|^2 |v2|^2), in [0, 1];
    1 means the mode shapes coincide."""
    overlap = np.dot(v1, v2)
    return overlap * overlap / (np.dot(v1, v1) * np.dot(v2, v2))
def solve_eig_flame_track_update(f, gas, fitting, file_max_ei):
    """Track the most positive eigenmode (CEM) across a 1D flame,
    updating the reference EI vector at every grid point, and save the
    seed EI row to file_max_ei (read back by solve_eig_track_no_update).

    f           -- cantera 1D flame object (grid, T, Y, P)
    gas         -- cantera gas object for the same mechanism
    fitting     -- 'mac' or 'cos': eigenvector-matching criterion
    file_max_ei -- csv path the starting EI row is written to

    Returns (eig_CEM, global_expl_indices, track_specs, start_loc,
    eigenvalues, hard_points).  Python 2 file (print statement; map()
    returns a list).
    """
    N_eig = 9  # number of eigenvalues stored per flame location
    N_EI = 3   # number of top-EI species to track
    eig2track = -1  # always follow the most positive eigenvalue
    T = f.T  # 1D array of temperatures
    Y = f.Y  # species x grid-point matrix of mass fractions
    P = f.P  # single value
    n_species = gas.n_species
    grid_pts = len(f.grid)
    # N_eig largest eigenvalues at every grid point
    eigenvalues = np.zeros([N_eig, grid_pts])
    hard_points = np.zeros([grid_pts])
    # species indices to track (highest EI); list -> np.array via union1d
    track_specs = []
    # explosive indices along the whole flame
    global_expl_indices = np.zeros([n_species, grid_pts])
    # eigenvalue of the followed mode at each point
    eig_CEM = np.empty(grid_pts)
    # PASS 1: eigenvalue spectrum at every grid point
    for loc in range(grid_pts):
        y = np.zeros(n_species)
        y[0] = T[loc]
        # pyjac state vector drops N2
        N2_idx = gas.species_index('N2')
        y_massfr = np.concatenate([Y[0:N2_idx, loc], Y[N2_idx + 1:, loc]])
        y[1:] = y_massfr
        jac = create_jacobian(T, P, y)
        D, L, R = LA.eig(jac, left=True)
        D = D.real
        eigenvalues[:, loc] = D[np.argsort(D)[-N_eig:]]
        # (commented-out eigenvector plotting diagnostics removed; see
        # version history)
    # Start at the global maximum eigenvalue position.
    # NOTE(review): the first assignment below is dead -- it is
    # immediately overwritten (identical result since eig2track == -1).
    start_loc = np.argmax(eigenvalues[eig2track, :])
    start_loc = np.argmax(eigenvalues[-1, :])
    # FORWARD FOLLOWING from start_loc to the end of the domain
    for loc in range(start_loc, grid_pts):
        y = np.zeros(n_species)
        y[0] = T[loc]
        # careful: N2 must precede AR/HE in the mechanism species order
        N2_idx = gas.species_index('N2')
        y_massfr = np.concatenate([Y[0:N2_idx, loc], Y[N2_idx + 1:, loc]])
        y[1:] = y_massfr
        jac = create_jacobian(T, P, y)
        D, L, R = LA.eig(jac, left=True)
        D = D.real
        if loc == start_loc:
            # seed the follower and persist the seed EI row
            start_eig_idx = np.argsort(D)[eig2track]
            ei_previous = EI(D, L, R, start_eig_idx)  # false previous
            csv_append(ei_previous, file_max_ei)
        alignment = np.zeros(len(D))
        mac = np.zeros(len(D))
        for idx in range(len(D)):
            ei_tested = EI(D, L, R, idx)
            alignment[idx] = parallelism(ei_tested, ei_previous)
            mac[idx] = MAC(ei_tested, ei_previous)
        # flag ambiguous matches (two near-equal alignment scores)
        hard_points[loc] = check_alignment(alignment, eig2track, loc, ei_previous)
        if fitting == 'mac':
            best_fit_idx = np.argmax(mac)
        elif fitting == 'cos':
            best_fit_idx = np.argmax(alignment)
        ei_current = EI(D, L, R, best_fit_idx)
        main_species_local = np.argsort(ei_current)[-N_EI:]
        track_specs = np.union1d(main_species_local, track_specs)
        # store followed EI eigenvalue (CEM) and EI
        global_expl_indices[:, loc] = ei_current
        eig_CEM[loc] = D[best_fit_idx]
        ei_previous = ei_current
    # BACKWARDS FOLLOWING from start_loc down to the cold boundary
    for loc in range(start_loc, -1, -1):
        y = np.zeros(n_species)
        y[0] = T[loc]
        N2_idx = gas.species_index('N2')
        y_massfr = np.concatenate([Y[0:N2_idx, loc], Y[N2_idx + 1:, loc]])
        y[1:] = y_massfr
        jac = create_jacobian(T, P, y)
        D, L, R = LA.eig(jac, left=True)
        D = D.real
        if loc == start_loc:
            if eig2track != -1:
                # otherwise reuse start_eig_idx from the forward pass
                start_eig_idx = np.argsort(D)[eig2track]
            ei_previous = EI(D, L, R, start_eig_idx)  # false previous
        alignment = np.zeros(len(D))
        mac = np.zeros(len(D))
        for idx in range(len(D)):
            ei_tested = EI(D, L, R, idx)
            alignment[idx] = parallelism(ei_tested, ei_previous)
            mac[idx] = MAC(ei_tested, ei_previous)
        hard_points[loc] = check_alignment(alignment, eig2track, loc, ei_previous)
        if fitting == 'mac':
            best_fit_idx = np.argmax(mac)
        elif fitting == 'cos':
            best_fit_idx = np.argmax(alignment)
        ei_current = EI(D, L, R, best_fit_idx)
        main_species_local = np.argsort(ei_current)[-N_EI:]
        track_specs = np.union1d(main_species_local, track_specs)
        # store followed EI eigenvalue (CEM) and EI
        global_expl_indices[:, loc] = ei_current
        eig_CEM[loc] = D[best_fit_idx]
        ei_previous = ei_current
    track_specs = map(int, track_specs)  # Python 2: returns a list
    print start_loc, 'was start loc '
    return eig_CEM, global_expl_indices, track_specs, start_loc, eigenvalues, hard_points
def solve_eig_track_no_update(f, gas, fitting, file_max_ei):
    """Match every grid point against a FIXED reference EI vector.

    The reference row is read from file_max_ei (written earlier by
    solve_eig_flame_track_update).  Unlike the *_update variants the
    reference is never replaced while walking the flame, and only the
    cosine alignment is used for selection -- `fitting` is accepted for
    interface parity but not consulted here.

    Returns (eig_CEM, global_expl_indices, track_specs, start_loc,
    eigenvalues, hard_points).  Python 2 file (map() returns a list).
    """
    N_eig = 9  # number of eigenvalues stored per flame location
    N_EI = 3   # number of top-EI species to track
    eig2track = -1
    T = f.T  # 1D array of temperatures
    Y = f.Y  # species x grid-point matrix of mass fractions
    P = f.P  # single value
    n_species = gas.n_species
    grid_pts = len(f.grid)
    # N_eig largest eigenvalues at every grid point
    eigenvalues = np.zeros([N_eig, grid_pts])
    hard_points = np.zeros([grid_pts])
    # species indices to track (highest EI); list -> np.array via union1d
    track_specs = []
    # explosive indices along the whole flame
    global_expl_indices = np.zeros([n_species, grid_pts])
    # eigenvalue of the followed mode at each point
    eig_CEM = np.empty(grid_pts)
    # fixed reference EI vector: the stored csv row, flattened
    df = pd.read_csv(file_max_ei, sep=',', header=None)
    EI_max = df.values.ravel()
    # PASS 1: eigenvalue spectrum at every grid point
    for loc in range(grid_pts):
        y = np.zeros(n_species)
        y[0] = T[loc]
        # pyjac state vector drops N2
        N2_idx = gas.species_index('N2')
        y_massfr = np.concatenate([Y[0:N2_idx, loc], Y[N2_idx + 1:, loc]])
        y[1:] = y_massfr
        jac = create_jacobian(T, P, y)
        D, L, R = LA.eig(jac, left=True)
        D = D.real
        eigenvalues[:, loc] = D[np.argsort(D)[-N_eig:]]
        # (commented-out eigenvector plotting diagnostics removed; see
        # version history)
    # position of the maximum eigenvalue (reported to the caller only)
    start_loc = np.argmax(eigenvalues[eig2track, :])
    # Scan the whole 1D domain against the fixed reference
    for loc in range(grid_pts):
        y = np.zeros(n_species)
        y[0] = T[loc]
        # careful: N2 must precede AR/HE in the mechanism species order
        N2_idx = gas.species_index('N2')
        y_massfr = np.concatenate([Y[0:N2_idx, loc], Y[N2_idx + 1:, loc]])
        y[1:] = y_massfr
        jac = create_jacobian(T, P, y)
        D, L, R = LA.eig(jac, left=True)
        D = D.real
        alignment = np.zeros(len(D))
        for idx in range(len(D)):
            ei_tested = EI(D, L, R, idx)
            alignment[idx] = parallelism(ei_tested, EI_max)
        # flag ambiguous matches (two near-equal alignment scores)
        hard_points[loc] = check_alignment(alignment, eig2track, loc, EI_max)
        best_fit_idx = np.argmax(alignment)
        ei_current = EI(D, L, R, best_fit_idx)
        main_species_local = np.argsort(ei_current)[-N_EI:]
        track_specs = np.union1d(main_species_local, track_specs)
        # store followed EI eigenvalue (CEM) and EI
        global_expl_indices[:, loc] = ei_current
        eig_CEM[loc] = D[best_fit_idx]
    track_specs = map(int, track_specs)  # Python 2: returns a list
    return eig_CEM, global_expl_indices, track_specs, start_loc, eigenvalues, hard_points
def set_mixture_wagner(gas, Z, phi_j, press=1):
    """Set `gas` to a linear blend (in mixture fraction Z) of a propane
    vitiated-atmosphere oxidiser (Z=0) and a fresh C2H4 jet at
    equivalence ratio phi_j (Z=1).

    press is in atm.  Returns (gas, phi, Tempi): the configured gas, the
    local equivalence ratio, and the blended temperature (uniform-Cp
    approximation for the linear T interpolation).  Python 2 (print
    statements).
    """
    # Z0 (oxidiser) side: vitiated products of lean propane combustion
    Z0compo = get_propane_vitiated_gas()
    T_Z0 = 1500
    C2H4_Z0 = 0.0
    CO2_Z0 = Z0compo['CO2']  # 0.15988194019
    O2_Z0 = Z0compo['O2']    # 0.0289580664922
    N2_Z0 = Z0compo['N2']    # 0.723951662304
    H2O_Z0 = Z0compo['H2O']  # 0.087208331013
    phi_ZO = 0
    # Z1 (fuel) side: fresh ethylene jet
    Z1compo = getEthyleneJetCompo(phi_j)  # phi_j = 0.8, 1.0, 1.2
    T_Z1 = 300
    C2H4_Z1 = Z1compo['C2H4']
    CO2_Z1 = 0.0
    O2_Z1 = Z1compo['O2']
    N2_Z1 = Z1compo['N2']
    H2O_Z1 = 0.0
    # mass stoichiometric ratio for C2H4 is 28/96 (1 C2H4 : 3 O2)
    phi_Z1 = C2H4_Z1 / O2_Z1 * 96 / 28
    print phi_Z1, 'phi of the jet'
    # linear interpolation in Z between the two end states
    Tempi = ((T_Z0 - T_Z1) / (0.0 - 1.0)) * Z + T_Z0
    yc2h4 = ((C2H4_Z0 - C2H4_Z1) / (0.0 - 1.0)) * Z + C2H4_Z0
    yco2 = ((CO2_Z0 - CO2_Z1) / (0.0 - 1.0)) * Z + CO2_Z0
    yo2 = ((O2_Z0 - O2_Z1) / (0.0 - 1.0)) * Z + O2_Z0
    yn2 = ((N2_Z0 - N2_Z1) / (0.0 - 1.0)) * Z + N2_Z0
    yh2o = ((H2O_Z0 - H2O_Z1) / (0.0 - 1.0)) * Z + H2O_Z0
    phi = yc2h4 / yo2 * 96 / 28
    # composition string in cantera TPY format
    compo = "C2H4:" + str(yc2h4) + " O2:" + str(yo2) + " N2:" + str(yn2) + " CO2:" + str(yco2) + " H2O:" + str(yh2o)
    print " - C2H4 mass fraction: " + str(yc2h4)
    print " - CO2 mass fraction: " + str(yco2)
    print " - O2 mass fraction : " + str(yo2)
    print " - N2 mass fraction : " + str(yn2)
    print " - H2O mass fraction: " + str(yh2o)
    print " - sum mass fraction: " + str(yc2h4 + yo2 + yn2 + yco2 + yh2o), "\n \n"
    print "Temperature of mixture:"
    print(Tempi)
    gas.TPY = Tempi, press * 1.01325e5, compo
    return gas, phi, Tempi
def getEthyleneJetCompo(phi):
    """Mass-fraction composition of a fresh C2H4/air jet at equivalence
    ratio phi (C2H4 + 3 O2 -> 2 CO2 + 2 H2O; dry air as 0.21 O2 /
    0.78 N2 by mole).  Returns {'O2': ..., 'N2': ..., 'C2H4': ...}.

    NOTE(review): this is an exact duplicate of the getEthyleneJetCompo
    defined earlier in this module; being later, this definition shadows
    the first.  Consider deleting one of them.
    """
    # molar masses [g/mol] (the CO2 / H2O values are computed but unused)
    mm_C2H4 = 2 * 12.0 + 4 * 1.0
    mm_O2 = 2 * 16.0
    mm_CO2 = 12.0 + 2 * 16.0
    mm_H20 = 2 * 1.0 + 16.0
    mm_N2 = 2 * 14.0
    Fuel2Oxygen_mole = 1 / 3.0  # stoechiometric combustion
    # moles of fresh gases, with fuel as the reference (1 mole)
    n_C2H4 = 1
    n_O2 = 1 / Fuel2Oxygen_mole / phi
    n_N2 = 0.78 / 0.21 * n_O2
    # masses of cross-jet components
    m_C2H4 = n_C2H4 * mm_C2H4
    m_O2 = n_O2 * mm_O2
    m_N2 = n_N2 * mm_N2
    # total number of moles and total mass
    n_tot = n_N2 + n_O2 + n_C2H4
    m_tot = m_N2 + m_O2 + m_C2H4
    # mass fractions
    f_O2_mass = m_O2 / m_tot
    f_N2_mass = m_N2 / m_tot
    f_C2H4_mass = m_C2H4 / m_tot
    check_sum = f_N2_mass + f_O2_mass + f_C2H4_mass
    compo = {'O2': f_O2_mass, 'N2': f_N2_mass, 'C2H4': f_C2H4_mass}
    return compo
def get_propane_vitiated_gas():
    """Mass-fraction composition of the vitiated atmosphere produced by
    lean (phi = 0.87) propane/air combustion.

    C3H8 + 5 O2 -> 3 CO2 + 4 H2O, with dry air as 1 O2 : 3.76 N2 by
    mole (remainder neglected).  Everything is computed in moles, then
    converted to mass via molar masses.  Returns a dict with mass
    fractions for 'O2', 'N2', 'CO2' and 'H2O'.  Python 2 (prints).
    """
    # molar masses [g/mol]
    mm_C3H8 = 3 * 12.0 + 8 * 1.0
    mm_O2 = 2 * 16.0
    mm_CO2 = 12.0 + 2 * 16.0
    mm_H20 = 2 * 1.0 + 16.0
    mm_N2 = 2 * 14.0
    phi = 0.87
    FuelOxygen_mole = 1 / 5.0  # stoechiometric combustion
    # moles of fresh gases, with fuel as the reference (1 mole)
    n_C3H8_fresh = 1
    n_O2_fresh = 5 / phi
    n_N2_fresh = 3.76 * n_O2_fresh
    # moles of vitiated atmosphere components after combustion
    n_N2_vitiated = n_N2_fresh
    n_O2_left = n_O2_fresh - n_C3H8_fresh / FuelOxygen_mole
    n_CO2 = 3
    n_H2O = 4
    # masses of vitiated atmosphere components
    m_N2 = n_N2_vitiated * mm_N2
    m_O2_left = n_O2_left * mm_O2
    m_CO2 = n_CO2 * mm_CO2
    m_H20 = n_H2O * mm_H20
    # total number of moles and total mass
    n_tot = n_N2_vitiated + n_O2_left + n_CO2 + n_H2O
    m_tot = m_N2 + m_O2_left + m_CO2 + m_H20
    # mass fractions of vitiated atmosphere components
    f_O2_mass = m_O2_left / m_tot
    f_N2_mass = m_N2 / m_tot
    f_CO2_mass = m_CO2 / m_tot
    f_H2O_mass = m_H20 / m_tot
    check_sum = f_O2_mass + f_N2_mass + f_CO2_mass + f_H2O_mass
    print f_O2_mass, " O2"
    print f_N2_mass, " N2"
    print f_CO2_mass, " CO2"
    print f_H2O_mass, "H2O"
    print "Sum of mass fractions: "
    print check_sum
    print "Mass fraction of unreduced oxygen in vitiated atmosphere (% total)"
    print f_O2_mass * 100
    print "Corresponds to 3% value given"
    compo = {'O2': f_O2_mass, 'N2': f_N2_mass, 'CO2': f_CO2_mass, 'H2O': f_H2O_mass}
    return compo
|
"""
何海兵
"""
from flask import Blueprint
hhb = Blueprint('hhb', __name__)
from . import views
|
"""Forward Kerberos tickets to the cell ticket locker.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import socket
import sys
import click
from treadmill import cli
from treadmill import context
from treadmill import dnsutils
from treadmill import exc
from treadmill import restclient
from treadmill import subproc
_LOGGER = logging.getLogger(__name__)

# Decorator that converts the listed exceptions into friendly CLI errors.
_ON_EXCEPTIONS = cli.handle_exceptions([
    (exc.InvalidInputError, None),
])
def _run(command):
    """Run `command`, echo its output and report success as a bool."""
    try:
        output = subproc.check_output(command)
    except subproc.CalledProcessError as err:
        output = str(err)
        _LOGGER.error('Failure.')
        ok = False
    else:
        _LOGGER.info('Success.')
        ok = True
    # Output (or the error text) is always shown to the user.
    cli.out(output)
    return ok
def _run_tkt_sender(cells):
    """Forward tickets to each cell's ticket locker endpoints.

    :param cells: iterable of cell names.
    :return: True only if every locker was reachable and every send succeeded.
    """
    dns_domain = context.GLOBAL.dns_domain
    first = True
    tktfwd_spn = None
    if os.name == 'nt':
        tktfwd_spn = _tktfwd_spn(dns_domain)
        _LOGGER.debug('Using tktfwd SPN: %s', tktfwd_spn)

    success = True
    for cellname in cells:
        endpoints_v2 = _check_cell(cellname, 'tickets-v2', dns_domain)
        if not endpoints_v2:
            _LOGGER.error('Ticket locker is down for cell: %s', cellname)
            success = False

        # FIX: the original unpacked `hostport` twice and duplicated the
        # command literal in both branches; build the command once instead.
        for idx, (host, port) in enumerate(endpoints_v2):
            _LOGGER.info(
                'Forwarding tickets to cell: %s/%d - %s:%s',
                cellname,
                idx,
                host,
                port
            )
            tkt_cmd = [
                'tkt_send_v2',
                '-h{}'.format(host),
                '-p{}'.format(port),
            ]
            # On Windows, purge stale tickets only on the very first send.
            if os.name == 'nt' and first:
                first = False
                tkt_cmd.append('--purge')
            if tktfwd_spn:
                tkt_cmd.append('--service={}'.format(tktfwd_spn))

            success = _run(tkt_cmd) and success

    return success
def _get_cells():
    """Fetch all cell names from the admin REST API."""
    response = restclient.get(context.GLOBAL.admin_api(), '/cell/')
    return [entry['_id'] for entry in response.json()]
def _check_cell(cellname, appname, dns_domain):
    """Return the sorted list of reachable (host, port) locker endpoints.

    Endpoints are discovered via an SRV record and probed with a 1-second
    TCP connect; unreachable ones are logged and dropped.
    """
    srvrec = '_tickets._tcp.{}.{}.cell.{}'.format(appname,
                                                  cellname,
                                                  dns_domain)
    endpoints = dnsutils.srv(srvrec, context.GLOBAL.dns_server)
    reachable = []
    for host, port, _prio, _weight in endpoints:
        conn = None
        try:
            conn = socket.create_connection((host, port), 1)
            reachable.append((host, port))
        except socket.error:
            _LOGGER.warning('Ticket endpoint [%s] is down: %s:%s',
                            cellname, host, port)
        finally:
            if conn:
                conn.close()
    return sorted(reachable)
def _tktfwd_spn(dns_domain):
    """Return the tktfwd SPN from the domain's TXT record, or None."""
    records = dnsutils.txt(
        'tktfwd.%s' % (dns_domain),
        context.GLOBAL.dns_server
    )
    return records[0] if records else None
def init():
    """Return top level command handler"""
    @click.command()
    @click.option('--cell', help='List of cells',
                  type=cli.LIST)
    @_ON_EXCEPTIONS
    def forward(cell):
        """Forward Kerberos tickets to the cell ticket locker."""
        _LOGGER.setLevel(logging.INFO)
        # No --cell given: forward to every cell known to the admin API.
        cells = cell
        if not cell:
            cells = _get_cells()
        rc = _run_tkt_sender(cells)
        # TODO: it seems like returning from click callback with non-0 does not
        # set the $? correctly.
        sys.exit(0 if rc else 1)
    return forward
|
from flaskbox.config import config
def test_config_get_name(base_app):
    """The configured application name is returned verbatim."""
    expected = base_app[0]['application']['name']
    assert config.get_name(base_app) == expected
def test_config_get_routes(base_app):
    """get_routes returns a list containing exactly one route."""
    routes = config.get_routes(base_app)
    # FIX: split the old tuple-comparison assert
    # `(isinstance(...), len(...)) == (True, 1)` so a failure message
    # pinpoints which condition broke.
    assert isinstance(routes, list)
    assert len(routes) == 1
def test_config_get_fields(base_app):
    """Each route exposes a list of five field objects."""
    fields = None
    routes = config.get_routes(base_app)
    for route in routes:
        fields = config.get_fields(route)
    # FIX: split the old tuple-comparison assert so failures are diagnosable.
    assert isinstance(fields, list)
    assert len(fields) == 5
def test_config_port_not_exists(base_app):
    """A configuration without a port falls back to the default 5000."""
    assert config.get_port(base_app) == 5000
|
from tkinter import *

# --- Global game state for the clicker game ---
root = Tk()
a = 1000000000  # money (banknotes)
x = 10          # banknotes gained per "print money" click
z = 10          # grams produced per "cook" click (lab panel)
y = 0           # grams of product on hand
narko = 0       # set to 1 once the lab is bought (enables button11)
barig = 1       # dealer tier 0..4; sale multiplier is barig + 2 (button18)
prod = 0        # NOTE(review): never read in the visible code -- confirm unused
root.title('Комната в общаге')
root.geometry('1000x1000')
kypil = 0       # NOTE(review): declared in handlers via `global` but never used
def button_clicked():
    """Burn everything: money drops to 0 and per-click income resets to 1."""
    global a
    # BUG FIX: `x = 1` was a dead local assignment -- without `global x`
    # the income reset never took effect.
    global x
    a = 0
    x = 1
    print("Вы всё спалили.")
    text1.delete('1.0', END)
    # FIX: removed a discarded text1.get() call and a stray third argument
    # (`a`) that Tkinter would have treated as a text tag.
    text1.insert(1.0, 'Вы всё спалили')
def button2_clicked():
    """Print money: add the per-click income to the wallet and refresh the label."""
    global a
    global x
    a += x
    print("Кол-во купюр" + ' ' + str(a))
    text1.delete('1.0', END)
    text1.get('1.0', END)
    text1.insert(1.0, 'Кол-во купюр:' + str(a))
def button3_clicked():
    """Upgrade #1: pay 100 banknotes for +2 income per click."""
    global a
    global x
    if a > 100:
        a = a - 100
        x = x + 2
        print('Улучшение куплено.' + ' ' + 'Ваши купюры:' + ' ' + str(a))
        text1.delete('1.0', END)
        text1.insert(1.0, 'Кол-во купюр:' + str(a))
    # BUG FIX: removed `button3 = Button(bg = 'red')` -- it created a brand
    # new, never-placed widget bound to a LOCAL name; it neither recolored
    # the on-screen button3 nor appeared anywhere. If a recolor was intended,
    # use button3.config(bg='red') on the module-level widget instead.
def _make_income_upgrade(cost, gain):
    """Build a click handler that buys an income upgrade.

    The handler pays `cost` banknotes (only when a > cost, matching the
    original strict comparison) and raises per-click income `x` by `gain`.
    """
    def handler():
        global a
        global x
        if a > cost:
            a = a - cost
            x = x + gain
            print('Улучшение куплено.' + ' ' + 'Ваши купюры:' + ' ' + str(a))
            text1.delete('1.0', END)
            text1.insert(1.0, 'Кол-во купюр:' + str(a))
    return handler

# FIX: the five handlers below were copy-pasted bodies differing only in
# cost and gain; they are now generated from one template. Names and
# behavior are unchanged.
button4_clicked = _make_income_upgrade(500, 10)
button5_clicked = _make_income_upgrade(5000, 20)
button6_clicked = _make_income_upgrade(10000, 50)
button7_clicked = _make_income_upgrade(15000, 100)
button8_clicked = _make_income_upgrade(50000, 150)
def button9_clicked():
    """Buy the lab for 100000: +200 income and unlock the production panel."""
    global a
    global x
    global narko
    if a > 100000:
        a = a - 100000
        x = x + 200
        print('Стафф лаба куплена' + ' ' + 'Ваши купюры:' + ' ' + str(a))
        text1.delete('1.0', END)
        text1.get('1.0', END)
        text1.insert(1.0, 'Кол-во купюр:' + str(a))
        # Build the lab widgets, then flip the flag that enables cooking.
        narko_show()
        narko = 1
# --- Main panel widgets: title banner, money printer, burn button, upgrades ---
canvas = Canvas(root, width = 450, height = 50, bg = 'blue')
canvas.create_text(200, 20, text = 'HARDLIVE', font = 14)
canvas.pack()
button = Button(root, bg = 'red', text = "Сжечь в мусорном баке", command = button_clicked, font = 200, fg = 'yellow', width = 50)
button2 = Button(root, bg = 'green', text = "Напечатать деньги", command = button2_clicked, font = 200, fg = 'yellow', width = 50)
button3 = Button(root, bg = 'green', text = "Улучшение №1, Стоимость: 100$", command = button3_clicked, font = 200, fg = 'yellow', width = 50)
button4 = Button(root, bg = 'green', text = "Улучшение №2, Стоимость: 500$", command = button4_clicked, font = 200, fg = 'yellow', width = 50)
button5 = Button(root, bg = 'green', text = "Улучшение №3, Стоимость: 5000$", command = button5_clicked, font = 200, fg = 'yellow', width = 50)
button6 = Button(root, bg = 'green', text = "Улучшение №4, Стоимость: 10000$", command = button6_clicked, font = 200, fg = 'yellow', width = 50)
button7 = Button(root, bg = 'green', text = "Улучшение №5, Стоимость: 15000$", command = button7_clicked, font = 200, fg = 'yellow', width = 50)
button8 = Button(root, bg = 'green', text = "Улучшение №6, Стоимость: 50000$", command = button8_clicked, font = 200, fg = 'yellow', width = 50)
button9 = Button(root, bg = 'green', text = "Купить стафф лабу, Стоимость: 100000$", command = button9_clicked, font = 200, fg = 'yellow', width = 50)
# Status line showing the wallet.
text1=Text(root, height = 1, font='Arial 14', wrap=WORD)
text1.place(x = 10, y = 490)
canvas.pack()  # NOTE(review): pack() was already called above; harmless repeat
button.place(x = 10, y = 60)
button2.place(x = 10, y = 110)
button3.place(x = 10, y = 160)
button4.place(x = 10, y = 210)
button5.place(x = 10, y = 260)
button6.place(x = 10, y = 310)
button7.place(x = 10, y = 360)
button8.place(x = 10, y = 410)
button9.place(x = 10, y = 460)
def narko_show():
def button10_clicked():
global y
global z
y = 0
z = 10
print("Вы все смыли.")
text2.delete('1.0', END)
text2.get('1.0', END)
text2.insert(1.0, 'Вы все смыли' + ' ' + str(y))
text1.delete('1.0', END)
text1.get('1.0', END)
text1.insert(1.0, 'Кол-во купюр:' + str(a))
def button11_clicked():
global y
global z
global narko
if narko > 0:
global y
global z
y = y + z
print("Кол-во купюр" + ' ' + str(y))
text2.delete('1.0', END)
text2.get('1.0', END)
text2.insert(1.0, 'Кол-во граммов:' + str(y))
text1.delete('1.0', END)
text1.get('1.0', END)
text1.insert(1.0, 'Кол-во купюр:' + str(a))
def button12_clicked():
global a
global x
global narko
global z
if a > 10000:
a = a - 10000
z = z + 20
print('Улучшение куплено.' + ' ' + 'Ваши купюры:' + ' ' + str(y))
text2.delete('1.0', END)
text2.get('1.0', END)
text2.insert(1.0, 'Кол-во купюр:' + str(y))
text1.delete('1.0', END)
text1.get('1.0', END)
text1.insert(1.0, 'Кол-во купюр:' + str(a))
def button13_clicked():
global a
global x
global narko
global z
if a > 50000:
a = a - 50000
z = z + 100
print('Улучшение куплено.' + ' ' + 'Ваши купюры:' + ' ' + str(y))
text2.delete('1.0', END)
text2.get('1.0', END)
text2.insert(1.0, 'Кол-во купюр:' + str(y))
text1.delete('1.0', END)
text1.get('1.0', END)
text1.insert(1.0, 'Кол-во купюр:' + str(a))
def button14_clicked():
global a
global x
global narko
global z
if a > 500000:
a = a - 500000
z = z + 200
print('Улучшение куплено.' + ' ' + 'Ваши купюры:' + ' ' + str(y))
text2.delete('1.0', END)
text2.get('1.0', END)
text2.insert(1.0, 'Кол-во купюр:' + str(y))
text1.delete('1.0', END)
text1.get('1.0', END)
text1.insert(1.0, 'Кол-во купюр:' + str(a))
def button15_clicked():
global a
global x
global narko
global z
if y > 1000000:
a = y - 1000000
z = z + 500
print('Улучшение куплено.' + ' ' + 'Ваши купюры:' + ' ' + str(y))
text2.delete('1.0', END)
text2.get('1.0', END)
text2.insert(1.0, 'Кол-во купюр:' + str(y))
text1.delete('1.0', END)
text1.get('1.0', END)
text1.insert(1.0, 'Кол-во купюр:' + str(a))
def button16_clicked():
global a
global x
global narko
global z
if a > 1500000:
a = a - 1500000
z = z + 1000
print('Улучшение куплено.' + ' ' + 'Ваши купюры:' + ' ' + str(y))
text2.delete('1.0', END)
text2.get('1.0', END)
text2.insert(1.0, 'Кол-во :' + str(y))
text1.delete('1.0', END)
text1.get('1.0', END)
text1.insert(1.0, 'Кол-во купюр:' + str(a))
def button17_clicked():
global a
global x
global narko
global barig
global prod
global y
if a > 5000000 and barig == 0:
a = a - 5000000
barig = 1
print('Улучшение куплено.' + ' ' + 'Ваши граммы:' + ' ' + str(y))
text2.delete('1.0', END)
text2.get('1.0', END)
text2.insert(1.0, 'Кол-во граммов:' + str(y))
text1.delete('1.0', END)
text1.get('1.0', END)
text1.insert(1.0, 'Кол-во купюр:' + str(a))
elif a > 5000000 and barig == 1:
a = a - 5000000
barig = 2
print('Улучшение куплено.' + ' ' + 'Ваши граммы:' + ' ' + str(y))
text2.delete('1.0', END)
text2.get('1.0', END)
text2.insert(1.0, 'Кол-во граммов' + str(y))
text1.delete('1.0', END)
text1.get('1.0', END)
text1.insert(1.0, 'Кол-во купюр:' + str(a))
elif a > 5000000 and barig == 2:
a = a - 5000000
barig = 3
print('Улучшение куплено.' + ' ' + 'Ваши граммы:' + ' ' + str(y))
text2.delete('1.0', END)
text2.get('1.0', END)
text2.insert(1.0, 'Кол-во граммов' + str(y))
text1.delete('1.0', END)
text1.get('1.0', END)
text1.insert(1.0, 'Кол-во купюр:' + str(a))
elif a > 5000000 and barig == 3:
a = a - 5000000
barig = 4
print('Улучшение куплено.' + ' ' + 'Ваши граммы:' + ' ' + str(y))
text2.delete('1.0', END)
text2.get('1.0', END)
text2.insert(1.0, 'Кол-во граммов' + str(y))
text1.delete('1.0', END)
text1.get('1.0', END)
text1.insert(1.0, 'Кол-во купюр:' + str(a))
def button18_clicked():
global prod
global a
global y
global z
global window
global barig
if barig == 0:
a = a + y * 2
y = 0
print('Стафф продан' + ' ' + 'Ваши граммы:' + ' ' + str(y))
text2.delete('1.0', END)
text2.get('1.0', END)
text2.insert(1.0, 'Продано')
text1.delete('1.0', END)
text1.get('1.0', END)
text1.insert(1.0, 'Кол-во купюр:' + str(a))
elif barig == 1:
a = a + y * 3
y = 0
print('Стафф продан' + ' ' + 'Ваши граммы:' + ' ' + str(y))
text2.delete('1.0', END)
text2.get('1.0', END)
text2.insert(1.0, 'Продано')
text1.delete('1.0', END)
text1.get('1.0', END)
text1.insert(1.0, 'Кол-во купюр:' + str(a))
elif barig == 2:
a = a + y * 4
y = 0
print('Стафф продан' + ' ' + 'Ваши граммы:' + ' ' + str(y))
text2.delete('1.0', END)
text2.get('1.0', END)
text2.insert(1.0, 'Продано')
text1.delete('1.0', END)
text1.get('1.0', END)
text1.insert(1.0, 'Кол-во купюр:' + str(a))
elif barig == 3:
a = a + y * 5
y = 0
print('Стафф продан' + ' ' + 'Ваши граммы:' + ' ' + str(y))
text2.delete('1.0', END)
text2.get('1.0', END)
text2.insert(1.0, 'Продано')
text1.delete('1.0', END)
text1.get('1.0', END)
text1.insert(1.0, 'Кол-во купюр:' + str(a))
elif barig == 4:
a = a + y * 6
y = 0
print('Стафф продана' + ' ' + 'Ваши граммы:' + ' ' + str(y))
text2.delete('1.0', END)
text2.get('1.0', END)
text2.insert(1.0, 'Продано')
text1.delete('1.0', END)
text1.get('1.0', END)
text1.insert(1.0, 'Кол-во купюр:' + str(a))
button10 = Button(root, bg = 'red', text = "Смыть в унитаз", command = button10_clicked, font = 200, fg = 'yellow', width = 50)
button11 = Button(root, bg = 'blue', text = "Сварить стафф", command = button11_clicked, font = 200, fg = 'yellow', width = 50)
button12 = Button(root, bg = 'blue', text = "Нанять рабочего, Стоимость: 10000$", command = button12_clicked, font = 200, fg = 'yellow', width = 50)
button13 = Button(root, bg = 'blue', text = "Заплатить копам, Стоимость: 50000$", command = button13_clicked, font = 200, fg = 'yellow', width = 50)
button14 = Button(root, bg = 'blue', text = "Рассказать людям, Стоимость: 500000$", command = button14_clicked, font = 200, fg = 'yellow', width = 50)
button15 = Button(root, bg = 'blue', text = "Купить плиту, Стоимость: 1000000$", command = button15_clicked, font = 200, fg = 'yellow', width = 50)
button16 = Button(root, bg = 'blue', text = "Купить рекламу, Стоимость: 1500000$", command = button16_clicked, font = 200, fg = 'yellow', width = 50)
button17 = Button(root, bg = 'blue', text = "Нанять барыгу, Стоимость: 5000000$", command = button17_clicked, font = 200, fg = 'yellow', width = 50)
button18 = Button(root, bg = 'blue', text = "Продать стафф", command = button18_clicked, font = 200, fg = 'yellow', width = 50)
text2=Text(root, height = 1, font='Arial 14', wrap=WORD)
text2.place(x = 10, y = 550)
button10.place(x = 500, y = 60)
button11.place(x = 500, y = 110)
button12.place(x = 500, y = 160)
button13.place(x = 500, y = 210)
button14.place(x = 500, y = 260)
button15.place(x = 500, y = 310)
button16.place(x = 500, y = 360)
button17.place(x = 500, y = 410)
button18.place(x = 500, y = 460)
root.mainloop()  # blocks here until the window is closed
|
from ._create_mos_target_cat_example import get_data_dict as \
_get_data_dict_for_example
from ._create_mos_target_cat_example import set_keywords_info as \
_set_keywords_info_for_example
from ._create_mos_target_cat_example import create_mos_target_cat as \
create_mos_target_cat
from .add_targets_to_xmls import add_targets
|
# Odoo/OpenERP addon manifest: declarative metadata only, evaluated by the
# framework at module discovery time.
{
"name":"Bahmni Custom For Gudalur",
"version":"1.0",
"author":"ThoughtWorks Technologies Pvt. Ltd.",
"category":"BahmniCustom",
"description":"Custom changes for gudalur environment",
"depends": ["base","bahmni_customer_payment","account_voucher","bahmni_internal_stock_move","bahmni_pharmacy_product","bahmni_purchase_extension","bahmni_stock_batch_sale_price","bahmni_sale_discount","sale","stock","purchase","account"],
'data':['module_misc.xml','attribute_in_sale_order.xml','price_in_stock_product_lot.xml','claim_type.xml','sale_order_type_shop_form.xml','sale_care_setting_form.xml','product_category_department.xml','chargetype_category_mapping.xml','product_schedule_h_form.xml'],
'demo':[],
'auto_install':False,
'application':True,
'installable':True,
}
import numpy as np
import os
# Specific functions fed to searchlight ---------------------------------------
# TODO should move these out (into pycorr?) eventually
def pattern_similarity(d_list, TRs, offset_TR=5, pattern_indx=None, nan_thresh=None):
    """Cross-subject pattern correlation of story segments.

    Parameters:
        d_list: list of nifti images / arrays with dims (xyzt), passed to load_mvpa
        TRs: dataframe describing each TR from story blueprint
        offset_TR: number of TRs to shift data (to offset for HRF)
        pattern_indx: boolean mask over segment-segment correlation matrix
            (currently unused -- see the commented-out return below)
        nan_thresh: max tolerated fraction of NaN voxels per subject,
            forwarded to crosscor_full

    Returns:
        sub x seg x seg array of correlations.
    """
    segmat = load_mvpa(d_list, TRs, None, offset_TR)
    # TODO: What if voxel nans for sub?
    segcors = crosscor_full(segmat, nan_thresh=nan_thresh)
    # Do comparison
    #return segcors.mean(axis=0)[pattern_indx].mean()
    return segcors # sub x seg x seg
from pycorr.funcs_correlate import crosscor, standardize, sum_tc
def crosscor_full(A, B=None, nan_thresh=None):
    """From data (dims sub x seg x vox) calculate sub against others pattern cor

    Parameters:
        A: sub x seg x vox matrix
        B: (optional) seg x vox matrix; when given, each subject is
            correlated against this fixed template instead of the group
        nan_thresh: if the fraction of NaN voxels for a subject exceeds this,
            emit an all-NaN matrix for that subject instead of computing
    Returns:
        sub x seg x seg array of correlation matrices
    """
    # standardize all along last dim, so don't need to in correlation
    A = standardize(A)
    all_cors = []
    # Within group correlations
    if B is None:
        others = sum_tc(A)
        for sub in A:
            # Mask of voxels containing any NaN across segments.
            to_remove = np.any(np.isnan(sub), axis=0)
            if np.any(to_remove):
                # BUG FIX: the original indexed with `to_remove`, which KEPT
                # only the NaN voxels; keep the complement instead.
                tmp_sub = sub[..., ~to_remove]
                tmp_others = others[..., ~to_remove]
            else:
                tmp_sub, tmp_others = sub, others
            # cross correlate (1 x seg x seg)
            if nan_thresh is not None and to_remove.mean() > nan_thresh:
                # Too many bad voxels: seg x seg matrix of NaN.
                cormat = np.empty(sub.shape[0:1]*2) * np.nan
            else:
                # `others - sub` excludes the subject from the group average.
                cormat = crosscor(tmp_sub, standardize(tmp_others - tmp_sub), standardized=True)
            all_cors.append(cormat)
        return np.array(all_cors)
    # Between group correlations
    else:
        B = standardize(B)
        for sub in A:
            cormat = crosscor(sub, B, standardized=True)
            all_cors.append(cormat)
        return np.array(all_cors)
# Functions taken from event_analysis -----------------------------------------
def subset_from_TRs(mat, TRs, offset=0):
    """Subset last dim of nparray using TR index.

    Parameters:
        mat: ndarray whose last axis indexes TRs
        TRs: pandas object whose (shifted) index selects positions in mat
        offset: periods passed to tshift (e.g. HRF lag compensation)
            NOTE(review): DataFrame.tshift was deprecated in pandas 1.1 and
            removed in 2.0 -- migrate to .shift(freq=...) when upgrading.
    """
    indx = TRs.tshift(freq=offset).index # align TRs to mat
    #mask = np.array([ii in indx for ii in range(mat.shape[-1])])
    # change to directly select using index.
    # will throw an error if index is longer than releavant mat dim!
    return mat[..., indx.tolist()]
def load_mvpa(all_fnames, TRs, bad_vox, offset_TR, collapse=True):
    """Return matrix of shape (sub x seg x vox)
    Parameters:
        all_fnames: names of nifti files to load for sub dimension
            (entries may also be pre-loaded arrays, see the type check below)
        TRs: dataframe with cond column and order column
        bad_vox: mask with true for voxels to be discarded
        offset_TR: TRs to shift timecourses before subsetting (to take into account lag, etc..)
        collapse: whether to take mean along last axis
    Notes:
        If collapse is False, then sub and seg dims are lists.
    """
    subs_list = []
    for fname in all_fnames:
        # Load Array (string entries are .npy paths; anything else is
        # assumed to already be an array)
        if type(fname) is str:
            subname, ext = os.path.splitext(os.path.split(fname)[-1])
            #subkey = "_".join(subname.split('_')[:2])
            arr = np.load(fname)
        else: arr = fname
        # Standardize, make sure no NaNs
        arr = standardize(arr)[~bad_vox if bad_vox is not None else Ellipsis] #TODO write more clearly
        #arr = arr[ np.isnan(arr).sum(axis=-1) == 0] #remove any nans (from no var?)
        # Get individual segments
        # since it sorts in order of columns, will be sorted by cond first, then order
        segs_list = []
        cond_list = []
        # TODO remove hard coded conditions
        for ii, g in TRs.query("cond in ['Slumlord', 'Overview']").groupby(['cond', 'order']):
            #print ii
            cond_list.append(ii)
            segarr = subset_from_TRs(arr, g, offset=offset_TR)
            segs_list.append(segarr)
        # optionally collapse time dimension (mean over TRs within a segment)
        if collapse:
            mat = np.vstack([seg.mean(axis=1) for seg in segs_list])
            subs_list.append(mat)
        else:
            subs_list.append(segs_list)
        #print cond_list
    M = np.array(subs_list) if collapse else subs_list # Make field array, with sub names?
    return M
|
import random
from Battle import *
class RandTrainer(Trainer):
    """Trainer that picks one of the four move slots uniformly at random."""

    def Act(self):
        # randrange(4) is exactly equivalent to randint(0, 3).
        return random.randrange(4)
if __name__ == "__main__":
print("Let's get ready to Battle!")
bulba = AllPokemon.MakeNewPokemon(1, 50)
bulba.SetMoves(["Vine Whip", "Tackle"])
pika = AllPokemon.MakeNewPokemon(25, 50)
pika.SetMoves(["Thunder Shock", "Tackle"])
gary = RandTrainer("Gary", bulba)
ash = RandTrainer("Ash", pika)
randBattle = Battle(gary, ash)
randBattle.Battle(20)
|
import numpy as np
import matplotlib.pyplot as plt
def knn(trainX, trainY, testX, K):
    """Classify `testX` by majority vote among its K nearest training samples.

    Parameters:
        trainX: (n, d) array of training samples
        trainY: length-n array of labels
        testX: (d,) query sample
        K: number of neighbours that vote

    Returns:
        The winning label; ties resolve to the label first reaching the
        top count, as in the original implementation.
    """
    # Euclidean distance from the query to every training sample.
    dist = (((trainX - testX) ** 2).sum(1)) ** 0.5
    nearest = dist.argsort()
    votes = {}
    for rank in range(K):
        label = trainY[nearest[rank]]
        votes[label] = votes.get(label, 0) + 1
    # max() over the insertion-ordered dict reproduces the original
    # first-strictly-greater tie-break.
    return max(votes, key=votes.get)
def getKValue(trainXSet, trainYSet, validXSet, validYSet, trainNum, validNum, batch_size):
    """Scan K = 1..50 on 500 random validation samples and return the best K.

    Plots accuracy vs K, then returns the rounded mean index (= K - 1) of
    the maximum accuracy. `trainNum` and `batch_size` are accepted for
    interface compatibility but unused.
    """
    print("start KNN")
    errorCount = 0.0
    acc = []
    k = []
    # 500 random validation indices, reused for every candidate K.
    mTest = np.random.randint(0, validNum, 500)
    for j in range(0, 50):
        k.append(j)
        for i in range(len(mTest)):
            classifierResult = knn(trainXSet[:2500], trainYSet[:2500], validXSet[mTest[i]], j + 1)
            if (classifierResult != validYSet[mTest[i]]): errorCount += 1.0
        acc.append(((1 - errorCount / float(len(mTest))) * 100))
        errorCount = 0.0
    # Positions (K - 1 values) achieving the maximum accuracy.
    indexTmp = np.argwhere(acc == np.amax(acc))
    index = [indexTmp[i][0] for i in range(len(indexTmp))]
    plt.plot(k, acc)
    plt.title('KNN Correct rate', fontsize=24)
    plt.xlabel('K', fontsize=14)
    plt.ylabel('Correct rate(%)', fontsize=14)
    plt.show()
    # BUG FIX: the format string previously ended with a bare '%', which
    # raises ValueError("incomplete format") at runtime; escaped as '%%'.
    print("\nValid KNN辨识率为: %f %%" % np.mean(acc))
    print("finished KNN")
    return int(np.mean(index))
def knnPredict(trainXSet, trainYSet, validXSet, validYSet, testX, testY, trainNum, validNum, batch_size):
    """Select K on the validation set, then return per-group test accuracy (%)."""
    K = getKValue(trainXSet, trainYSet, validXSet, validYSet, trainNum, validNum, batch_size)
    print("K:{}".format(K))
    acc = []
    # testX/testY are grouped: score each group independently.
    for group in range(len(testX)):
        wrong = 0.0
        for sample in range(len(testX[group])):
            predicted = knn(trainXSet, trainYSet, testX[group][sample], K)
            if predicted != testY[group][sample]:
                wrong += 1.0
        acc.append((1 - wrong / float(len(testX[group]))) * 100)
    return acc
|
"""List Applicable Devices Class."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
from .upgradepackages import UpgradePackages
import logging
import warnings
class ListApplicableDevices(APIClassTemplate):
    """The ListApplicableDevices Object in the FMC."""

    # JSON fields the FMC API exposes for this object.
    VALID_JSON_DATA = [
        "id",
        "name",
        "type",
        "upgradePackage",
        "model",
        "modelId",
        "modelNumber",
        "modelType",
        "healthStatus",
        "sw_version",
        "isPartofContainer",
        "containerType",
        "healthPolicy",
        "accessPolicy",
    ]
    VALID_FOR_KWARGS = VALID_JSON_DATA + []
    URL_SUFFIX = "/updates/upgradepackages"

    def __init__(self, fmc, **kwargs):
        """
        Initialize ListApplicableDevices object.

        Set self.type to "UpgradePackage", and parse the kwargs.

        :param fmc (object): FMC object
        :param **kwargs: Any other values passed during instantiation.
        :return: None
        """
        super().__init__(fmc, **kwargs)
        logging.debug("In __init__() for ListApplicableDevices class.")
        self.type = "UpgradePackage"
        self.parse_kwargs(**kwargs)

    def upgrade_package(self, package_name):
        """
        Point this object's URL at the applicable-devices list of the named package.

        :param package_name: (str) Name of package to upgrade
        :return: None
        """
        logging.debug("In upgrade_package() for ListApplicableDevices class.")
        package1 = UpgradePackages(fmc=self.fmc)
        package1.get(name=package_name)
        if "id" in package1.__dict__:
            self.package_id = package1.id
            self.URL = f"{self.fmc.platform_url}{self.URL_SUFFIX}/{self.package_id}/applicabledevices"
            self.package_added_to_url = True
        else:
            logging.warning(
                f"UpgradePackage {package_name} not found. Cannot get list of ListApplicableDevices."
            )

    # FIX: removed the redundant trailing `pass` from the three stubs below
    # -- the logging call already makes each body non-empty.
    def post(self):
        """POST method for API for ListApplicableDevices not supported."""
        logging.info("POST method for API for ListApplicableDevices not supported.")

    def put(self):
        """PUT method for API for ListApplicableDevices not supported."""
        logging.info("PUT method for API for ListApplicableDevices not supported.")

    def delete(self):
        """DELETE method for API for ListApplicableDevices not supported."""
        logging.info("DELETE method for API for ListApplicableDevices not supported.")
class ApplicableDevices(ListApplicableDevices):
    """
    Dispose of this Class after 20210101.

    Use ListApplicableDevices() instead.
    """

    def __init__(self, fmc, **kwargs):
        """Warn about deprecation, then delegate to ListApplicableDevices."""
        warnings.resetwarnings()
        # FIX: emit a DeprecationWarning (with the caller's stack frame)
        # instead of the generic UserWarning so -W filters and tooling
        # classify it correctly.
        warnings.warn(
            "Deprecated: ApplicableDevices() should be called via ListApplicableDevices().",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(fmc, **kwargs)
|
import os
from posixpath import join  # NOTE(review): unused here; kept in case other code imports it from this module

# Project skeleton: each directory gets a .gitkeep so empty folders survive
# a git commit; the top-level files are created empty.
dirs = [os.path.join('data', 'raw'),
        os.path.join('data', 'processed'),
        'notebooks', 'saved_models', 'src']
for dir_path in dirs:  # FIX: `dir` shadowed the builtin
    os.makedirs(dir_path, exist_ok=True)
    with open(os.path.join(dir_path, '.gitkeep'), 'w'):
        pass  # FIX: dropped the unused `as f` handle

files = ["dvc.yaml", "params.yaml", ".gitignore", os.path.join("src", "__init__.py")]
for file_path in files:  # FIX: `file` shadowed the (Python 2) builtin
    with open(file_path, 'w'):
        pass
import yaml
import json
import re
class JSONObject:
    """Mixin that serializes an object's attribute dict to pretty JSON."""

    def toJSON(self):
        """Return this object (via its __dict__) as an indented, key-sorted JSON string."""
        return json.dumps(
            self,
            default=lambda item: item.__dict__,
            sort_keys=True,
            indent=4,
        )
class CLIConfig(JSONObject):
    """Compose service definition for the Fabric CLI/tools container."""

    def __init__(self, volumes):
        # `volumes` is the set of named volumes, whose names double as
        # service names here -- the CLI starts after all of them.
        self.container_name = "cli"
        self.image = "hyperledger/fabric-tools:latest"
        self.tty = True
        self.stdin_open = True
        # Peer CLI context defaults to peer0 of org1.
        self.environment = ["SYS_CHANNEL=byfn-sys-channel",
                            "GOPATH=/opt/gopath",
                            "CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock",
                            "FABRIC_LOGGING_SPEC=DEBUG",
                            "FABRIC_LOGGING_SPEC=INFO",
                            "CORE_PEER_ID=cli",
                            "CORE_PEER_ADDRESS=peer0.org1.example.com:21000",
                            "CORE_PEER_LOCALMSPID=Org1MSP",
                            "CORE_PEER_TLS_ENABLED=true",
                            "CORE_PEER_TLS_CERT_FILE=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/server.crt",
                            "CORE_PEER_TLS_KEY_FILE=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/server.key",
                            "CORE_PEER_TLS_ROOTCERT_FILE=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/ca.crt",
                            "CORE_PEER_MSPCONFIGPATH=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/org1.example.com/users/Admin@org1.example.com/msp"]
        self.working_dir = "/opt/gopath/src/github.com/hyperledger/fabric/peer"
        self.command = "/bin/bash"
        self.volumes = ["/var/run/:/host/var/run/",
                        "./../chaincode/:/opt/gopath/src/github.com/chaincode",
                        "./crypto-config:/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/",
                        "./scripts:/opt/gopath/src/github.com/hyperledger/fabric/peer/scripts/",
                        "./channel-artifacts:/opt/gopath/src/github.com/hyperledger/fabric/peer/channel-artifacts"]
        self.depends_on = list(volumes)
        self.networks = ["byfn"]
class OrdererConfig(JSONObject):
    """Compose service definition for an orderer node (default or numbered)."""

    def __init__(self, orderer_id=""):
        self.container_name = "orderer%s.example.com" % orderer_id
        self.extends = {"file": "base/peer-base.yaml",
                        "service": "orderer-base"}
        self.volumes = ["./channel-artifacts/genesis.block:/var/hyperledger/orderer/orderer.genesis.block",
                        "./crypto-config/ordererOrganizations/example.com/orderers/orderer%s.example.com/msp:/var/hyperledger/orderer/msp" % orderer_id,
                        "./crypto-config/ordererOrganizations/example.com/orderers/orderer%s.example.com/tls/:/var/hyperledger/orderer/tls" % orderer_id,
                        "orderer%s.example.com:/var/hyperledger/production/orderer" % orderer_id]
        # Host port 18000 for the unnumbered orderer, 18000 + id otherwise;
        # the container side is always 7050.
        host_port = 18000 + int(orderer_id) if orderer_id != "" else 18000
        self.ports = ["%d:%d" % (host_port, 7050)]
        self.networks = ["byfn"]
class PeerConfig(JSONObject):
    """Compose service definition for peer<peer_id>.org<org_id>."""

    def __init__(self, org_id, peer_id, total_peers):
        self.container_name = "peer%d.org%d.example.com" % (peer_id, org_id)
        self.extends = {"file": "base/peer-base.yaml",
                        "service": "peer-base"}
        # Gossip bootstrap: every *other* peer in the same org.
        bootstrap = ""
        for i in range(total_peers):
            if i != peer_id:
                bootstrap += "peer%d.org%d.example.com:%d " % (i, org_id, 20000 + org_id * 1000 + i)
        # if peer_id != total_peers - 1:
        #     bootstrap += "peer%d.org%d.example.com:%d" % (total_peers - 1, org_id, 20000 + org_id * 1000 + total_peers - 1)
        # bootstrap += "]"
        # Port scheme: peer listen = 20000 + org*1000 + peer;
        #              chaincode listen = 18000 + org*100 + peer.
        self.environment = ["CORE_PEER_ID=peer%d.org%d.example.com" % (peer_id, org_id),
                            "CORE_PEER_ADDRESS=peer%d.org%d.example.com:%d" % (
                                peer_id, org_id, 20000 + org_id * 1000 + peer_id),
                            "CORE_PEER_LISTENADDRESS=0.0.0.0:%d" % (20000 + org_id * 1000 + peer_id),
                            "CORE_PEER_CHAINCODEADDRESS=peer%d.org%d.example.com:%d" % (
                                peer_id, org_id, 18000 + org_id * 100 + peer_id),
                            "CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:%d" % (18000 + org_id * 100 + peer_id),
                            "CORE_PEER_GOSSIP_BOOTSTRAP=" + bootstrap,
                            "CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer%d.org%d.example.com:%d" % (
                                peer_id, org_id, 20000 + org_id * 1000 + peer_id),
                            "CORE_PEER_LOCALMSPID=Org%dMSP" % org_id]
        self.volumes = ["/var/run/:/host/var/run/",
                        "./crypto-config/peerOrganizations/org%d.example.com/peers/peer%d.org%d.example.com/msp:/etc/hyperledger/fabric/msp" % (
                            org_id, peer_id, org_id),
                        "./crypto-config/peerOrganizations/org%d.example.com/peers/peer%d.org%d.example.com/tls:/etc/hyperledger/fabric/tls" % (
                            org_id, peer_id, org_id),
                        "peer%d.org%d.example.com:/var/hyperledger/production" % (peer_id, org_id)]
        self.ports = ["%d:%d" % (20000 + org_id * 1000 + peer_id, 20000 + org_id * 1000 + peer_id)]
        self.networks = ["byfn"]
class CAConfig(JSONObject):
    """Compose service definition for the CA of org<org_id>."""

    def __init__(self, org_id):
        self.image = "hyperledger/fabric-ca:latest"
        self.environment = [
            "FABRIC_CA_HOME=/etc/hyperledger/fabric-ca-server",
            "FABRIC_CA_SERVER_CA_NAME=ca.org%d.example.com" % org_id,
            "FABRIC_CA_SERVER_TLS_ENABLED=true",
            "FABRIC_CA_SERVER_TLS_CERTFILE=/etc/hyperledger/fabric-ca-server-config/ca.org%d.example.com-cert.pem" % org_id,
            "FABRIC_CA_SERVER_TLS_KEYFILE=/etc/hyperledger/fabric-ca-server-config/CA%d_PRIVATE_KEY" % org_id]
        # Host port 19000 + org id maps to the CA's standard 7054.
        self.ports = ["%d:7054" % (19000 + org_id)]
        self.command = "sh -c 'fabric-ca-server start --ca.certfile /etc/hyperledger/fabric-ca-server-config/ca.org%d.example.com-cert.pem --ca.keyfile /etc/hyperledger/fabric-ca-server-config/CA%d_PRIVATE_KEY -b admin:adminpw -d'" % (
            org_id, org_id)
        self.volumes = [
            "./crypto-config/peerOrganizations/org%d.example.com/ca/:/etc/hyperledger/fabric-ca-server-config" % org_id]
        self.container_name = "ca.org%d.example.com" % org_id
        self.networks = ["byfn"]
class DockerComposeConfig(JSONObject):
    """Top-level compose document: named volumes, networks and all services."""

    def __init__(self, peer_num, org_num, orderer_num):
        self.version = "2"
        # Named volumes; the sets below are serialized by yaml.dump and the
        # python-specific tags are stripped by the regexes in __main__.
        self.volumes = {"orderer.example.com"}
        for i in range(2, orderer_num + 1):
            self.volumes.add("orderer%d.example.com" % i)
        for i in range(1, org_num + 1):
            for j in range(peer_num):
                self.volumes.add("peer%d.org%d.example.com" % (j, i))
        self.networks = {"byfn"}
        self.services = {}
        # One CA per org, one default orderer plus numbered extras, and
        # peer_num peers per org.
        for i in range(1, org_num + 1):
            self.services["ca.org%d.example.com" % i] = CAConfig(i)
            self.volumes.add("ca.org%d.example.com" % i)
        self.services["orderer.example.com"] = OrdererConfig()
        self.services["cli"] = CLIConfig(self.volumes)
        for i in range(2, orderer_num + 1):
            self.services["orderer%d.example.com" % i] = OrdererConfig(i)
        for i in range(1, org_num + 1):
            for j in range(peer_num):
                self.services["peer%d.org%d.example.com" % (j, i)] = PeerConfig(i, j, peer_num)
if __name__ == '__main__':
    # Generate a compose file for 3 peers/org, 6 orgs and 5 orderers.
    config = DockerComposeConfig(3, 6, 5)
    configStr = yaml.dump(config)
    f = open("../out/docker-compose-e2e.yaml", "w")
    # Strip the python-object tags, nulls and quotes that yaml.dump emits
    # for these custom classes, leaving plain YAML.
    f.write(re.sub("\'", "", re.sub("null", "", re.sub("!!.*\n", "\n", configStr))))
|
# Basic arithmetic operations demo (comments translated from Portuguese).
soma = 10+10 # addition
subtracao = 10-10 # subtraction
divisao = 100/10 # division (true division: always yields a float)
multiplicacao = 2*400 # multiplication
print(soma)
print(subtracao)
print(divisao)
print(multiplicacao)
print (10+10)
print (10+(50+50))
print (10-10)
print (1000-80)
print (10/5)
print(10/6)
print (10//6) # floor division: rounds down to an integer result
print (10*8000)
print (55*5)
|
import pandas as pd
import scipy as sp
import matplotlib.pyplot as plt
from io import StringIO
import numpy as np
from scipy.interpolate import interp1d
from derivativesfedrick_hw6 import *
from integrationfedrick_hw6 import *
from nonlinequationfedrick_hw6 import *
#function for relaxation method rearranged
def f(X):
    """Fixed-point form of the 2x2 nonlinear system for the relaxation method.

    Given X = [x, y], returns [g1(y), g2(x)] where a fixed point satisfies
    x = sqrt(b/y - a) and y = x / (a + x**2), with a=1, b=2 (compare f1,
    which is the residual form of the same system).

    Bug fix: the original built the result list A but never returned it,
    so the relaxation solver received None.
    """
    a = 1
    b = 2
    x = X[0]
    y = X[1]
    A = [0] * len(X)
    A[0] = ((b / y) - a) ** 0.5
    A[1] = x / (a + x ** 2)
    return A
#function for newton raphson method
def f1(X):
    """Residual form F(X) = 0 of the nonlinear system, for Newton-Raphson.

    With a=1, b=2: F1 = sqrt(b/y - a) - x and F2 = x/(a + x**2) - y.
    Returns the residual vector as a list the same length as X.
    """
    a, b = 1, 2
    x, y = X[0], X[1]
    residual = [0] * len(X)
    residual[0] = ((b / y) - a) ** 0.5 - x
    residual[1] = x / (a + x ** 2) - y
    return residual
#computes the jacobian matrix for the newton raphson system
def J(X):
    """Jacobian matrix for the Newton-Raphson system (note a=16, b=2 here).

    The analytic dF1/dx and dF1/dy expressions are disabled in favour of the
    constants -1 and 0, matching the original implementation.
    """
    a = 16
    b = 2
    x, y = X[0], X[1]
    # First row: dF1/dx = -1, dF1/dy = 0 (analytic forms were commented out
    # in the original; these constants are what the solver actually uses).
    top_row = [-1, 0]
    # Second row: dF2/dx via the quotient rule, dF2/dy = -1.
    df2_dx = ((a + x ** 2) - (2 * x ** 2)) / ((a + x ** 2) ** 2)
    bottom_row = [df2_dx, -1]
    return np.array([top_row, bottom_row])
# Output iteration counts and results for both solvers.
# relax and NRsys come from nonlinequationfedrick_hw6; each presumably
# returns (solution_vector, iteration_count) -- TODO confirm signatures.
r,ri=relax([-100,100],f, 1e-6)
nr, nri=NRsys(f1,J,1e-6,[-100,100])
print("this is the solution using relaxation: "+ str(np.array(r).real)+" with "+str(ri)+" iterations")
print("this is the solution using newton raphson: "+ str(np.array(nr).real)+" with "+str(nri)+" iterations")
|
#selection sort
def selection_sort(num):
    """Sort the list `num` in place using selection sort (O(n^2))."""
    for i in range(len(num)):
        # Find the index of the smallest remaining element...
        lowest_value_index = i
        for j in range(i + 1, len(num)):
            if num[j] < num[lowest_value_index]:
                lowest_value_index = j
        # ...and swap it into position i.
        num[i], num[lowest_value_index] = num[lowest_value_index], num[i]


# Renamed from `list`, which shadowed the builtin.
values = [1, 2, 3, 4]
selection_sort(values)
print(values)
#insertion sort
def insertionSort(arr):
    """Sort `arr` in place using insertion sort."""
    for idx in range(1, len(arr)):
        current = arr[idx]
        pos = idx - 1
        # Shift larger elements one slot right to open a gap for `current`.
        while pos >= 0 and current < arr[pos]:
            arr[pos + 1] = arr[pos]
            pos -= 1
        arr[pos + 1] = current


# main
arr = ['t','u','t','o','r','i','a','l']
insertionSort(arr)
print ("The sorted array is:")
for element in arr:
    print (element)
#bubble sort
def bubble_sort(num):
    """Sort `num` in place with bubble sort; stops early once a full pass
    makes no swaps."""
    swap = True
    while swap:
        swap = False
        for i in range(len(num) - 1):
            if num[i] > num[i + 1]:
                num[i], num[i + 1] = num[i + 1], num[i]
                swap = True


# Renamed from `list`, which shadowed the builtin.
numbers = [23, 14, 66, 8, 2]
bubble_sort(numbers)
print(numbers)
|
'''
Now You Code 4: Syracuse Weather
Write a program to load the Syracuse weather data from Dec 2015 in
JSON format into a Python list of dictionary. The file is:
"NYC4-syr-weather-dec-2015.json"
After you load this data calculate the number of days where the
'Mean TemperatureF' is above freezing ( > 32 degrees)
Store the days above freezing and below freezing into a dictionary
and then print out the dictionary like this:
{'below-freezing': 4, 'above-freezing': 27}
'''
# TODO: Write Todo list then beneath write your code
# Write code here
import json

with open('NYC4-syr-weather-dec-2015-this-week.json') as json_data:
    daily_temp = json.load(json_data)

cold_day = 0
hot_day = 0
# Bug fix: the original scanned every numeric field of each record, so
# unrelated columns (max/min temperature, humidity, ...) were all counted.
# Per the spec above, only 'Mean TemperatureF' decides the classification.
# Days with a mean of exactly 32F fall in neither bucket, as before.
for day in daily_temp:
    mean_temp = day.get('Mean TemperatureF')
    if isinstance(mean_temp, (int, float)):
        if mean_temp > 32:
            hot_day = hot_day + 1
        elif mean_temp < 32:
            cold_day = cold_day + 1

temp_stats = {}
temp_stats["below-freezing"] = cold_day
temp_stats["above-freezing"] = hot_day
print(temp_stats)
|
from pygame.constants import USEREVENT
from pygame.time import set_timer
_events = set()
class Event:
    """A scheduled callback with a countdown measured in ticks."""

    def __init__(self, callback, oneshot, timer):
        # Invoked when the countdown reaches zero.
        self.callback = callback
        # When True the event fires once and is removed; otherwise it repeats.
        self.oneshot = oneshot
        # Reload value used to restart repeating events.
        self.original_timer = timer
        # Ticks left until the next firing.
        self.remaining_timer = timer
def schedule_event(callback, timer_in_ticks, oneshot=True):
    """Register `callback` to fire after `timer_in_ticks` ticks.

    Returns the Event handle so the caller can cancel it via clear_event.
    """
    new_event = Event(callback, oneshot=oneshot, timer=timer_in_ticks)
    _events.add(new_event)
    return new_event
def clear_event(event):
    """Unregister `event`; a no-op if it is not currently scheduled.

    Uses set.discard, the idiomatic replacement for the original
    membership-test-then-remove pair.
    """
    _events.discard(event)
def tick():
    """Advance every scheduled event by one tick, firing those that expire.

    Iterates over a snapshot of the registry so callbacks may schedule or
    clear events without disturbing the loop.
    """
    for event in list(_events):
        event.remaining_timer -= 1
        if event.remaining_timer > 0:
            continue
        event.callback()
        if event.oneshot:
            clear_event(event)
        else:
            # Repeating event: rewind the countdown.
            event.remaining_timer = event.original_timer
def init():
    # Post a pygame USEREVENT to the event queue every 100 ms; the main loop
    # is expected to call tick() whenever it receives that event.
    set_timer(USEREVENT, 100)
import sys
from PyQt5.QtWidgets import (QWidget, QToolButton, QApplication,
QDesktopWidget, QGridLayout, QSplitter,
QTreeWidget, QTreeWidgetItem, QMenu, QAction,
QLineEdit, QInputDialog, QMessageBox,
QRadioButton, QCheckBox,
QPushButton)
from PyQt5.QtGui import QIcon, QFont, QCursor
from PyQt5.QtCore import QSize, Qt, pyqtSignal
# from module import floatlayout as fl
# from module import text_edit as te
import floatlayout as fl
import text_edit as te
import disk as mydisk
import orders
class Explorer(QWidget):
    """Top-level file-manager window.

    Hosts a toolbar (back/forward/up buttons, address bar, execute bar), a
    directory tree on the left and a file-icon grid on the right.
    Original Chinese comments translated to English.
    """

    # Emitted from closeEvent so the owner can react to the window closing.
    after_close_signal = pyqtSignal()

    def __init__(self, master):
        super().__init__()
        self.master = master
        self.initUI()

    def initUI(self):
        """Build the widget tree, apply styling and show the window."""
        self.setContent()
        self.setWindowStyle()
        self.show()

    def setWindowStyle(self):
        """Apply stylesheets, window icon/title and initial geometry."""
        self.setStyleSheet('''
            QToolButton{
                border:1px solid white;
            }
            Explorer{
                background-color:white;
            }
        ''')
        self.fileView.setStyleSheet('''
            QSplitter::handle{
                width: 0px;
                border: 0px solid gray;
            }
        ''')
        self.setWindowIcon(QIcon('icon/file.ico'))
        self.setWindowTitle('文件管理器')
        self.resize(1280, 800)
        self.center()

    def setContent(self):
        # The main window is one grid layout.
        self.mainContent = QGridLayout()
        # Toolbar row.
        self.setContentTop()
        # File-browsing area.
        self.setContentBody()
        # Install the assembled layout.
        self.setLayout(self.mainContent)

    # Center the window on the available screen area.
    def center(self):
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        # Bug fix: the original computed the centered rectangle but never
        # moved the window, so center() had no visible effect.
        self.move(qr.topLeft())

    # Build the toolbar widgets along the top.
    def setContentTop(self):
        # Back button (disabled until there is history to go back to).
        self.backward = QToolButton()
        self.backward.setIcon(QIcon('icon/left.png'))
        self.backward.setDisabled(True)
        # Forward button.
        self.forward = QToolButton()
        self.forward.setIcon(QIcon('icon/right.png'))
        self.forward.setDisabled(True)
        # "Up one level" button.
        self.upward = QToolButton()
        self.upward.setIcon(QIcon('icon/up.png'))
        self.upward.clicked.connect(self.upwardFunction)
        # Address bar.
        self.makeAddressBar()
        # Execute bar.
        self.makeExecuteBar()
        # Add the buttons to the window.
        self.mainContent.addWidget(self.backward, 0, 0)
        self.mainContent.addWidget(self.forward, 0, 1)
        self.mainContent.addWidget(self.upward, 0, 2)

    # Build the file-browsing area.
    def setContentBody(self):
        # The body is a splitter.
        self.fileView = QSplitter()
        # Left: directory tree.
        self.leftTree = MyTreeView(self)
        # Right: file browser.
        self.rightWidget = FileWidget(self)
        self.fileView.addWidget(self.leftTree)
        self.fileView.addWidget(self.rightWidget)
        self.fileView.setStretchFactor(1, 11)
        # Add the browsing area to the main layout.
        self.mainContent.addWidget(self.fileView, 1, 0, 10, 10)

    # Address bar.
    def makeAddressBar(self):
        self.addressBar = QLineEdit()
        fileIcon = QAction(self.addressBar)
        fileIcon.setIcon(QIcon('icon/file.ico'))
        self.addressBar.addAction(fileIcon, QLineEdit.LeadingPosition)
        self.addressBar.setText('C:/')
        self.addressBar.setFont(QFont("Malgun Gothic"))
        self.mainContent.addWidget(self.addressBar, 0, 3, 1, 4)
        # Display-only: the bar never takes keyboard focus.
        self.addressBar.setFocusPolicy(Qt.NoFocus)
        self.addressBar.setStyleSheet('''
            QLineEdit{
                border: 1px solid #d9d9d9;
                height:28px;
            }
        ''')

    # Execute (command) bar.
    def makeExecuteBar(self):
        self.executeBar = QLineEdit()
        executeIcon = QAction(self.executeBar)
        executeIcon.setIcon(QIcon('icon/execute.png'))
        executeIcon.triggered.connect(self.execute)
        self.executeBar.addAction(executeIcon, QLineEdit.TrailingPosition)
        self.executeBar.setText('在此输入执行语句')
        # self.executeBar.editingFinished.connect(self.execute)  # editingFinished fires at odd times
        self.executeBar.setFont(QFont("Malgun Gothic"))
        self.executeBar.setClearButtonEnabled(True)
        self.executeBar.setStyleSheet('''
            QLineEdit{
                border: 1px solid #d9d9d9;
                height: 28px;
                color: #85898c;
            }
        ''')
        self.mainContent.addWidget(self.executeBar, 0, 7, 1, 3)

    # Navigate one directory level up.
    def upwardFunction(self):
        path = mydisk.cut_path(self.addressBar.text())[0]
        self.addressBar.setText(path)
        self.rightWidget.path = path
        self.rightWidget.refresh()

    # Window close handler.
    def closeEvent(self, e):
        self.after_close_signal.emit()

    # Run the command typed in the execute bar.
    def execute(self):
        order = self.rightWidget.path + '>' + self.executeBar.text()
        ans = orders.parser(order)
        # A ('path', new_path) answer means "change directory".
        if ans[0] == 'path':
            self.rightWidget.path = ans[1]
            self.rightWidget.refresh()
            self.leftTree.refresh()
# 传入explorer类, 用path创建界面中的按钮
class MyTreeView(QTreeWidget):
    """Left-hand tree pane: "My Computer" with the C: and D: drives and their
    folder hierarchy. `master` is the owning Explorer window.
    Original Chinese comments translated to English.
    """

    def __init__(self, master):
        super().__init__()
        self.master = master
        self.setHeaderHidden(True)
        self.setColumnCount(1)
        self.setMinimumWidth(200)
        self.setStyleSheet('''
            QTreeWidget{
                border: 0px;
                border-right:1px solid #c3c3c3;
                background-color:white;
            }
        ''')
        # "My Computer" root node.
        self.myComputer = QTreeWidgetItem(self)
        self.myComputer.setIcon(0, QIcon('icon/my_computer.ico'))
        self.myComputer.setText(0, '我的电脑')
        self.myComputer.setExpanded(True)
        # C drive.
        self.disk_c = QTreeWidgetItem(self.myComputer)
        self.disk_c.setIcon(0, QIcon('icon/disk_c.ico'))
        self.disk_c.setText(0, '本地磁盘(C:)')
        self.disk_c.path = 'C:/'
        # D drive.
        self.disk_d = QTreeWidgetItem(self.myComputer)
        self.disk_d.setIcon(0, QIcon('icon/disk.ico'))
        self.disk_d.setText(0, '本地磁盘(D:)')
        self.disk_d.path = 'D:/'
        self.clicked.connect(self.leftClicked)
        self.refresh()

    # Left-click handler for the tree view.
    def leftClicked(self, val: object):
        itemNow = self.currentItem()
        if itemNow.text(0) == '我的电脑':
            # The root node itself is not navigable.
            return
        if itemNow.path in ('C:/', 'D:/'):
            # Re-scan the drive subtrees when a drive root is selected.
            self.refresh()
        # Show the clicked directory in the right-hand file pane.
        self.master.rightWidget.path = itemNow.path
        self.master.rightWidget.refresh()

    # Context (right-click) menu.
    def contextMenuEvent(self, e):
        contextMenu = QMenu(self)
        contextMenu.addAction(self.newFile())
        contextMenu.addAction(self.newFolder())
        contextMenu.exec_(e.globalPos())

    # "New file" menu action.
    def newFile(self):
        item = QAction('&新建文件', self)
        item.triggered.connect(self.master.rightWidget.newFileFunction)
        # item.setMenu()
        return item

    # "New folder" menu action.
    def newFolder(self):
        item = QAction('&新建文件夹', self)
        item.triggered.connect(self.master.rightWidget.newFolderFunction)
        return item

    # Recursively add tree items for every sub-directory of `parent`.
    def makeChildren(self, parent: QTreeWidgetItem):
        children = mydisk.list_dir(parent.path)
        for child in children:
            # attribute == 8 marks a directory.
            # NOTE(review): attribute-bit meanings inferred from usage across
            # this file -- confirm against the mydisk module.
            if child['attribute'] == 8:
                item = QTreeWidgetItem()
                item.setText(0, child['name'])
                item.path = child['path']
                item.setIcon(0, QIcon('icon/file.ico'))
                parent.addChild(item)
                self.makeChildren(item)

    # Rebuild both drive subtrees from disk.
    def refresh(self):
        '''
        Triggered only by create, delete, modify, move and copy operations.
        '''
        for i in range(self.disk_c.childCount()):
            self.disk_c.removeChild(self.disk_c.child(0))
        self.makeChildren(self.disk_c)
        for i in range(self.disk_d.childCount()):
            self.disk_d.removeChild(self.disk_d.child(0))
        self.makeChildren(self.disk_d)
class FileWidget(QWidget):
    """Right-hand pane: the contents of `self.path` as a grid of file
    buttons, with back/forward history and a clipboard for cut/copy/paste.
    Original Chinese comments translated to English.
    """

    def __init__(self, master):
        super().__init__()
        self.path = 'C:/'
        self.master = master
        self.buttonList = []
        # Navigation history and the index of the current entry.
        self.visited = ['C:/']
        self.now = 0
        self.master.backward.clicked.connect(self.back)
        self.master.forward.clicked.connect(self.forward)
        # Holds the info dict of a cut/copied item, or None.
        self.clipboard = None
        self.body = fl.FlowLayout()
        self.setLayout(self.body)
        self.refresh()

    # "New file" context-menu action.
    def newFile(self):
        item = QAction('&新建文件', self)
        item.triggered.connect(self.newFileFunction)
        return item

    # "New folder" context-menu action.
    def newFolder(self):
        item = QAction('&新建文件夹', self)
        item.triggered.connect(self.newFolderFunction)
        return item

    # "Paste" context-menu action.
    def pasetItem(self):
        item = QAction('&粘贴', self)
        item.triggered.connect(self.pasetFunction)
        return item

    # Pop up the text editor to create a new file.
    def newFileFunction(self):
        self.text_edit = te.TextEdit(self.buttonList)
        self.text_edit.after_close.connect(self.createFileOnDisk)

    # Actually create the file on disk.
    def createFileOnDisk(self, info: dict):
        info['path'] = mydisk.format_path(self.path)
        mydisk.create_file(info)
        self.refresh()
        self.master.leftTree.refresh()

    # Build the tool button representing one file/folder entry.
    def createButton(self, info: dict):
        button = MyButton(self, info)
        button.setText(mydisk.join_name(info['name'], info['ext']))
        button.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
        button.setFont(QFont("Malgun Gothic", 10))
        button.setFixedSize(120, 150)
        button.setIconSize(QSize(120, 120))
        ext = info['ext']
        if ext == 'ex':
            button.setIcon(QIcon('icon/exe.ico'))
        elif ext == 'tx':
            # Bug fix: the original path was 'icon./text.ico' (stray dot
            # inside the directory name), so text files showed no icon.
            button.setIcon(QIcon('icon/text.ico'))
        elif info['attribute'] == 8:
            # NOTE(review): 'icon/file3' has no extension unlike every other
            # icon path here -- confirm the asset name before changing it.
            button.setIcon(QIcon('icon/file3'))
        else:
            button.setIcon(QIcon('icon/empty.ico'))
        return button

    # Add the button to the layout and record it in buttonList.
    def addButton(self, info: dict):
        '''
        Expected file-info dict: {
            'name': str,
            'ext': str,
            'attribute': int,
            'length': int,
            'text': str
        }
        '''
        btn = self.createButton(info)
        self.buttonList.append(btn)
        self.body.addWidget(btn)

    # Mouse handler: left click clears selection, right click opens the menu.
    def mousePressEvent(self, e):
        if e.button() == Qt.LeftButton:
            for i in self.buttonList:
                i.setStyleSheet('''
                    QToolButton{
                        background-color:white;
                    }
                    QToolButton:hover{
                        background-color: #e5f3ff;
                    }
                ''')
        else:
            contextMenu = QMenu(self)
            contextMenu.addAction(self.refreshAll())
            contextMenu.addSeparator()
            contextMenu.addAction(self.newFile())
            contextMenu.addAction(self.newFolder())
            paset = self.pasetItem()
            # Paste is only available when something was cut/copied.
            if self.clipboard is None:
                paset.setDisabled(True)
            contextMenu.addAction(paset)
            contextMenu.exec_(e.globalPos())

    # Create a new folder after prompting for its name.
    def newFolderFunction(self):
        text, ok = QInputDialog.getText(self, '新的文件夹', '输入文件夹名:')
        if not mydisk.is_dir_name(text):
            self.errorBox('文件夹名不合法!')
            return
        for i in self.buttonList:
            if i.text() == text:
                self.errorBox('文件夹名重复!')
                return
        if ok and len(text) != 0:
            mydisk.create_dir(mydisk.format_path(self.path) + '/' + text)
            self.refresh()
            self.master.leftTree.refresh()

    # Paste the clipboard entry into the current directory.
    def pasetFunction(self):
        name = mydisk.join_name(self.clipboard['name'], self.clipboard['ext'])
        for i in self.buttonList:
            if i.text() == name:
                self.errorBox('存在同名文件或文件夹!')
                return
        # Refuse to paste a folder into its own subtree.
        if self.path.find(self.clipboard['path']) == 0:
            self.errorBox('目标文件夹存在于原文件夹内')
            return
        if self.clipboard['operation'] == 'copy':
            mydisk.copy(self.clipboard['path'], self.path)
        else:
            mydisk.move(self.clipboard['path'], self.path)
        self.clipboard = None
        self.refresh()
        self.master.leftTree.refresh()

    # History: go back one entry.
    def back(self):
        # After going back, forward is always possible.
        self.master.forward.setDisabled(False)
        self.now -= 1
        self.path = self.visited[self.now]
        self.refresh(1)
        if self.now <= 0:
            # Cannot go further back.
            self.master.backward.setDisabled(True)
        return

    # History: go forward one entry.
    def forward(self):
        # After going forward, back is always possible.
        self.master.backward.setDisabled(False)
        self.now += 1
        self.path = self.visited[self.now]
        self.refresh(2)
        if self.now == len(self.visited) - 1:
            # Cannot go further forward.
            self.master.forward.setDisabled(True)
        return

    # "Refresh" context-menu action.
    def refreshAll(self):
        item = QAction('刷新', self)
        item.triggered.connect(self.refreshAllFunction)
        return item

    # Refresh both this pane and the directory tree.
    def refreshAllFunction(self):
        self.refresh()
        self.master.leftTree.refresh()

    # Rebuild the file grid. op: 0 = normal navigation, 1 = back, 2 = forward.
    def refresh(self, op: int = 0):
        if self.path in ('C:', 'c:', 'd:', 'D:'):
            self.path += '/'
        # Normal navigation to a new path truncates the forward history.
        if op == 0 and self.path != self.visited[self.now]:
            self.now += 1
            self.visited = self.visited[:self.now]
            self.visited.append(self.path)
            # Normal navigation disables the forward button.
            self.master.forward.setDisabled(True)
        # There is now something to go back to.
        if self.now > 0:
            self.master.backward.setDisabled(False)
        self.master.addressBar.setText(self.path)
        for btn in self.buttonList:
            btn.deleteLater()
        self.buttonList = []
        self.file_list = mydisk.list_dir(self.path)
        for f in self.file_list:
            self.addButton(f)

    # Error message box.
    def errorBox(self, mes: str):
        msgBox = QMessageBox(
            QMessageBox.Warning,
            "警告!",
            mes,
            QMessageBox.NoButton,
            self
        )
        msgBox.addButton("确认", QMessageBox.AcceptRole)
        msgBox.exec_()
class MyButton(QToolButton):
    """One icon button in the file pane, representing a file, folder or exe.

    `master` is the owning FileWidget; `info` is the mydisk metadata dict
    for the entry ('name', 'ext', 'attribute', 'path', ...).
    Original Chinese comments translated to English.
    """

    def __init__(self, master, info: dict):
        super().__init__()
        self.master = master
        self.info = info
        self.buttonType = ''
        self.isFocused = False
        # Classify by metadata: attribute 8 = folder, ext 'tx' = text file,
        # ext 'ex' = executable.
        if info['attribute'] == 8:
            self.buttonType = 'folder'
        elif info['ext'] == 'tx':
            self.buttonType = 'file'
        elif info['ext'] == 'ex':
            self.buttonType = 'exe'
        self.setMyStyle()

    # Double-click handler.
    def mouseDoubleClickEvent(self, e):
        # Executable: load its text and run it.
        if self.buttonType == 'exe':
            _file = mydisk.open_file(self.info['path'])
            if not _file:
                # NOTE(review): execution still proceeds after this error box;
                # _file['text'] will then fail -- confirm intended behaviour.
                self.errorBox('打开文件错误')
            # NOTE(review): master.master.master resolves to Explorer.master;
            # Explorer.execute itself takes no argument, so this presumably
            # targets a different execute() on the app master -- confirm.
            self.master.master.master.execute(_file['text'])
            return
        # Folder: enter it.
        if self.buttonType == 'folder':
            if mydisk.get_block(self.info['path'])[0] == -1:
                return
            self.master.path = self.info['path']
            self.master.refresh()
            return
        # Plain file: open it in the editor.
        self.editMenuDialog()

    # Single-click handler: left selects, right opens the context menu.
    def mousePressEvent(self, e):
        if e.button() == Qt.LeftButton:
            self.leftClicked()
        else:
            self.buttonContext()

    # Context menu for this entry.
    def buttonContext(self):
        menu = QMenu()
        if self.buttonType != 'folder':
            menu.addAction(self.editMenu())
            menu.addAction(self.cutMenu())
            menu.addAction(self.copyMenu())
            menu.addSeparator()
            menu.addAction(self.deleteMenu())
            menu.addSeparator()
            menu.addAction(self.attributeMenu())
        else:
            menu.addAction(self.cutMenu())
            menu.addAction(self.copyMenu())
            menu.addAction(self.renameMenu())
            menu.addAction(self.deleteMenu())
        menu.exec_(QCursor.pos())

    # "Edit" action.
    def editMenu(self):
        item = QAction('&编辑(E)', self)
        item.triggered.connect(self.editMenuDialog)
        return item

    # Open the text editor for this file.
    def editMenuDialog(self):
        _file = mydisk.open_file(self.info['path'])
        if not _file:
            return
        self.text_edit = te.TextEdit(
            self.master.buttonList,
            [mydisk.join_name(_file['name'], _file['ext']), _file['text'], self.info['attribute']],
            'edit'
        )
        self.text_edit.after_close.connect(self.editMenuFunction)

    # Write the edited content back to disk.
    def editMenuFunction(self, info: dict):
        mydisk.modify_file(self.info['path'], info)
        self.master.refresh()
        self.master.master.leftTree.refresh()

    # "Cut" action.
    def cutMenu(self):
        item = QAction('&剪切(X)', self)
        item.triggered.connect(self.cutFunction)
        return item

    # "Copy" action.
    def copyMenu(self):
        item = QAction('&复制(C)', self)
        # item.setShortcut(QKeySequence(QKeySequence.Copy))
        # item.setShortcutContext(Qt.WidgetWithChildrenShortcut)
        item.triggered.connect(self.copyFunction)
        return item

    # Put this entry on the clipboard marked as a copy.
    def copyFunction(self):
        self.info['operation'] = 'copy'
        self.master.clipboard = self.info

    # Put this entry on the clipboard marked as a cut.
    def cutFunction(self):
        self.info['operation'] = 'cut'
        self.master.clipboard = self.info

    # "Delete" action.
    def deleteMenu(self):
        item = QAction('&删除(D)', self)
        item.triggered.connect(self.deleteFunction)
        return item

    # "Rename folder" action.
    def renameMenu(self):
        item = QAction('&重命名(M)', self)
        item.triggered.connect(self.renameFunction)
        return item

    # Prompt for and apply a new folder name.
    def renameFunction(self):
        text, ok = QInputDialog.getText(self, '重命名文件夹', '输入文件夹名:')
        if not mydisk.is_dir_name(text):
            self.errorBox('文件夹名不合法!')
            return
        for i in self.master.buttonList:
            if i.text() == text:
                self.errorBox('文件夹名重复!')
                return
        if ok and len(text) != 0:
            mydisk.modify_dir(self.info['path'], text)
            self.master.refresh()
            self.master.master.leftTree.refresh()

    # "Attributes" action.
    def attributeMenu(self):
        item = QAction('&属性(R)', self)
        item.triggered.connect(self.attributeDialog)
        return item

    # Open the attribute-editing dialog.
    def attributeDialog(self):
        self.attr = AttributeBox(self.info['attribute'])
        self.attr.show()
        self.attr.after_close.connect(self.attributeChange)

    # Persist the new attribute bits to disk.
    def attributeChange(self, attr: int):
        print(mydisk.change(self.info['path'], attr))
        self.master.refresh()

    # Left-click: highlight this button, clear every other selection.
    def leftClicked(self):
        for i in self.master.buttonList:
            i.isFocused = False
            i.setStyleSheet('''
                QToolButton{
                    background-color:white;
                    border: 1px solid white;
                }
                QToolButton:hover{
                    background-color: #e5f3ff;
                }
            ''')
        self.isFocused = True
        self.setStyleSheet('''
            QToolButton{
                background-color:#e5f3ff;
                border: 1px solid #99d1ff;
            }
            QToolButton:hover{
                background-color: #e5f3ff;
            }
        ''')

    # Delete this entry from disk.
    def deleteFunction(self):
        if self.info['attribute'] == 8:
            mydisk.delete_dir(self.info['path'])
        else:
            # System files (attribute bit 2) must not be deleted.
            if self.info['attribute'] & 2 == 0:
                mydisk.delete_file(self.info['path'])
            else:
                self.errorBox('系统文件不能删除!')
        self.master.refresh()
        self.master.master.leftTree.refresh()

    # Error message box.
    def errorBox(self, mes: str):
        msgBox = QMessageBox(
            QMessageBox.Warning,
            "警告!",
            mes,
            QMessageBox.NoButton,
            self
        )
        msgBox.addButton("确认", QMessageBox.AcceptRole)
        msgBox.exec_()

    def setMyStyle(self):
        self.setStyleSheet('''
            QToolButton:hover{
                background-color: #e5f3ff;
            }
        ''')
class AttributeBox(QWidget):
    """Small dialog for editing a file's attribute bits.

    Bit 1 = read-only, bit 2 = system file, bit 4 = ordinary file.
    Emits `after_close` with the chosen attribute value on confirm.
    """

    after_close = pyqtSignal(int)

    def __init__(self, attribute: int = 0):
        super().__init__()
        self.readOnlyBox = QCheckBox('只读')
        self.systemBox = QRadioButton('系统文件')
        self.fileBox = QRadioButton('普通文件')
        self.confimButton = QPushButton()
        self.confimButton.setText('确认')
        self.confimButton.clicked.connect(self.confimFunction)
        self.cancelButton = QPushButton('取消')
        self.cancelButton.setText('取消')
        self.cancelButton.clicked.connect(self.close)
        grid = QGridLayout()
        for col, widget in enumerate((self.readOnlyBox, self.systemBox, self.fileBox)):
            grid.addWidget(widget, 0, col)
        grid.addWidget(self.confimButton, 1, 1)
        grid.addWidget(self.cancelButton, 1, 2)
        self.setLayout(grid)
        # Pre-check the widgets matching the current attribute bits.
        if attribute & 1:
            self.readOnlyBox.setChecked(True)
        if attribute & 2:
            self.systemBox.setChecked(True)
        if attribute & 4:
            self.fileBox.setChecked(True)

    def confimFunction(self):
        """Assemble the attribute value from the widgets, emit it and close."""
        chosen = 2 if self.systemBox.isChecked() else 4
        if self.readOnlyBox.isChecked():
            chosen += 1
        self.after_close.emit(chosen)
        self.close()
def main():
    """Launch the file-explorer application."""
    app = QApplication(sys.argv)
    # Bug fix: Explorer.__init__ requires a `master` argument, so the
    # original Explorer() call raised TypeError at startup. No owning
    # widget exists at this point, so pass None.
    ex = Explorer(None)
    ex.show()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
|
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST data set (downloads into MNIST_data/ on first run).
# Comments translated from Chinese.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Print the labels of the first 20 training images.
for i in range(20):
    # One-hot label vector, e.g. (0, 1, 0, 0, 0, 0, 0, 0, 0, 0).
    one_hot_label = mnist.train.labels[i, :]
    # np.argmax recovers the original integer label from the one-hot vector.
    label = np.argmax(one_hot_label)
    print('mnist_train_%d.jpg label: %d' % (i, label))
'''
mnist_train_0.jpg label: 7
mnist_train_1.jpg label: 3
mnist_train_2.jpg label: 4
mnist_train_3.jpg label: 6
mnist_train_4.jpg label: 1
mnist_train_5.jpg label: 8
mnist_train_6.jpg label: 1
mnist_train_7.jpg label: 0
mnist_train_8.jpg label: 9
mnist_train_9.jpg label: 8
mnist_train_10.jpg label: 0
mnist_train_11.jpg label: 3
mnist_train_12.jpg label: 1
mnist_train_13.jpg label: 2
mnist_train_14.jpg label: 7
mnist_train_15.jpg label: 0
mnist_train_16.jpg label: 2
mnist_train_17.jpg label: 9
mnist_train_18.jpg label: 6
mnist_train_19.jpg label: 0
'''
import numpy as np
import math
from MDP.MDP import MDP
from graphics import *
class Simulator:
    """Q-learning trainer for the Pong MDP."""

    def __init__(self, num_games=0, alpha_value=0, gamma_value=0, epsilon_value=0):
        '''
        Setup the Simulator with the provided values.
        :param num_games - number of games to be trained on.
        :param alpha_value - learning rate (1/alpha_value is the decay constant).
        :param gamma_value - Discount Factor.
        :param epsilon_value - Probability value for the epsilon-greedy approach.
        '''
        self.num_games = num_games
        self.epsilon_value = epsilon_value
        self.alpha_value = alpha_value
        self.gamma_val = gamma_value
        # Q table: axis 0 is the action (3 actions); the remaining axes index
        # the discretized state produced by MDP.discretize_state.
        # NOTE(review): axis meanings inferred from the indexing below --
        # confirm against the MDP class.
        self.Q = np.zeros((3, 144, 2, 3, 12, 1))
        self.arr_states = []

    def f_function(self, mdpInstance):
        '''
        Choose an action with an epsilon-greedy policy.
        :return action selected: 0 = no move, 1 = up, 2 = down
        '''
        if np.random.random() < self.epsilon_value:
            # Explore. Bug fix: randint(low=0, high=2) could only yield 0 or 1,
            # so action 2 ("down") was never explored; high=3 covers all three.
            return int(np.random.randint(low=0, high=3))
        # Exploit: pick the action with the highest Q-value for this state.
        discrete = MDP.discretize_state(mdpInstance)
        curr_state = self.Q[:, int(discrete[0]), discrete[1], discrete[2], int(discrete[3]), discrete[4]]
        # Bug fix: the original scanned with max_val initialised to -1, which
        # left action_selected as None whenever every Q-value was <= -1.
        return int(np.argmax(curr_state))

    def train_agent(self, should_show_gui):
        '''
        Train the agent over self.num_games games, optionally with a GUI,
        and print the average number of balls returned per game.
        '''
        if should_show_gui:
            win = GraphWin('Pong game', 500, 500)
        ball_count = 0
        for i in range(self.num_games):
            if should_show_gui:
                mdpInstance = MDP(0.5, 0.5, 0.03, 0.01, 0.5 - .2 / 2, win)
            else:
                mdpInstance = MDP(0.5, 0.5, 0.03, 0.01, 0.5 - .2 / 2, None)
            self.play_game(mdpInstance)
            ball_count += MDP.get_ball_count(mdpInstance)
        if should_show_gui:
            win.close()
        print("average: ", float(ball_count) / float(self.num_games))

    def play_game(self, mdpInstance):
        '''
        Simulate one full game (until the agent loses), updating Q with the
        standard Q-learning rule: Q += alpha * (r + gamma * max_a' Q' - Q).
        '''
        didLose = False
        while didLose is False:
            prev_tuple = MDP.discretize_state(mdpInstance)
            self.arr_states.append(prev_tuple)
            prev_action = self.f_function(mdpInstance)
            shouldReward = MDP.simulate_one_time_step(mdpInstance, prev_action)
            new_tuple = MDP.discretize_state(mdpInstance)
            prev_idx = (prev_action, int(prev_tuple[0]), prev_tuple[1], prev_tuple[2], int(prev_tuple[3]), prev_tuple[4])
            if new_tuple[4] == 1:
                # Terminal (ball lost): reward -1, no future value.
                error = -1 + self.gamma_val * 0 - self.Q[prev_idx]
                self.Q[prev_idx] += self.alpha_value * error
                didLose = True
                break
            max_state = self.Q[:, int(new_tuple[0]), new_tuple[1], new_tuple[2], int(new_tuple[3]), new_tuple[4]]
            # Bug fix: the original scan started at -1 and therefore reported
            # a wrong bootstrap maximum when every Q-value was <= -1.
            max_val = float(np.max(max_state))
            # Reward +1 when the paddle returned the ball, else 0.
            reward = 1 if shouldReward else 0
            error = reward + self.gamma_val * max_val - self.Q[prev_idx]
            self.Q[prev_idx] += self.alpha_value * error

    def change_parameters(self, epsilon, num_games):
        """Update the exploration rate and game count for a new training run."""
        self.epsilon_value = epsilon
        self.num_games = num_games
|
from functools import wraps
import requests
from jose import jwt
import json
import logging
import os
from flask import Flask, request, jsonify, _request_ctx_stack, redirect, url_for, session
# Error handler
# Error handler
class AuthError(Exception):
    """Authentication failure carrying a JSON-serializable error payload
    and the HTTP status code that should be returned to the client."""

    def __init__(self, error, status_code):
        # Dict describing the failure, e.g. {"code": ..., "description": ...}.
        self.error = error
        # HTTP status to send (typically 401).
        self.status_code = status_code
class AadAuth:
    """Azure AD OAuth2 helper for a Flask app.

    Registers a /callback route implementing the authorization-code flow and
    exposes `requires_auth`, a decorator that validates bearer tokens (taken
    from the Authorization header or the Flask session) against the tenant's
    published signing keys, redirecting to AAD login when no token is present.
    """

    def __init__(self, flask_app, tenant_name, tenant_id, api_audience, srv_app_client_name, srv_app_client_id, srv_app_client_secret) -> None:
        self.app = flask_app
        self.TENANT_NAME = tenant_name
        self.TENANT_ID = tenant_id
        self.API_AUDIENCE = api_audience
        # Flask-session key under which the full "Bearer <token>" is stored.
        self.session_token_key = "auth_token"
        # NOTE(review): this is the empty string, so the pre-login URL is
        # stored under session[""]. It works, but it was probably meant to be
        # a named key -- confirm before changing.
        self.session_login_origin = ""
        self.SRV_APP_CLIENT_NAME = srv_app_client_name
        self.SRV_APP_CLIENT_ID = srv_app_client_id
        self.SRV_APP_CLIENT_SECRET = srv_app_client_secret
        self.AUTH_CALLBACK_ENDPOINT = "callback"

        @self.app.route(f"/{self.AUTH_CALLBACK_ENDPOINT}")
        def login_callback():
            # OAuth2 redirect target: exchange the auth code for an access
            # token, stash it in the session and bounce back to the URL the
            # user originally requested.
            aad_code = request.args.get('code')
            srv_app_client_redirect_uri = request.url_root + "callback"
            headers = {
                'Accept': '*/*',
                'Cache-Control': 'no-cache',
                'Connection': 'keep-alive',
                'Content-Type': 'application/x-www-form-urlencoded',
                'Host': 'login.microsoftonline.com',
                'accept-encoding': 'gzip, deflate',
                'cache-control': 'no-cache'
            }
            payload = {
                'redirect_uri': srv_app_client_redirect_uri,
                'client_id': self.SRV_APP_CLIENT_ID,
                'grant_type': 'authorization_code',
                'code': aad_code,
                'client_secret': self.SRV_APP_CLIENT_SECRET,
                'scope': f"https://{self.SRV_APP_CLIENT_NAME}.{self.TENANT_NAME}.onmicrosoft.com/user_impersonation"
            }
            url = f"https://login.microsoftonline.com/{self.TENANT_NAME}.onmicrosoft.com/oauth2/v2.0/token"
            rsp = requests.request("POST", url, headers=headers, data=payload)
            json_data = json.loads(rsp.text)
            token_value = json_data["access_token"]
            token_type = json_data["token_type"]
            auth_token_full = f"{token_type} {token_value}"
            session[self.session_token_key] = auth_token_full
            # url_ree = url_for("vmInfo")
            # Return to the URL captured by requires_auth before login.
            url_ree = session.get(self.session_login_origin)
            return redirect(location=url_ree, code=301)

        @self.app.errorhandler(AuthError)
        def handle_auth_error(ex):
            # Serialize AuthError payloads into JSON HTTP responses.
            print('handling error')
            response = jsonify(ex.error)
            response.status_code = ex.status_code
            return response

    def get_token_auth_header(self):
        """Obtains the Access Token from the Authorization Header

        Raises AuthError (401) when the header is missing the "Bearer" scheme,
        has no token, or has extra parts.
        """
        auth = self.get_bearer_token()
        parts = auth.split()
        if parts[0].lower() != "bearer":
            raise AuthError({"code": "invalid_header",
                             "description":
                             "Authorization header must start with"
                             " Bearer"}, 401)
        elif len(parts) == 1:
            raise AuthError({"code": "invalid_header",
                             "description": "Token not found"}, 401)
        elif len(parts) > 2:
            raise AuthError({"code": "invalid_header",
                             "description":
                             "Authorization header must be"
                             " Bearer token"}, 401)
        token = parts[1]
        return token

    def get_bearer_token(self):
        """Return the raw bearer string from the Authorization header, falling
        back to the session copy; None when neither is present."""
        auth_header = request.headers.get("Authorization", None)
        auth_ses = session.get(self.session_token_key)
        if auth_header:
            auth = auth_header
        elif auth_ses:
            auth = auth_ses
        else:
            auth = None
        return auth

    def trigger_login(self):
        """Drop any stale session token and redirect the client to the AAD
        authorization endpoint (authorization-code flow)."""
        if session.get(self.session_token_key):
            session.pop(self.session_token_key)
        srv_app_client_redirect_uri = request.url_root + self.AUTH_CALLBACK_ENDPOINT
        request_response_code_url_with_id = f"https://login.microsoftonline.com/{self.TENANT_ID}/oauth2/v2.0/authorize" \
            "?response_type=code" + \
            f"&client_id={self.SRV_APP_CLIENT_ID}" + \
            f"&redirect_uri={srv_app_client_redirect_uri}" + \
            "&scope=openid"
        return redirect(request_response_code_url_with_id, code=302)

    def requires_auth(self, f):
        """Determines if the Access Token is valid

        Decorator: validates the RS256 signature against the tenant's JWKS,
        checks audience and issuer, stores the decoded payload on the request
        context, and redirects to login when no/expired token is found.
        """
        @wraps(f)
        def decorated(*args, **kwargs):
            try:
                # Remember where the user was heading so the callback can
                # redirect back after login.
                session[self.session_login_origin] = request.base_url
                auth = self.get_bearer_token()
                if not auth:
                    return self.trigger_login()
                token = self.get_token_auth_header()
                # Fetch the tenant's current signing keys (JWKS).
                jsonurl = requests.get(
                    "https://login.microsoftonline.com/" + self.TENANT_ID + "/discovery/v2.0/keys")
                jwks = json.loads(jsonurl.content)
                unverified_header = jwt.get_unverified_header(token)
                rsa_key = {}
                # Select the JWKS entry matching the token's key id.
                for key in jwks["keys"]:
                    if key["kid"] == unverified_header["kid"]:
                        rsa_key = {
                            "kty": key["kty"],
                            "kid": key["kid"],
                            "use": key["use"],
                            "n": key["n"],
                            "e": key["e"]
                        }
            except Exception:
                raise AuthError({"code": "invalid_header",
                                 "description":
                                 "Unable to parse authentication"
                                 " token."}, 401)
            if rsa_key:
                try:
                    payload = jwt.decode(
                        token=token,
                        key=rsa_key,
                        algorithms=["RS256"],
                        audience=self.API_AUDIENCE,
                        issuer="https://sts.windows.net/" + self.TENANT_ID + "/"
                    )
                except jwt.ExpiredSignatureError:
                    # raise AuthError({"code": "token_expired",
                    #                  "description": "token is expired"}, 401)
                    # Expired tokens trigger a fresh login instead of a 401.
                    return self.trigger_login()
                except jwt.JWTClaimsError:
                    raise AuthError({"code": "invalid_claims",
                                     "description":
                                     "incorrect claims,"
                                     "please check the audience and issuer"}, 401)
                except Exception:
                    raise AuthError({"code": "invalid_header",
                                     "description":
                                     "Unable to parse authentication"
                                     " token."}, 401)
                # Expose the validated claims to the wrapped view.
                _request_ctx_stack.top.current_user = payload
                return f(*args, **kwargs)
            raise AuthError({"code": "invalid_header",
                             "description": "Unable to find appropriate key"}, 401)
        return decorated
# Controllers API
# def requires_scope(required_scope):
# """Determines if the required scope is present in the Access Token
# Args:
# required_scope (str): The scope required to access the resource
# """
# token = get_token_auth_header()
# unverified_claims = jwt.get_unverified_claims(token)
# if unverified_claims.get("scope"):
# token_scopes = unverified_claims["scope"].split()
# for token_scope in token_scopes:
# if token_scope == required_scope:
# return True
# return False
|
""" Создать программно файл в текстовом формате,
записать в него построчно данные, вводимые пользователем.
Об окончании ввода данных свидетельствует пустая строка.
"""
str = input('Введите строку для записи в файл: ')
file = open("lesson5_1.txt", 'a', encoding="utf-8")
while str:
file.write(str)
str = input("Введите новую строку. Если строка пуста, запись завершиться. ")
|
'''
Created on 24 Jan. 2019
@author: smithea3
'''
import requests
import json
import time
import dateutil.parser
import datetime
class APIinteraction(object):
'''
classdocs
contains functions for interacting with HP's warranty API.
Token and JSON related code sourced from example code provided from HP.
Remainder has been adapted and rewritten to be modular from same example
'''
    def __init__(self, assetDictionary):
        '''
        Constructor
        :param assetDictionary: payload of assets (serial numbers etc.) sent
            to the warranty batch-job endpoint by batchJob().
        '''
        self.url = 'https://css.api.hp.com'
        self.assetDictionary = assetDictionary
        # NOTE(review): placeholder credentials -- replace with real values,
        # ideally loaded from configuration/environment rather than hard-coded.
        self.apiKey = 'api key here'
        self.apiSecret = 'api secret here'
        self.token = ''    # OAuth bearer token, populated by getToken()
        self.job = ''      # batch-job descriptor JSON, populated by batchJob()
        self.results = ''  # warranty results JSON, populated by jobMonitor()
# get token from HP API using Key and Secret
def getToken(self):
tokenBody = { 'apiKey': self.apiKey, 'apiSecret': self.apiSecret, 'grantType': 'client_credentials', 'scope': 'warranty' }
tokenHeaders = { 'Accept': 'application/json' }
tokenResponse = requests.post((self.url + '/oauth/v1/token'), data=tokenBody, headers=tokenHeaders)
tokenJson = tokenResponse.json()
self.token = tokenJson['access_token']
# creates batch job and sends request to HP
    def batchJob(self):
        """Submit self.assetDictionary as a new batch warranty job.

        Stores the job descriptor JSON in self.job, prints progress to the
        console, then sleeps an initial period scaled by the API's estimated
        completion time before the caller starts polling.
        """
        jobHeaders = {
            'Accept': 'application/json',
            'Authorization': 'Bearer ' + self.token,
            'Content-Type': 'application/json'
        }
        print('Creating new batch job...')
        self.job = requests.post(self.url + '/productWarranty/v2/jobs/', data=json.dumps(self.assetDictionary), headers=jobHeaders).json()
        # Feedback to console for user
        print('Batch job created successfully.')
        print('--------------------')
        print('Job ID: ' + self.job['jobId'])
        print('Estimated time in seconds to completion: ' + str(self.job['estimatedTime']))
        print('')
        # Give the job a head start before polling begins.
        if (self.job['estimatedTime'] > 1200):
            time.sleep(40)
        else:
            time.sleep(20)
def jobMonitor(self):
headers = {
'Authorization': 'Bearer ' + self.token,
'Accept-Encoding': 'gzip,deflate'
}
status = 'incomplete'
while (status == 'incomplete'):
monitorResponse = requests.get(self.url + '/productWarranty/v2/jobs/' + self.job['jobId'], headers=headers)
monitor = monitorResponse.json()
if (monitor['status'] != "completed"):
if (monitor['estimatedTime'] > 1200):
print(monitor)
print('Estimated time in seconds to completion: ' + str(monitor['estimatedTime']) + '\nNext job check in 10 minutes...\n')
time.sleep(200)
elif (monitor['estimatedTime'] > 600):
print(monitor)
print('Estimated time in seconds to completion: ' + str(monitor['estimatedTime']) + '\nNext job check in 5 minutes...\n')
time.sleep(100)
else:
print(monitor)
print('Estimated time in seconds to completion: ' + str(monitor['estimatedTime']) + '\nNext job check in 1 minute...\n')
time.sleep(10)
else:
status = 'complete'
self.results = requests.get(self.url + '/productWarranty/v2/jobs/' + self.job['jobId'] + '/results', headers=headers).json()
print('Batch job complete: \n')
def createJSONFile(self, sFileSaveLoc):
try:
with open(sFileSaveLoc + self.job['jobId'] + '.json', 'w') as outFile:
json.dump(self.results, outFile)
except Exception as e:
print(e)
def compileResults(self):
returnList = []
todaysDate = datetime.date.today()
for r in self.results:
warrantyStatus = 'No warranty'
serialNumber = r['product']['serialNumber']
# documentation for JSON fields available at HP Developers portal
for offer in r['offers']:
try:
if (offer.get('serviceObligationLineItemEndDate') and not (offer['offerDescription'] == 'Wty: HP Support for Initial Setup')):
endDateParsed = dateutil.parser.parse(offer['serviceObligationLineItemEndDate']).date()
if (endDateParsed > todaysDate):
warrantyStatus = 'Warranty active'
warrantyEndDate = endDateParsed
elif (endDateParsed < todaysDate):
warrantyStatus = 'Warranty Expired'
warrantyEndDate = endDateParsed
except: # missing records
warrantyStatus = 'ERR: unable to retrieve'
warrantyEndDate = 'ERR: unable to retrieve'
returnList.append({'sn' : serialNumber,
'Warranty_Status' : warrantyStatus,
'Warranty_End_Date' : warrantyEndDate})
self.results = returnList
|
"""Strip ideal tables and create .csv charts from them."""
import pandas as pd
import xmlStaticOperators
import xml.etree.ElementTree as ET
class xmlTableStripper(object):
    """Strip one ideal table zone into a pandas DataFrame and export it as .csv."""

    def __init__(self, out_path, out_fiche, out_page, zone_element):
        self.out_path = out_path
        self.out_fiche = out_fiche
        self.out_page = out_page
        self.zone_element = zone_element
        # Pipeline: measure the grid, build an empty frame, pull the cell
        # text out of the XML, then fill the frame (and write the csv).
        self.grid_dimensions = self.define_table_structure()
        self.table_dataframe = self.create_pandas_dataframe()
        self.table_cell_dictionary = self.strip_table_data()
        self.table_keys = self.populate_data_frame()

    def define_table_structure(self):
        """Count <gridCol>/<gridRow> tags under <gridTable>; return [columns, rows]."""
        grid = self.zone_element.find('.//gridTable')
        tags = [node.tag for node in grid.iter()]
        return [tags.count('gridCol'), tags.count('gridRow')]

    def create_pandas_dataframe(self):
        """Return an empty DataFrame shaped rows x columns like the grid."""
        n_cols, n_rows = self.grid_dimensions
        return pd.DataFrame(index=list(range(n_rows)), columns=list(range(n_cols)))

    def strip_table_data(self):
        """Map cell ordinal -> [column, row, joined word text] for every <cellZone>."""
        cells = {}
        for ordinal, cell in enumerate(self.zone_element.findall('.//cellZone')):
            words = []
            for node in cell.iter():
                if node.tag == 'wd':
                    # Normalise missing text to '' so the join below cannot fail.
                    node.text = xmlStaticOperators.none_to_empty(node.text)
                    words.append(node.text)
            cells[ordinal] = [cell.get('gridColFrom'), cell.get('gridRowFrom'), ' '.join(words)]
        return cells

    def populate_data_frame(self):
        """Write each stripped cell into the DataFrame, export it, and return
        the values found in column 0 (the row keys)."""
        row_keys = []
        for col_str, row_str, text in self.table_cell_dictionary.values():
            column = int(col_str)
            row = int(row_str)
            self.table_dataframe.at[row, column] = text
            if column == 0:
                row_keys.append(text)
        xmlStaticOperators.data_to_csv(self.table_dataframe, True, True, self.out_path, self.out_fiche,
                                       self.out_page, False, False, 'table_as_csv')
        return row_keys
|
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render
from django.http import HttpResponse
from User.models import User, Star
import requests
import os
import gc
import random
import time
import json
import sys
sys.path.append('...')
from recog import sourlab_face
# Load the star face-feature database once at import time, then build the
# recognizer instance shared by all requests.
# NOTE(review): presumably 12 is a model/threshold parameter -- confirm in recog.
sourlab_face.load_feature()
recog1 = sourlab_face(12)
def getFileType(filename):
    """Return 'jpg', 'png' or 'unknown' by sniffing the file's magic bytes.

    filename -- path to the file to inspect.
    Only the first 4 bytes are read; short or unrecognised files yield 'unknown'.
    """
    JPG_MAGIC = (0xFF, 0xD8, 0xFF)
    PNG_MAGIC = (0x89, 0x50, 0x4E, 0x47)
    # 'with' closes the handle even if read() raises (the original leaked it on error).
    with open(filename, 'rb') as binfile:
        header = tuple(binfile.read(4))
    if header[:3] == JPG_MAGIC:
        return 'jpg'
    if header[:4] == PNG_MAGIC:
        return 'png'
    return 'unknown'
@csrf_exempt
def starmovie(request):
    """Django view: accept an uploaded face photo, match it against the star
    database with recog1 and return the best match plus movie metadata as JSON.

    POST fields: 'user' (username) and 'upfile' (the image).
    Error codes observed in this code: 100 bad image format, -1 no face
    detected, 0 more than one face, 1 success, -5 any other failure.
    NOTE(review): the '...' path fragments look like redacted placeholders for
    the real upload directory -- confirm before deploying.
    """
    if request.method == 'POST':
        try:
            uname = request.POST['user']
            upic = request.FILES.get('upfile')
            # Unique-ish name: millisecond timestamp + small random suffix.
            fileName = str(int(round(time.time() * 1000))) + str(random.randint(1, 100))
            f = open(os.path.join('...', fileName), 'wb')
            for chunk in upic.chunks():
                f.write(chunk)
            f.close()
            fileType = getFileType(os.path.join('...', fileName))
            if fileType == 'unknown':
                res = {"errorCode": 100, "errorMsg": "ImageFormatError"}
                return HttpResponse(json.dumps(res, ensure_ascii=False), content_type="application/json")
            else:
                # Give the stored file its real extension before recognition.
                os.rename(os.path.join('...', fileName), os.path.join('...', fileName + "." + fileType))
                fileName = fileName + "." + fileType
                feature = recog1.recognition(os.path.join('...', fileName))
                print(feature)
                # recognition() returns an int error flag (-1 / other) or a feature vector.
                if isinstance(feature, int):
                    if(feature == -1):
                        res = {"errorCode": -1, "errorMsg": "detect no fase"}
                        return HttpResponse(json.dumps(res), content_type="application/json")
                    else:
                        res = {"errorCode": 0, "errorMsg": "detect more than 2 faces"}
                        return HttpResponse(json.dumps(res), content_type="application/json")
                else:
                    sname, similarity, simg_path = recog1.find_nearest(feature)
                    similarity = round(similarity, 2)
                    print(similarity)
                    # Upsert the user's latest match (EAFP: update if exists, else create).
                    try:
                        user = User.objects.get(username=uname)
                        user.userpic = os.path.join('', fileName)
                        user.starname = sname
                        user.starpic = simg_path
                        user.similarity = similarity
                        user.save()
                    except User.DoesNotExist:
                        newuser = User(username=uname, starname=sname, userpic=os.path.join('...', fileName),
                                       starpic=simg_path, similarity=similarity)
                        newuser.save()
                    finally:
                        # Look up the matched star's movie metadata for the response.
                        star = Star.objects.get(starname=sname)
                        moviedes = star.description
                        moviename = star.moviename
                        print(sname)
                        print(simg_path)
                        print(moviedes)
                        print(moviename)
                        res = {
                            "errorCode": 1,
                            "errorMsg": "success",
                            "similarity": similarity,
                            "starname": sname,
                            "starpic": simg_path,
                            "moviename": moviename,
                            "moviedes": moviedes,
                        }
                        return HttpResponse(json.dumps(res), content_type="application/json")
        except Exception as e:
            # NOTE(review): broad catch hides the real error; only a generic
            # "uploadfail" reaches the client.
            res = {"errorCode": -5, "errorMsg": "uploadfail"}
            return HttpResponse(json.dumps(res), content_type="application/json")
    return HttpResponse('requestfail')
|
import tkinter as tk
from PIL import Image
from PIL import ImageTk
import cv2
import numpy as np
import hashlib
import binascii
import textwrap
from scipy.integrate import odeint
from bisect import bisect_left as bsearch
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sys
from importlib import reload
import os
# --- Tk window with a background image ---
root =tk.Tk()
root.title("Image Encrption using Dynamic DNA Cryptography")
root.configure(background="black")
cwgt=tk.Canvas(root)
cwgt.pack(expand=True, fill=tk.BOTH)
image1=ImageTk.PhotoImage(file="BG.jpeg")  # NOTE(review): BG.jpeg must exist in the CWD
w,h=image1.width(),image1.height()
root.geometry('535x405')
cwgt.img=image1  # keep a reference so Tk does not garbage-collect the image
cwgt.create_image(0, 0, anchor=tk.NW, image=image1)
# --- Lorenz-system state and parameters (x0,y0,z0 are perturbed later from the image key) ---
x0,y0,z0=0,0,0
a, b, c = 10, 2.667, 28
tmax, N = 100, 10000
# --- DNA coding tables: 2 bits <-> base, plus a base-pair XOR table ---
dna={}
dna["00"]="A"
dna["01"]="T"
dna["10"]="G"
dna["11"]="C"
dna["A"]=[0,0]
dna["T"]=[0,1]
dna["G"]=[1,0]
dna["C"]=[1,1]
# XOR-like combination of two bases (used by the xor_operation helpers).
dna["AA"]=dna["TT"]=dna["GG"]=dna["CC"]="A"
dna["AG"]=dna["GA"]=dna["TC"]=dna["CT"]="G"
dna["AC"]=dna["CA"]=dna["GT"]=dna["TG"]="C"
dna["AT"]=dna["TA"]=dna["CG"]=dna["GC"]="T"
global tr  # no-op at module level; 'tr' is never defined or used
print(dna)
class MyButton:
    """Button panel wiring the encrypt / decrypt / fetch actions.

    NOTE(review): all crypto work lives in functions nested inside main();
    they share state through closures and the module globals
    (x0, y0, z0, a, b, c, N, dna). Indentation below was reconstructed from
    the call order because the original formatting was lost.
    """
    def __init__(self, root):
        """Build the four buttons on the given Tk root window."""
        self.f = tk.Frame(root)
        self.f.propagate(0)
        self.f.pack()
        # .place() returns None, so b1..b4 are always None -- kept as-is.
        b1=tk.Button(root,text="Encrypt",bg="lightblue",fg="black", cursor='watch',command=self.main).place(x=150,y=10, width=100,height=30)
        b2=tk.Button(root,text="Decrypt",bg="lightblue",fg="black", cursor='watch',command=self.otherside).place(x=250,y=10, width=100, height=30)
        b3=tk.Button(root,text="Get Encrypted Image",bg="lightblue",fg="black", cursor='watch',command=self.getimg).place(x=150,y=40, width=200, height=30)
        #b2=tk.Button(root,text="recieve",bg="lightblue",fg="black", cursor='watch').place(x=450,y=10, width=100, height=30)
        b4= tk.Button(root, text="Exit", bg="lightblue", fg="BLACK",cursor='watch',command=exit).place(x=230,y=370, width=100, height=30)
    def main(self):
        """Encrypt a user-chosen image (DNA coding + Lorenz chaos), save it as
        enc.jpg, run a local decrypt as a sanity check, then serve the
        directory over HTTP (os.system blocks the GUI)."""
        from tkinter import filedialog
        path = "NULL"
        path = filedialog.askopenfilename()
        if path!="NULL":
            print("Image loaded!",path)
        else:
            print("Error Image not loaded!",path)
        print("hi")
        print(path)
        print("hi")
        image = cv2.imread(path)
        print(path)
        # Split the BGR image into separate channel matrices B, G, R.
        red = image[:,:,2]
        green = image[:,:,1]
        blue = image[:,:,0]
        for values, channel in zip((red, green, blue), (2,1,0)):
            print("done")
            img = np.zeros((values.shape[0], values.shape[1]), dtype = np.uint8)
            img[:,:] = (values)
            if channel == 0:
                B = np.asmatrix(img)
            elif channel == 1:
                G = np.asmatrix(img)
            else:
                R = np.asmatrix(img)
        print("done ok RED \n",R)
        print("done ok GREEN \n",G)
        print("done ok BLUE \n",B)
        img = Image.open(path)
        m, n = img.size
        print("sol")
        print("pixels: {0} width: {2} height: {1} ".format(m*n, m, n))
        pix = img.load()
        # Flatten all RGB bytes and hash them: the SHA-256 digest is the image key.
        plainimage = list()
        print(plainimage)
        for y in range(n):
            for x in range(m):
                for k in range(0,3):
                    plainimage.append(pix[x,y][k])
        key = hashlib.sha256()
        key.update(bytearray(plainimage))
        kn=key.hexdigest()
        print(kn)
        def update_lorents(kn):
            """Perturb the global Lorenz initial state (x0,y0,z0) from the key digest."""
            key_bin = bin(int(kn, 16))[2:].zfill(256) #covert hex key digest to binary
            k={} #key dictionary
            key_32_parts=textwrap.wrap(key_bin, 8) #slicing key into 8-bit parts
            num=1
            for i in key_32_parts:
                k["k{0}".format(num)]=i
                num = num + 1
            t1 = t2 = t3 = 0
            # XOR-fold three groups of key bytes into three offsets.
            for i in range (1,12):
                t1=t1^int(k["k{0}".format(i)],2)
            for i in range (12,23):
                t2=t2^int(k["k{0}".format(i)],2)
            for i in range (23,33):
                t3=t3^int(k["k{0}".format(i)],2)
            global x0 ,y0, z0
            x0=x0 + t1/256
            y0=y0 + t2/256
            z0=z0 + t3/256
            print(x0)
            print("done")
        update_lorents(kn)
        def dna_encode(b,g,r):
            """Encode each channel's bits as DNA bases (2 bits -> 1 base)."""
            print("en to ds")
            b = np.unpackbits(b,axis=1)
            g = np.unpackbits(g,axis=1)
            r = np.unpackbits(r,axis=1)
            m,n = b.shape
            r_enc= np.chararray((m,int(n/2)))
            g_enc= np.chararray((m,int(n/2)))
            b_enc= np.chararray((m,int(n/2)))
            for color,enc in zip((b,g,r),(b_enc,g_enc,r_enc)):
                idx=0
                for j in range(0,m):
                    for i in range(0,n,2):
                        enc[j,idx]=dna["{0}{1}".format(color[j,i],color[j,i+1])]
                        idx+=1
                        if (i==n-2):
                            idx=0
                            break
            b_enc=b_enc.astype(str)
            g_enc=g_enc.astype(str)
            r_enc=r_enc.astype(str)
            print("ddone")
            return b_enc,g_enc,r_enc
        bn,gn,rn=dna_encode(B,G,R)
        print("blue\n",bn)
        print("green\n",gn)
        print("red \n",rn)
        def key_matrix_encode(kn,b):
            """Tile the 256-bit key over the image bit-plane and DNA-encode it."""
            b = np.unpackbits(b,axis=1)
            m,n = b.shape
            key_bin = bin(int(kn, 16))[2:].zfill(256)
            Mk = np.zeros((m,n),dtype=np.uint8)
            x=0
            for j in range(0,m):
                for i in range(0,n):
                    Mk[j,i]=key_bin[x%256]
                    x+=1
            Mk_enc=np.chararray((m,int(n/2)))
            idx=0
            for j in range(0,m):
                for i in range(0,n,2):
                    if idx==(n/2):
                        idx=0
                    Mk_enc[j,idx]=dna["{0}{1}".format(Mk[j,i],Mk[j,i+1])]
                    idx+=1
            Mk_enc=Mk_enc.astype(str)
            return Mk_enc
        mmk=key_matrix_encode(kn,B)
        print("fi\n",mmk)
        def xor_operation(b,g,r,mk):
            """Combine each DNA channel with the key matrix via the dna pair table."""
            m,n = b.shape
            bx=np.chararray((m,n))
            gx=np.chararray((m,n))
            rx=np.chararray((m,n))
            b=b.astype(str)
            g=g.astype(str)
            r=r.astype(str)
            for i in range(0,m):
                for j in range (0,n):
                    bx[i,j] = dna["{0}{1}".format(b[i,j],mk[i,j])]
                    gx[i,j] = dna["{0}{1}".format(g[i,j],mk[i,j])]
                    rx[i,j] = dna["{0}{1}".format(r[i,j],mk[i,j])]
            bx=bx.astype(str)
            gx=gx.astype(str)
            rx=rx.astype(str)
            print("xor",bx)
            return bx,gx,rx
        blue_final, green_final, red_final = xor_operation(bn,gn,rn,mmk)
        def lorenz(X, t, a, b, c):
            """Lorenz system right-hand side for odeint."""
            x, y, z = X
            x_dot = -a*(x - y)
            y_dot = c*x - y - x*z
            z_dot = -b*z + x*y
            return x_dot, y_dot, z_dot
        def gen_chaos_seq(m,n):
            """Integrate the Lorenz system into three chaotic sequences of length m*n*4."""
            global x0,y0,z0,a,b,c,N
            N=m*n*4
            # NOTE(review): these np.array((m,n*4)) shells are immediately
            # overwritten by f.T below -- effectively dead code.
            x= np.array((m,n*4))
            y= np.array((m,n*4))
            z= np.array((m,n*4))
            t = np.linspace(0, tmax, N)
            f = odeint(lorenz, (x0, y0, z0), t, args=(a, b, c))
            x, y, z = f.T
            x=x[:(N)]
            y=y[:(N)]
            z=z[:(N)]
            print("ge",z)
            return x,y,z
        x,y,z=gen_chaos_seq(m,n)
        def sequence_indexing(x,y,z):
            """Rank-transform each chaotic sequence into a permutation of indices."""
            n=len(x)
            fx=np.zeros((n),dtype=np.uint32)
            fy=np.zeros((n),dtype=np.uint32)
            fz=np.zeros((n),dtype=np.uint32)
            seq=sorted(x)
            for k1 in range(0,n):
                t = x[k1]
                k2 = bsearch(seq, t)
                fx[k1]=k2
            seq=sorted(y)
            for k1 in range(0,n):
                t = y[k1]
                k2 = bsearch(seq, t)
                fy[k1]=k2
            seq=sorted(z)
            for k1 in range(0,n):
                t = z[k1]
                k2 = bsearch(seq, t)
                fz[k1]=k2
            print("fx",fx)
            return fx,fy,fz
        fx,fy,fz=sequence_indexing(x,y,z)
        def scramble(fx,fy,fz,b,r,g):
            """Permute each flattened channel by its chaotic index sequence (gather)."""
            p,q=b.shape
            size = p*q
            bx=b.reshape(size).astype(str)
            gx=g.reshape(size).astype(str)
            rx=r.reshape(size).astype(str)
            bx_s=np.chararray((size))
            gx_s=np.chararray((size))
            rx_s=np.chararray((size))
            for i in range(size):
                idx = fz[i]
                bx_s[i] = bx[idx]
            for i in range(size):
                idx = fy[i]
                gx_s[i] = gx[idx]
            for i in range(size):
                idx = fx[i]
                rx_s[i] = rx[idx]
            bx_s=bx_s.astype(str)
            gx_s=gx_s.astype(str)
            rx_s=rx_s.astype(str)
            b_s=np.chararray((p,q))
            g_s=np.chararray((p,q))
            r_s=np.chararray((p,q))
            b_s=bx_s.reshape(p,q)
            g_s=gx_s.reshape(p,q)
            r_s=rx_s.reshape(p,q)
            print("bs\n",b_s)
            return b_s,g_s,r_s
        blue_scrambled,green_scrambled,red_scrambled = scramble(fx,fy,fz,blue_final,red_final,green_final)
        def dna_decode(b,g,r):
            """Decode DNA bases back into bit-planes and pack them into bytes."""
            m,n = b.shape
            r_dec= np.ndarray((m,int(n*2)),dtype=np.uint8)
            g_dec= np.ndarray((m,int(n*2)),dtype=np.uint8)
            b_dec= np.ndarray((m,int(n*2)),dtype=np.uint8)
            for color,dec in zip((b,g,r),(b_dec,g_dec,r_dec)):
                for j in range(0,m):
                    for i in range(0,n):
                        dec[j,2*i]=dna["{0}".format(color[j,i])][0]
                        dec[j,2*i+1]=dna["{0}".format(color[j,i])][1]
            b_dec=(np.packbits(b_dec,axis=-1))
            g_dec=(np.packbits(g_dec,axis=-1))
            r_dec=(np.packbits(r_dec,axis=-1))
            print("oppp",b_dec)
            return b_dec,g_dec,r_dec
        b,g,r=dna_decode(blue_scrambled,green_scrambled,red_scrambled)
        # img,fx,fy,fz,file_path,Mmk,blue,green,red send mmk from keyencodemetrix rgb from decomposemetrix
        def scramble_new(fx,fy,fz,b,g,r):
            """Inverse of scramble: scatter values back to their original positions."""
            print("e to sn")
            p,q=b.shape
            size = p*q
            bx=b.reshape(size)
            gx=g.reshape(size)
            rx=r.reshape(size)
            bx_s=b.reshape(size)
            gx_s=g.reshape(size)
            rx_s=r.reshape(size)
            bx=bx.astype(str)
            gx=gx.astype(str)
            rx=rx.astype(str)
            bx_s=bx_s.astype(str)
            gx_s=gx_s.astype(str)
            rx_s=rx_s.astype(str)
            for i in range(size):
                idx = fz[i]
                bx_s[idx] = bx[i]
            for i in range(size):
                idx = fy[i]
                gx_s[idx] = gx[i]
            for i in range(size):
                idx = fx[i]
                rx_s[idx] = rx[i]
            b_s=np.chararray((p,q))
            g_s=np.chararray((p,q))
            r_s=np.chararray((p,q))
            b_s=bx_s.reshape(p,q)
            g_s=gx_s.reshape(p,q)
            r_s=rx_s.reshape(p,q)
            return b_s,g_s,r_s
        def xor_operation_new(b,g,r,mk):
            """Same pair-table combination as xor_operation (the table is self-inverse)."""
            m,n = b.shape
            bx=np.chararray((m,n))
            gx=np.chararray((m,n))
            rx=np.chararray((m,n))
            b=b.astype(str)
            g=g.astype(str)
            r=r.astype(str)
            for i in range(0,m):
                for j in range (0,n):
                    bx[i,j] = dna["{0}{1}".format(b[i,j],mk[i,j])]
                    gx[i,j] = dna["{0}{1}".format(g[i,j],mk[i,j])]
                    rx[i,j] = dna["{0}{1}".format(r[i,j],mk[i,j])]
            bx=bx.astype(str)
            gx=gx.astype(str)
            rx=rx.astype(str)
            print("com the xorn")
            return bx,gx,rx
        def recover_image(b,g,r,iname):
            """Write the processed channels over the original image and save enc.jpg."""
            img = cv2.imread(iname)
            img[:,:,2] = r
            img[:,:,1] = g
            img[:,:,0] = b
            cv2.imwrite(("enc.jpg"), img)
            print("saved ecrypted image as enc.jpg")
            return img
        img=recover_image(b,g,r,path)
        def decrypt(image,fx,fy,fz,fp,Mk,bt,gt,rt):
            """Reverse the pipeline on `image` and save Recovered.jpg.

            NOTE(review): fp, bt and gt are unused; rt only supplies the shape.
            """
            red = image[:,:,2]
            green = image[:,:,1]
            blue = image[:,:,0]
            p,q = rt.shape
            def dna_encode1(b,g,r):
                """Local copy of dna_encode (bits -> DNA bases)."""
                print("en to ds")
                b = np.unpackbits(b,axis=1)
                g = np.unpackbits(g,axis=1)
                r = np.unpackbits(r,axis=1)
                m,n = b.shape
                r_enc= np.chararray((m,int(n/2)))
                g_enc= np.chararray((m,int(n/2)))
                b_enc= np.chararray((m,int(n/2)))
                for color,enc in zip((b,g,r),(b_enc,g_enc,r_enc)):
                    idx=0
                    for j in range(0,m):
                        for i in range(0,n,2):
                            enc[j,idx]=dna["{0}{1}".format(color[j,i],color[j,i+1])]
                            idx+=1
                            if (i==n-2):
                                idx=0
                                break
                b_enc=b_enc.astype(str)
                g_enc=g_enc.astype(str)
                r_enc=r_enc.astype(str)
                print("ddone")
                return b_enc,g_enc,r_enc
            # NOTE(review): re-encodes the closure matrices B,G,R and only
            # prints the result -- looks like leftover debug output.
            bn,gn,rn=dna_encode(B,G,R)
            print("blue\n",bn)
            print("green\n",gn)
            print("red \n",rn)
            benc,genc,renc=dna_encode1(b,g,r)
            bs,gs,rs=scramble_new(fx,fy,fz,benc,genc,renc)
            # NOTE(review): returns are unpacked in swapped order (bx,rx,gx vs
            # bx,gx,rx) and then green/red are swapped again below -- verify
            # the two swaps really cancel out.
            bx,rx,gx=xor_operation_new(bs,gs,rs,Mk)
            blue,green,red=dna_decode(bx,gx,rx)
            green,red = red, green
            img=np.zeros((p,q,3),dtype=np.uint8)
            img[:,:,0] = red
            img[:,:,1] = green
            img[:,:,2] = blue
            print("RED\n",red)
            print("GREEN\n",green)
            print("BLUE\n",blue)
            cv2.imwrite(("Recovered.jpg"), img)
        decrypt(img,fx,fy,fz,path,mmk,blue,green,red)
        # Serve the working directory (enc.jpg / Recovered.jpg) over HTTP.
        # NOTE(review): this blocks the Tk main loop until the server is killed.
        mycmd='python3 -m http.server 80'
        os.system(mycmd)
        #mycmd.terminate()
        #wg=' wget http://127.0.0.1:8000/enc.jpg'
        #os.system(wg)
        #imm=np.array(imm)
        #print(type(imm))
        # Dead code kept as an (unused) string literal, as in the original:
        '''def split_into_rgb_channels(img):
        print(type(img))
        print(img)
        red = img[:,:,2]
        green = img[:,:,1]
        blue = img[:,:,0]
        return red, green, blue
        r,g,b=split_into_rgb_channels(img)
        print("r",r)
        print("g",g)
        print("b",b)'''
    def getimg(self):
        """Fetch enc.jpg from the hard-coded onion service via torsocks/curl."""
        eimg='torsocks curl -L -o enc.jpg 6jsygadgowg3nst7m5ndywez7hd5rlss2phdxpv44xhucjgowbkc4lid.onion/enc.jpg'
        os.system(eimg)
    def otherside(self):
        """Fetch Recovered.jpg from the onion service (receiver side)."""
        #wg=' curl -L -o enc.jpg http://127.0.0.1:80/enc.jpg'
        sleee="sleep 5"
        os.system(sleee)
        cu='torsocks curl -L -o enccc.jpg 6jsygadgowg3nst7m5ndywez7hd5rlss2phdxpv44xhucjgowbkc4lid.onion/Recovered.jpg'
        os.system(cu)
        cll="clear"
        os.system(cll)
        os.system(sleee)
        #os.system(wg)
        # Dead code kept as an (unused) string literal, as in the original:
        '''m1=' curl -L -o mmk.txt http://127.0.0.1:80/mmk.txt'
        os.system(m1)
        im='curl -L -o img.txt http://127.0.0.1:80/img.txt'
        os.system(im)
        r1=' curl -L -o rr.txt http://127.0.0.1:80/rr.txt'
        os.system(r1)
        g1=' curl -L -o gg.txt http://127.0.0.1:80/gg.txt'
        os.system(g1)
        b1=' curl -L -o bb.txt http://127.0.0.1:80/bb.txt'
        os.system(b1)
        ffx1=' curl -L -o fx1.txt http://127.0.0.1:80/fx1.txt'
        os.system(ffx1)
        ffy1=' curl -L -o fy1.txt http://127.0.0.1:80/fy1.txt'
        os.system(ffy1)
        ffz1=' curl -L -o fz1.txt http://127.0.0.1:80/fz1.txt'
        os.system(ffz1)
        file=open('mmk.txt','r')
        MMK=file.read()
        file.close()
        print(MMK)
        file=open('img.txt','r')
        img11=file.read()
        file.close()
        #iim=list()
        imm=list(img11.split())
        print(type(imm))
        #img=int(img11)
        #with open("img.txt", "r") as f:
        #    for l in f:
        #        print(sum([int(a) for a in l.split()]))
        file=open('rr.txt','r')
        rr=file.read()
        file.close()
        print(rr)
        file=open('gg.txt','r')
        gg=file.read()
        file.close()
        print(gg)
        file=open('bb.txt','r')
        bb=file.read()
        file.close()
        print(bb)
        file=open('fx1.txt','r')
        fx1=file.read()
        file.close()
        print(fx1)
        file=open('fy1.txt','r')
        fy1=file.read()
        file.close()
        print(fy1)
        file=open('fz1.txt','r')
        fz1=file.read()
        file.close()
        print(fz1)
        #[int(i) for i in imm[0].split(',')]
        imm=np.array(imm)
        print(type(imm))
        def split_into_rgb_channels(imm):
        print(type(imm))
        print(imm)
        red = imm[::3]
        green = imm[::2]
        blue = imm[::1]
        return red, green, blue
        r,g,b=split_into_rgb_channels(imm)
        print("red \n",r)
        print("green \n",g)
        print("blue \n",b)'''
# Build the button UI and enter the Tk event loop (blocks until window close).
mb=MyButton(root)
root.mainloop()
|
def ADN(seq):
    """Return the French verdict for a DNA sequence.

    Guilty ("est coupable.") only when the sequence contains "CATA" AND,
    after masking every "CATA" occurrence as "1111", still contains "ATGC"
    (so an "ATGC" overlapping a "CATA" does not count).
    """
    guilty = "est coupable."
    innocent = "n\'est pas coupable."
    if "CATA" not in seq:
        return innocent
    masked = seq.replace("CATA","1111")
    return guilty if "ATGC" in masked else innocent
# Suspect DNA samples (Cluedo-style): each verdict is printed per character.
A = "CCTGGAGGGTGGCCCCACCGGCCGAGACAGCGAGCATATGCAGGAAGCGGCAGGAATAAGGAAAAGCAGCADN"
B = "CTCCTGATGCTCCTCGCTTGGTGGTTTGAGTGGACCTCCCAGGCCAGTGCCGGGCCCCTCATAGGAGAGGADN"
C = "AAGCTCGGGAGGTGGCCAGGCGGCAGGAAGGCGCACCCCCCCAGTACTCCGCGCGCCGGGACAGAATGCCADN"
D = "CTGCAGGAACTTCTTCTGGAAGTACTTCTCCTCCTGCAAATAAAACCTCACCCATGAATGCTCACGCAAG"
M = ADN(A)
print("Le Colonel Moutarde",M)
R = ADN(B)
print("Mlle Rose",R)
P = ADN(C)
print("Mme Pervenche",P)
L = ADN(D)
print("M. Leblanc",L)
from PyQt4 import QtGui
from PyQt4.uic.properties import QtCore
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtGui import *
from PyQt4.QtGui import *
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import numpy as np
import matplotlib.pyplot as plt
import random
import os
import sys
import math
from sklearn.cross_validation import train_test_split
# Quick matplotlib smoke test: two translucent markers on one axes.
fig, ax = plt.subplots()
ax.plot(10, 3, "r", markersize=9, marker=".", alpha=0.2)
ax.plot(9,3, "g", markersize=5, marker="o", alpha=0.2)
plt.show()  # blocks until the plot window is closed
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 26 12:24:32 2021
@author: anura
"""
#BMI Calculator application
from pywebio.input import *
from pywebio.output import *
def BMI_CALC():
    """Prompt for height (cm) and weight (kg) via PyWebIO and display the BMI category.

    NOTE(review): `input` here is pywebio.input.input (star-imported above),
    not the builtin; FLOAT comes from the same module. A height of 0 would
    raise ZeroDivisionError -- no validation is performed.
    """
    height=input("Enter the height in cms",type=FLOAT)
    weight=input("Enter the weight in kg",type=FLOAT)
    # BMI = kg / m^2; height arrives in centimetres, hence the /100.
    bmi=weight/(height/100)**2
    # (upper bound, label) pairs in ascending order; the first match wins.
    bmi_output=[(16,'severely underweight'),(18.5,'underweight'),(25,'normal'),(30,'overweight'),
                (35,'moderately obese'),(float('inf'),'severely obese')]
    for t1,t2 in bmi_output:
        if bmi<=t1:
            put_text("your bmi is :%.1f and the person is :%s "%(bmi,t2))
            break
if __name__=="__main__":
    BMI_CALC()
import pandas as pd
# NOTE(review): relative path -- assumes the script is run from its own directory.
filename = '../newyork/nyc_subway_weather.csv'
subway_df = pd.read_csv(filename)
def correlation(x, y):
    """Pearson correlation of two pandas Series.

    Both series are standardized with the population std (ddof=0) and the
    mean of their elementwise product is returned.
    """
    standardize = lambda s: (s - s.mean()) / s.std(ddof=0)
    return (standardize(x) * standardize(y)).mean()
# Columns of interest from the subway/weather dataset.
entries = subway_df['ENTRIESn_hourly']
cum_entries = subway_df['ENTRIESn']
rain = subway_df['meanprecipi']
temp = subway_df['meantempi']
# Sanity check: perfectly linearly related series -> correlation 1.0.
# Fix: `print correlation(...)` was Python-2 print-statement syntax, a
# SyntaxError under Python 3; print(...) works on both.
x = pd.Series([1, 2, 3, 4])
y = pd.Series([10, 11, 12, 13])
print(correlation(x, y))
"""
print(correlation(entries, rain))
print(correlation(entries, temp))
print(correlation(rain, temp))
print(correlation(entries, cum_entries))
"""
import time, webbrowser
counter = 1
loop_time = 3
while(counter <= 3):
time.sleep(10)
webbrowser.open("https://www.youtube.com/watch?v=5kIe6UZHSXw")
print(counter)
counter = counter + 1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 4 14:49:27 2020
@author: nerohmot
"""
import struct
from PyQt5 import QtWidgets
from . import command_ABC, command_dialog
class SET_BOOT_SEQUENCE_DIALOG(command_dialog):
    """Dialog that edits the 26-byte boot-sequence payload.

    The payload is 13 (sequence, delay) uint8 pairs, one per rail, in the
    fixed order given by _RAIL_NAMES. Each rail has a <NAME>_SEQ and a
    <NAME>_DEL spin box on the dialog.
    """
    # Rail order matches the payload byte order: pair i is bytes 2i / 2i+1.
    _RAIL_NAMES = ('RAIL1', 'RAIL2', 'P25V0D', 'P17V0D', 'N7V0D', 'P15V0A',
                   'N15V0A', 'P5V0D', 'P5V0A', 'N5V0A', 'P3V3D', 'PVLB', 'P5V0R')

    def exec(self):
        """Load the current payload into the spin boxes, then show the dialog."""
        values = struct.unpack('26B', self.parent.payload)
        # Populate every sequence box first, then every delay box, preserving
        # the original widget-update order.
        for index, rail in enumerate(self._RAIL_NAMES):
            getattr(self, rail + '_SEQ').setValue(values[2 * index])
        for index, rail in enumerate(self._RAIL_NAMES):
            getattr(self, rail + '_DEL').setValue(values[2 * index + 1])
        super().exec()

    def accept(self):
        """Pack the spin-box values back into the payload and send the command."""
        flat = []
        for rail in self._RAIL_NAMES:
            flat.append(getattr(self, rail + '_SEQ').value())
            flat.append(getattr(self, rail + '_DEL').value())
        self.parent.payload = struct.pack('26B', *flat)
        self.parent.send()
        self.done(QtWidgets.QDialog.Accepted)
class SET_BOOT_SEQUENCE(command_ABC):
    '''
    Description: With this we can configure the boot sequence.

    Payload layout (26 bytes, identical for request and reply): for each of
    the 13 rails, in the order RAIL1, RAIL2, P25V0D, P17V0D, N7V0D, P15V0A,
    N15V0A, P5V0D, P5V0A, N5V0A, P3V3D, PVLB, P5V0R, one uint8 sequence
    number followed by one uint8 delay flag (wait 100ms after turning on the
    rail). The reply mirrors the request and should match it.
    '''
    command = 0x03
    sub_command = 0x05
    # Rail order matches the payload byte order: pair i is bytes 2i / 2i+1.
    _RAIL_NAMES = ('RAIL1', 'RAIL2', 'P25V0D', 'P17V0D', 'N7V0D', 'P15V0A',
                   'N15V0A', 'P5V0D', 'P5V0A', 'N5V0A', 'P3V3D', 'PVLB', 'P5V0R')
    # Default boot order: (sequence, delay) per rail, same rail order as above.
    payload = struct.pack('26B',
                          0, 250,  # RAIL1
                          1, 250,  # RAIL2
                          2, 100,  # P25V0
                          2, 100,  # P17V0
                          2, 100,  # N7V0D
                          3, 100,  # P15V0
                          3, 100,  # N15V0
                          4, 100,  # P5V0D
                          4, 100,  # P5V0A
                          4, 100,  # N5V0A
                          5, 100,  # P3V3D
                          0, 100,  # PVLB
                          1, 100)  # P5V0R

    def __init__(self, parent):
        super().__init__(parent)
        self.dialog = SET_BOOT_SEQUENCE_DIALOG(__file__, self)

    def receive(self, DA, ACK, RXTX, PAYLOAD):
        """Decode the 26-byte reply and echo every rail's (seq, delay) to the terminal."""
        values = struct.unpack('26B', PAYLOAD)
        line = "Set Boot Sequence Reply :"
        # values alternates seq/delay; pair them back up with their rail names.
        for rail, seq, delay in zip(self._RAIL_NAMES, values[0::2], values[1::2]):
            line += f" {rail} sequence # is {seq} and delay is {delay} ms"
        self.write_to_terminal(line)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.