import time, sys
from random import randint
# this will allow the user to enter X and O into the grid spaces
grid = {1:' ', 2:' ', 3:' ', 4:' ', 5:' ', 6:' ', 7:' ', 8:' ', 9:' '}
# if the player has three X's in a row, haveWonX becomes True, triggering the if statement at the bottom of the code
haveWonO = False
haveWonX = False
counter = 0 # counts how many moves there have been; if there have been 9 moves it must be a tie
choice = 'Y'
def gameAI():
    '''Decide where the AI places its O: block lines where X has two marks, otherwise pick a random free cell.'''
if ((grid[2] == 'X' and grid[3] == 'X') or (grid[4] == 'X' and grid[7] == 'X') or (grid[5] == 'X' and grid[9] == 'X')) and grid[1] != 'O':
return 1
elif ((grid[1] == 'X' and grid[3] == 'X') or (grid[5] == 'X' and grid[8] == 'X')) and grid[2] != 'O':
return 2
elif ((grid[1] == 'X' and grid[2] == 'X') or (grid[6] == 'X' and grid[9] == 'X') or (grid[5] == 'X' and grid[7] == 'X')) and grid[3] != 'O':
return 3
elif ((grid[1] == 'X' and grid[7] == 'X') or (grid[5] == 'X' and grid[6] == 'X')) and grid[4] != 'O':
return 4
elif ((grid[1] == 'X' and grid[9] == 'X') or (grid[2] == 'X' and grid[8] == 'X') or (grid[3] == 'X' and grid[7] == 'X') or (grid[4] == 'X' and grid[6] == 'X')) and grid[5] != 'O':
return 5
elif ((grid[3] == 'X' and grid[9] == 'X') or (grid[4] == 'X' and grid[5] == 'X')) and grid[6] != 'O':
return 6
elif ((grid[1] == 'X' and grid[4] == 'X') or (grid[3] == 'X' and grid[5] == 'X') or (grid[8] == 'X' and grid[9] == 'X')) and grid[7] != 'O':
return 7
elif ((grid[2] == 'X' and grid[5] == 'X') or (grid[7] == 'X' and grid[9] == 'X')) and grid[8] != 'O':
return 8
elif ((grid[1] == 'X' and grid[5] == 'X') or (grid[3] == 'X' and grid[6] == 'X') or (grid[7] == 'X' and grid[8] == 'X')) and grid[9] != 'O':
return 9
while True:
randInt = randint(1, 9)
if grid[randInt] == 'X' or grid[randInt] == 'O':
continue
        else:
            return randInt  # return the chosen cell; the caller places the 'O'
def drawGrid():
print('\t')
print(' ║ ║ ')
print(' %s ║ %s ║ %s ' % (' ' + grid[1], grid[2] + ' ', grid[3] + ' '))
print(' ║ ║ ')
print(' ══════╬═══════╬═══════')
print(' ║ ║ ')
print(' %s ║ %s ║ %s ' % (' ' + grid[4], grid[5] + ' ', grid[6] + ' '))
print(' ║ ║ ')
print(' ══════╬═══════╬═══════')
print(' ║ ║ ')
print(' %s ║ %s ║ %s ' % (' ' + grid[7], grid[8] + ' ', grid[9] + ' '))
print(' ║ ║ ' + '\n')
while choice[0] != 'N':
    print ('\n' + 'This is what the Tic-Tac-Toe grid looks like (starting from position 1 [top-left] all the way to position 9 [bottom-right]):')
drawGrid()
    while not haveWonX and not haveWonO:
if counter > 8:
break
else:
player = int(input("Player 1's turn. Please input where you want to place the 'X': "))
grid[player] = 'X'
drawGrid()
if ((grid[1] == 'X' and grid[2] == 'X' and grid[3] == 'X')
or (grid[1] == 'X' and grid[4] == 'X' and grid[7] == 'X')
or (grid[1] == 'X' and grid[5] == 'X' and grid[9] == 'X')
or (grid[2] == 'X' and grid[5] == 'X' and grid[8] == 'X')
or (grid[3] == 'X' and grid[6] == 'X' and grid[9] == 'X')
or (grid[3] == 'X' and grid[5] == 'X' and grid[7] == 'X')
or (grid[4] == 'X' and grid[5] == 'X' and grid[6] == 'X')
or (grid[7] == 'X' and grid[8] == 'X' and grid[9] == 'X')):
haveWonX = True
break
counter += 1
if counter > 8:
break
else:
print ("This is the computer's turn.")
print ('\n')
time.sleep(0.3)
sys.stdout.write('Thinking')
time.sleep(0.1)
sys.stdout.write('.')
time.sleep(0.1)
sys.stdout.write('.')
time.sleep(0.1)
sys.stdout.write('.')
time.sleep(0.1)
sys.stdout.write('.')
time.sleep(0.1)
sys.stdout.write('.')
print ('\n')
computer = gameAI()
grid[computer] = 'O'
drawGrid()
if ((grid[1] == 'O' and grid[2] == 'O' and grid[3] == 'O')
or (grid[1] == 'O' and grid[4] == 'O' and grid[7] == 'O')
or (grid[1] == 'O' and grid[5] == 'O' and grid[9] == 'O')
or (grid[2] == 'O' and grid[5] == 'O' and grid[8] == 'O')
or (grid[3] == 'O' and grid[6] == 'O' and grid[9] == 'O')
or (grid[3] == 'O' and grid[5] == 'O' and grid[7] == 'O')
or (grid[4] == 'O' and grid[5] == 'O' and grid[6] == 'O')
or (grid[7] == 'O' and grid[8] == 'O' and grid[9] == 'O')):
haveWonO = True
break
counter += 1
if haveWonO == True:
print ('O has won!')
elif haveWonX == True:
print ('X has won!')
elif counter == 9:
print('It\'s a tie!')
choice = input('Try Again? (Y/N): ').upper()
if choice[0] == 'Y':
haveWonO = False
haveWonX = False
counter = 0
grid = {1:' ', 2:' ', 3:' ', 4:' ', 5:' ', 6:' ', 7:' ', 8:' ', 9:' '}
|
import numpy as np
from sklearn.metrics import roc_auc_score
from scipy.signal import hilbert
from scipy.ndimage.filters import gaussian_filter
from scipy.stats import pearsonr
class CorrCoeffIntervalOptimizer(object):
def __init__(self, max_score_fraction=0.8,
use_abs_for_threshold=True):
self.use_abs_for_threshold = use_abs_for_threshold
self.max_score_fraction = max_score_fraction
def optimize(self, epo):
return optimize_segment_ival(epo,
max_score_fraction=self.max_score_fraction,
use_abs_for_threshold=self.use_abs_for_threshold,
mode="corrcoeff")
class AucIntervalOptimizer(object):
def __init__(self, max_score_fraction=0.8,
use_abs_for_threshold=True):
self.use_abs_for_threshold = use_abs_for_threshold
self.max_score_fraction = max_score_fraction
def optimize(self, epo):
return optimize_segment_ival(epo,
max_score_fraction=self.max_score_fraction,
use_abs_for_threshold=self.use_abs_for_threshold,
mode="auc")
def optimize_segment_ival(epo, max_score_fraction=0.8,
use_abs_for_threshold=True, mode="auc"):
""" Optimizing segment ival following http://ieeexplore.ieee.org/xpls/icp.jsp?arnumber=4408441#app3
(but using auc instead of corrcoef)"""
epo_envelope = np.abs(hilbert(epo.data, axis=1))
epo_smoothed = gaussian_filter(epo_envelope, (0,15,0), order=0, mode='reflect')
labels = epo.axes[0]
# labels should be 0,1 for auc but they may be 1,3 or anything else..
# so convert them to 0/1
assert len(np.unique(labels)) == 2
binary_labels = np.int32(labels == np.max(labels))
# Create blocks of 100 ms length (divided by 10 is same as *100(ms)/1000(ms))
assert epo.fs % 10 == 0
    n_samples_per_block = int(epo.fs) // 10  # integer division so the result can be used for slicing
n_samples = len(epo.axes[1])
assert n_samples % n_samples_per_block == 0
n_time_blocks = n_samples // n_samples_per_block
n_chans = len(epo.axes[2])
auc_scores = np.ones((n_time_blocks, n_chans)) * np.nan
for i_time_block in range(n_time_blocks):
for i_chan in range(n_chans):
start_sample = i_time_block * n_samples_per_block
epo_part = epo_smoothed[:,start_sample: start_sample + n_samples_per_block,i_chan]
# auc values indicate good separability if they are close to 0 or close to 1
            # subtracting 0.5 maps them so that values farther from 0 mean better separability
# this makes later computations easier
if mode =='auc':
score = roc_auc_score(binary_labels, np.sum(epo_part, axis=(1))) - 0.5
else:
assert mode == 'corrcoeff'
score = pearsonr(binary_labels, np.sum(epo_part, axis=(1)))[0]
auc_scores[i_time_block, i_chan] = score
auc_score_chan = np.sum(np.abs(auc_scores), axis=1)
# sort time ivals so that best ival across chans is first
time_blocks_sorted = np.argsort(auc_score_chan)[::-1]
i_best_block = time_blocks_sorted[0]
chan_above_zero = auc_scores[i_best_block, :] > 0
chan_sign = 1 * chan_above_zero + -1 * np.logical_not(chan_above_zero)
sign_adapted_scores = auc_scores * chan_sign
chan_meaned_scores = np.sum(sign_adapted_scores, axis=1)
best_meaned_block = np.argsort(chan_meaned_scores)[::-1][0]
if use_abs_for_threshold:
threshold = (np.sum(chan_meaned_scores[chan_meaned_scores > 0]) *
max_score_fraction)
else:
threshold = np.sum(chan_meaned_scores) * max_score_fraction
t0 = best_meaned_block
t1 = best_meaned_block
# stop if either above threshold or
# there are no timeblocks with positive values left to add
# (this also implies stopping if both indices are at the borders)
while (np.sum(chan_meaned_scores[t0:t1+1]) < threshold and
(np.sum(chan_meaned_scores[:t0] * (chan_meaned_scores[:t0] > 0)
+
np.sum(chan_meaned_scores[t1+1:] * (chan_meaned_scores[t1+1:]> 0)))
> 0)):
if ((np.sum(chan_meaned_scores[:t0]) > np.sum(chan_meaned_scores[t1+1:])
and t0 > 0)
or t1 == n_time_blocks - 1):
t0 = t0 - 1
else:
t1 = t1 + 1
start_sample = t0 * n_samples_per_block
end_sample = (t1 + 1) * n_samples_per_block
start_ms = start_sample * 1000.0 / epo.fs
end_ms = end_sample * 1000.0 / epo.fs
# adjust in case trial was already cut out in some way
# assuming axis 1 is timeaxis
start_ms += epo.axes[1][0]
end_ms += epo.axes[1][0]
return start_ms, end_ms
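# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, made-up "epo" stand-in with the attributes optimize_segment_ival expects
# (data, axes, fs); the shapes and label values below are arbitrary assumptions.
if __name__ == "__main__":
    from types import SimpleNamespace
    rng = np.random.RandomState(0)
    fs = 100                                   # sampling rate in Hz, divisible by 10
    n_trials, n_samples, n_chans = 20, 200, 3  # 200 samples -> twenty 100 ms blocks
    epo = SimpleNamespace(
        data=rng.randn(n_trials, n_samples, n_chans),
        fs=fs,
        axes=[np.repeat([1, 3], n_trials // 2),    # two arbitrary class labels
              np.arange(n_samples) * 1000.0 / fs,  # time axis in ms
              np.arange(n_chans)])                 # channel axis
    # prints the selected (start_ms, end_ms) interval
    print(AucIntervalOptimizer().optimize(epo))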
|
import sqlite3
conn = sqlite3.connect('pakdet.db')
c2 = conn.cursor()
for row in c2.execute("SELECT * FROM PAKDET"):
print(row)
conn.close()
|
from flask import request
from werkzeug.urls import url_encode
def apply_template_globals(app):
@app.template_global()
def modify_query(**new_values):
args = request.args.copy()
for key, value in new_values.items():
if key.endswith('_in_list'):
old_list = args.get(key, '').split(',')
new_list = old_list + value.split(',')
args[key] = ','.join(set([s for s in new_list if s]))
else:
args[key] = value
return '{}?{}'.format(request.path, url_encode(args))
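# Illustrative note (assumption, not from the original project): because modify_query is
# registered via app.template_global(), a Jinja template can rebuild the current URL with
# updated query parameters, e.g.
#   <a href="{{ modify_query(page=2, tag_in_list='new') }}">next</a>
# For a request to /items?page=1&tag_in_list=old this yields roughly
# /items?page=2&tag_in_list=old,new (the merged *_in_list values are de-duplicated via a
# set, so their order is not guaranteed).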
|
#%%
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
from IPython.display import Image
import pandas as pd
import numpy as np
import pydotplus
import os
tennis_data = pd.read_csv('playtennis.csv')
# print(tennis_data)
tennis_data.Outlook = tennis_data.Outlook.replace('Sunny',0)
tennis_data.Outlook = tennis_data.Outlook.replace('Overcast',1)
tennis_data.Outlook = tennis_data.Outlook.replace('Rain',2)
tennis_data.Temperature = tennis_data.Temperature.replace('Hot',3)
tennis_data.Temperature = tennis_data.Temperature.replace('Mild',4)
tennis_data.Temperature = tennis_data.Temperature.replace('Cool',5)
tennis_data.Humidity = tennis_data.Humidity.replace('High',6)
tennis_data.Humidity = tennis_data.Humidity.replace('Normal',7)
tennis_data.Wind = tennis_data.Wind.replace('Weak',8)
tennis_data.Wind = tennis_data.Wind.replace('Strong',9)
tennis_data.PlayTennis = tennis_data.PlayTennis.replace('No',10)
tennis_data.PlayTennis = tennis_data.PlayTennis.replace('Yes',11)
# print(tennis_data)
X = np.array(pd.DataFrame(tennis_data, columns=['Outlook','Temperature','Humidity','Wind']))
y = np.array(pd.DataFrame(tennis_data, columns=['PlayTennis']))
X_train, X_test, y_train, y_test = train_test_split(X,y)
dt_clf = DecisionTreeClassifier() # create the decision tree classifier
dt_clf = dt_clf.fit(X_train, y_train) # fit the model on the training data
dt_prediction = dt_clf.predict(X_test) # predict on the test set
# print(confusion_matrix(y_test, dt_prediction)) # compute the confusion matrix
# print(classification_report(y_test, dt_prediction)) # classification metrics report
os.environ['PATH'] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
feature_names = tennis_data.columns.tolist()
feature_names = feature_names[0:4]
target_name = np.array(['Play No', 'Play Yes'])
dt_dot_data = tree.export_graphviz(dt_clf, out_file = None, feature_names= feature_names, class_names = target_name, filled= True, rounded=True, special_characters=True)
dt_graph = pydotplus.graph_from_dot_data(dt_dot_data)
Image(dt_graph.create_png())
#%%
|
##encoding=utf-8
"""
usage:
cd to this directory, run cmd (crawl death record in year 2000):
python dev01_taskplan.py 2 2000
"""
from archives.database import client, task
from archives.metadata import lastname_dict
from archives.urlencoder import urlencoder
from archives.htmlparser import htmlparser
from archives.spider import spider
from archives.fingerprint import fingerprint
import math
import sys
def taskplan(record_type, year):
"""For 18800+ lastnames, get results number for each query of:
{record_type: #record_type, year: #year, lastname: #lastname}
For example:
if there are 25000 records for: record_type = death, year = 2000, lastname = smith, since
the webpage display 1000 records per page, so we create pagenumber from 1 to 25 for this
query. and save it in mongodb database (db = archives, collection = task) like this:
{_id: md5 string, type: 2, lastname_id: 0, year: 2000, nth: 1, flag: false}
... nth + 1
Afterwards, the crawler gonna go through all these pages, once it's done with one page, then
it gonna change the flag to true, so the crawler are never gonna crawl that again.
For more information about task plan data model, see archives.database.py
[args]
------
record_type:
1. birth record
2. death record
3. marriage record
4. divorce record
year: 4 digits year
"""
for lastname_id, lastname in lastname_dict.items():
# check if we did this record_type, lastname_id, year combination before
_id = fingerprint.of_text("%s_%s_%s_%s" % (record_type, lastname_id, year, 1) )
if task.find({"_id": _id}).count() == 0: # only do it when we never do it
print("processing type=%s, lastname=%s in %s ..." % (record_type, lastname, year))
if record_type == 1:
url = urlencoder.url_birth_record(lastname, year, 10, 1)
elif record_type == 2:
url = urlencoder.url_death_record(lastname, year, 10, 1)
elif record_type == 3:
url = urlencoder.url_marriage_record(lastname, year, 10, 1)
elif record_type == 4:
url = urlencoder.url_divorce_record(lastname, year, 10, 1)
html = spider.html(url)
if html:
try:
num_of_records = htmlparser.get_total_number_of_records(html)
max_pagenum = int(math.ceil(float(num_of_records)/1000)) # calculate how many page we should crawl
print("\tWe got %s pages to crawl" % max_pagenum)
for pagenum in range(1, max_pagenum+1):
doc = {"_id": fingerprint.of_text("%s_%s_%s_%s" % (record_type, lastname_id, year, pagenum) ),
"type": record_type,
"lastname_id": lastname_id,
"year": year,
"nth": pagenum,
"flag": False}
try:
task.insert(doc)
except:
pass
except:
pass
else:
print("\tFailed to get html")
if __name__ == "__main__":
# record_type, year = int(sys.argv[1]), int(sys.argv[2])
# record_type, year = 2, 2007
for record_type in [1,2,3,4]:
taskplan(record_type, 2014)
# client.close()
|
# Author: ambiguoustexture
# Date: 2020-03-11
file_result_w2v = './stuffs_92/result_w2v.txt'
file_result_PC = './stuffs_92/result_PC.txt'
with open(file_result_w2v) as result_w2v:
count, total = 0, 0
for line in result_w2v:
cols = line.split(' ')
total += 1
if cols[3] == cols[4]:
count += 1
print('Accuracy of word2vec model:', count / total)
with open(file_result_PC) as result_PC:
count, total = 0, 0
for line in result_PC:
cols = line.split(' ')
total += 1
if cols[3] == cols[4]:
count += 1
print('Accuracy of PCA model:', count / total)
|
#!/usr/bin/env python
"""
See cubeplt.py for 3d plotting of the cubes
"""
import numpy as np
def make_pyvista_indices(indices):
"""
:param indices: (nface,3) triangles OR (nface,4) quads
:return ii: vista type list
"""
sh = list(indices.shape)
last = sh[-1]
assert last in (3,4)
sh[-1] = last+1
ii = np.zeros(sh, dtype=np.int32)
ii[:,1:] = indices
ii[:,0] = last
return ii
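# Worked example (illustrative): each face row gets prefixed with its vertex count.
# >>> make_pyvista_indices(np.array([[0, 1, 2], [2, 3, 0]]))
# array([[3, 0, 1, 2],
#        [3, 2, 3, 0]], dtype=int32)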
def make_cube_oxyz(oxyz):
"""
:param oxyz: (4,3) : origin and orthogonal surrounding points nominally
assumed in +X,+Y,+Z directions relative to first point
(YZ) (XYZ)
6.........7
/. /|
/ . / |
/ . / |
3.........5 |
| . | |
| 2.....|...4
Z | / | /
| / Y | /
|/ |/
0---------1
X
"""
o = oxyz[0]
v = oxyz[1:] - o # three : assumed orthogonal base vectors
verts = np.zeros([8,3], dtype=np.float32)
verts[:4] = oxyz
verts[4] = o + v[0] + v[1] # XY
verts[5] = o + v[0] + v[2] # XZ
verts[6] = o + v[1] + v[2] # YZ
verts[7] = o + v[0] + v[1] + v[2] # XYZ
indices = np.array([
[0,1,5,3], # thumb-out (outwards normal)
[1,4,7,5], # thumb-right (outwards normal)
[4,2,6,7], # thumb-back (outwards normal)
[0,3,6,2], # thumb-left (outwards normal)
[0,2,4,1], # thumb-down (outwards normal)
[5,7,6,3]], # thumb-up (outwards normal)
dtype=np.int32)
pv_indices = make_pyvista_indices(indices)
faces = verts[indices]
assert verts.shape == (8,3)
assert indices.shape == (6,4)
assert pv_indices.shape == (6,5)
assert faces.shape == (6,4,3)
return verts, faces, pv_indices
def make_cube_bbox(bbox):
"""
:param bbox: (2,3) mi,mx
mx
(YZ) (XYZ)
6.........7
/. /|
/ . / |
/ . / |
3.........5 |
| . | |
| 2.....|...4
Z | / | /
| / Y | /
|/ |/
0---------1
mi X
"""
assert bbox.shape == (2,3)
mi, mx = bbox
verts = np.zeros([8,3], dtype=np.float32)
verts[0] = [mi[0], mi[1], mi[2]] # ---
verts[1] = [mx[0], mi[1], mi[2]] # X--
verts[2] = [mi[0], mx[1], mi[2]] # -Y-
verts[3] = [mi[0], mi[1], mx[2]] # --Z
verts[4] = [mx[0], mx[1], mi[2]] # XY-
verts[5] = [mx[0], mi[1], mx[2]] # X-Z
verts[6] = [mi[0], mx[1], mx[2]] # -YZ
verts[7] = [mx[0], mx[1], mx[2]] # XYZ
indices = np.array([
[0,1,5,3], # thumb-out (outwards normal)
[1,4,7,5], # thumb-right (outwards normal)
[4,2,6,7], # thumb-back (outwards normal)
[0,3,6,2], # thumb-left (outwards normal)
[0,2,4,1], # thumb-down (outwards normal)
[5,7,6,3]], # thumb-up (outwards normal)
dtype=np.int32)
pv_indices = make_pyvista_indices(indices)
faces = verts[indices]
assert verts.shape == (8,3)
assert indices.shape == (6,4)
assert pv_indices.shape == (6,5)
assert faces.shape == (6,4,3)
return verts, faces, pv_indices
def make_cube(cube):
"""
:param cube: shape of either (4,3) or (2,3)
"""
if cube.shape == (4,3):
return make_cube_oxyz(cube)
elif cube.shape == (2,3):
return make_cube_bbox(cube)
else:
assert 0, ("cube specification array not handled", cube.shape)
pass
if __name__ == '__main__':
oxyz = np.array([(0,0,0), (100,0,0), (0,100,0), (0,0,100)], dtype=np.float32)
bbox = np.array([(0,0,0),(100,100,100)], dtype=np.float32)
points0, faces0, indices0 = make_cube(oxyz)
points1, faces1, indices1 = make_cube(bbox)
assert np.all( points0 == points1 )
assert np.all( faces0 == faces1 )
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import numpy as np
import zipcodes
from make_data import *
from train_model import *
from predict import *
from decrease_price import *
import pickle
inputs1 = {
'price': [221900, 538000, 180000, 604000, 510000],
'city': ['phx', 'phx', 'phx', 'phx', 'phx'],
'bedrooms': [2,2,3,3,2],
'bathrooms': [2, 2, 3, 2, 1],
'floors': [1, 2, 1, 2, 3],
'waterfront': [0, 0, 0, 0, 1],
'condition': [2, 3, 4, 1, 4],
'sqft_basement': [1, 0,1, 0, 1],
'yr_built': [1990, 1934, 1955, 1920, 2005],
'yr_renovated': [0,0,0,1,1],
'lot_log': [5650, 7242, 6000, 9780, 9500],
}
inputs2 = {
'price': [257500, 310000, 485000, 385000, 233000],
'city': ['hou', 'hou', 'hou', 'hou', 'hou'],
'bedrooms': [2,1,3,2,3],
'bathrooms': [1, 2, 2, 3, 2],
'floors': [3, 1, 2, 3, 1],
'waterfront': [0, 1, 0, 0, 1],
'condition': [3,4, 5, 2, 3],
'sqft_basement': [1,0,0,1,0],
'yr_built': [2007, 2012, 2001, 1999, 2000],
'yr_renovated': [0,1,0,1,1],
'lot_log': [5000, 6500, 3300, 2300, 5400],
}
df1= pd.DataFrame(data=inputs1)
df2= pd.DataFrame(data=inputs2)
final = df1.append(df2)
yDict = [df1['price'], df2['price']] # list of target (y) Series, one per city
xDict = [df1[['bedrooms', 'bathrooms',"floors", "waterfront", "condition", "sqft_basement", "yr_built", "yr_renovated", "lot_log"]],df2[['bedrooms', 'bathrooms',"floors", "waterfront", "condition", "sqft_basement", "yr_built", "yr_renovated", "lot_log"]]] # list of feature (X) DataFrames, one per city
#tests for make_data
def test_append_cityNames():
"""Tests the append_cityNames function in make_data to ensure cities are properly appended."""
zip_data = pd.DataFrame(columns=['price', 'zipcode'])
prices = [1,2,3]
zipcodes1 = [85258, 60201, 93953]
zip_data['price'] = prices
zip_data['zipcode'] = zipcodes1
city_result = append_cityNames(zip_data, 'zipcode', 'city')
assert type(city_result) is pd.core.frame.DataFrame
assert city_result.shape == (3,3)
def test_choose_features():
"""Tests the choose_features function in make_data to ensure subsetting of dataframe is correct."""
#city_result = append_cityNames(data, 'zipcode', 'city')
columns = ["bedrooms", "bathrooms", "city"]
feature_result = choose_features(final, columns)
assert type(feature_result) is pd.core.frame.DataFrame
assert feature_result.shape == (10, 3)
def test_log_variable():
"""Tests the log_variable function in make_data to ensure variables are correctly being logged."""
df = pd.DataFrame(columns=['col1'])
list1 = [1,2,3]
df['col1'] = list1
list2 = [np.log(1), np.log(2), np.log(3)]
log_result = log_variable(df, 'logged', 'col1')
print(log_result)
assert log_result['logged'].tolist() == list2
def test_binary_var():
"""Tests the binary_var function in make_data to ensure variables are correctly being turned into binary variables"""
df = pd.DataFrame(columns=['price', 'pools'])
list1 = [1, 6, 4, 2.4, 0]
list2 = [0, 4.5, 0, 1, 6]
df['price']= list1
df['pools']=list2
results_binary = create_binary_var(df, 'pools')
assert sum(((df['pools']==1) | (df['pools']==0))) == len(results_binary['pools'])
#tests for train_model
def test_splitDF_cities():
"""Tests the splitDF_cities function to make sure the dataframe is being split by city name."""
#feature_result = choose_features(data, columns)
cities = ['phx', 'hou']
x = final[['bedrooms', 'bathrooms', 'city']]
split_result = splitDF_cities(x, cities)
assert type(split_result) is list
assert len(split_result) == len(cities)
def test_get_target():
"""Tests the get_target function to make sure the correct target is being returned."""
#feature_result = choose_features(data, columns)
#split_result = splitDF_cities(feature_result, cities)
cities = ['phx', 'hou']
split_result = [df1, df2]
target_result = get_target(split_result, 'price')
assert type(target_result) is list
assert len(target_result) == len(cities)
def test_split_data():
cities = ['phx', 'hou']
split_result = split_data(xDict, yDict)
assert type(split_result) is tuple
    assert len(split_result) == len(cities) # one list per city
assert len(split_result[0]) == 2 #has test and train
def test_model_train():
cities = ['phx', 'hou']
d = {'model_train': {
'choose_features': {
#'num_years': final,
'columns': ["city", "bedrooms", "bathrooms","floors", "waterfront", "condition", "sqft_basement", "yr_built", "yr_renovated", "lot_log"]},
'split_data':{
'train_size': 0.7,
'test_size': 0.3
}
}}
models, finalxDict, finalyDict = model_train(xDict, yDict,**d['model_train'])
with open('unitTest_model.pkl', "wb") as f:
pickle.dump(models, f)
assert len(models) == len(cities)
    assert isinstance(models[0], LinearRegression)
def test_model_score():
with open('unitTest_model.pkl', 'rb') as f:
models = pickle.load(f)
cities = ['phx', 'hou']
x, y = split_data(xDict, yDict)
scoring = model_score (models, x, y, cities)
assert type(scoring) is pd.core.frame.DataFrame
assert scoring.shape == (len(cities), 2)
def test_format_coefs():
cities = ['phx', 'hou']
columns = ["bedrooms", "bathrooms", "floors", "waterfront", "condition", "sqft_basement", "yr_built", "yr_renovated", "lot_log"]
with open('unitTest_model.pkl', 'rb') as f:
models = pickle.load(f)
coefs = format_coefs(models, columns, cities)
assert type(coefs) is pd.core.frame.DataFrame
assert coefs.shape == (len(cities), len(columns))
## tests for application scripts
def test_input_prediction():
""" Tests the prediction function from app.predict.py to ensure user inputs are predicted correctly"""
with open('unitTest_model.pkl', 'rb') as f:
models = pickle.load(f)
predict_result= prediction(models, 0, 4, 5, 3, 0, 4, 0, 1950, 0, 10000)
assert type(predict_result) is str
def test_dec_price():
""" Tests the decrease price function from app.decrease_price.py to ensure the correct output is being made"""
with open('unitTest_model.pkl', 'rb') as f:
models = pickle.load(f)
test_items, test_prices = dec_price(models, 0, 4, 5, 3, 0, 4, 0, 1950, 0, 10000)
assert type(test_items) is list
assert type(test_prices) is list
assert len(test_items) == 2
assert len(test_prices) == 8
|
from spack import *
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class EvtgenToolfile(Package):
url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'
version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)
depends_on('evtgen')
def install(self, spec, prefix):
values = {}
values['VER'] = spec['evtgen'].version
values['PFX'] = spec['evtgen'].prefix
fname = 'evtgen.xml'
contents = str("""
<tool name="evtgen" version="${VER}">
<lib name="EvtGen"/>
<lib name="EvtGenExternal"/>
<client>
<environment name="EVTGEN_BASE" default="${PFX}"/>
<environment name="LIBDIR" default="$$EVTGEN_BASE/lib"/>
<environment name="INCLUDE" default="$$EVTGEN_BASE/include"/>
</client>
<runtime name="EVTGENDATA" value="$$EVTGEN_BASE/share"/>
<use name="hepmc"/>
<use name="pythia8"/>
<use name="tauolapp"/>
<use name="photospp"/>
</tool>
""")
write_scram_toolfile(contents, values, fname, prefix)
|
import json
import csv
import numpy as np
import matplotlib.pyplot as plt
import os
import argparse
# parser for trials
#use --trial trial_() to run it
parser = argparse.ArgumentParser(description='Read Trial')
parser.add_argument('--trial', type=str,
help='Trial Number')
args = parser.parse_args()
# different conditions of images
condition = ['big_', 'small_', 'light_', 'dark_']
#classes of product
classList = ['bubly', 'clinique', 'echo', 'lotion', 'micellar', 'parm', 'protein', 'redbull', 'shade', 'skin', 'tory' ]
trial = args.trial
""" Configure Paths"""
# generate test_images.txt and calculate the percentage of the indicated condition
dir_path = os.path.dirname(os.path.realpath("./*"))
def gentestList(con):
tot_cnt = 0
con_cnt =0
labelpath = dir_path+"/dataset_eval/"
imgpath = dir_path+"/dataset_eval/"
txt_list = os.listdir(imgpath)
for txt_name in txt_list:
if not("jpg" in txt_name):
continue
tot_cnt+=1
if not(con in txt_name):
continue
con_cnt+=1
img_filename = txt_name
img_path = imgpath + img_filename
iopen = open(dir_path+"/test_images.txt", "a")
iopen.write(img_path+"\n")
return con_cnt/tot_cnt, tot_cnt
# calculate the percentage of each class
def count(con):
tot_cnt = 0
con_cnt =0
labelpath = dir_path+"/dataset_eval/"
imgpath = dir_path+"/dataset_eval/"
txt_list = os.listdir(imgpath)
for txt_name in txt_list:
if not("jpg" in txt_name):
continue
tot_cnt+=1
if not(con in txt_name):
continue
con_cnt+=1
return con_cnt/tot_cnt
#for con in condition:
#Open txt
def mAPList(condition, p):
mAPlist = []
perclist = []
for con in condition:
#create test image list for this condition
if os.path.isfile('test_images.txt'):
os.remove('test_images.txt')
perc, _ =gentestList(con)
#run darknet under this conditions and p value
os.system(dir_path+'/darknet detector map dataset_eval.data /home/vickicv/Desktop/Versioning_test/Trials/{}/yolov4-tiny-vicki-1.cfg /home/vickicv/Desktop/Versioning_test/Trials/{}/weights/yolov4-tiny-vicki-1_best.weights -iou_thresh 0.{} ->result.txt'.format(trial,trial,p))
f = open ('result.txt','r')
m = f.readlines()
for line in m:
if ('mean average precision (mAP@0.'+ p + ')') in line:
first = line.split("=")[1]
mAP = float(first.split(',')[0])*100
mAPlist.append(mAP)
perclist.append(perc)
return mAPlist,perclist
def mAPList_class(condition, p):
mAPlist = []
perclist = []
#create test image list for this condition
if os.path.isfile('test_images.txt'):
os.remove('test_images.txt')
_, tot_cnt =gentestList("")
os.system(dir_path+'/darknet detector map dataset_eval.data /home/vickicv/Desktop/Versioning_test/Trials/{}/yolov4-tiny-vicki-1.cfg /home/vickicv/Desktop/Versioning_test/Trials/{}/weights/yolov4-tiny-vicki-1_best.weights -iou_thresh 0.{} ->result.txt'.format(trial,trial,p))
#run darknet under this conditions and p value
f = open ('result.txt','r')
m = f.readlines()
for line in m:
if 'class_id' in line:
for i in range(len(condition)):
if condition[i] in line:
mAP = float(line.split('=')[3].split('%')[0])
#print(mAP)
mAPlist.append(mAP)
for i in range(len(condition)):
perclist.append(count(condition[i]))
return mAPlist,perclist
def mAP_all(p):
mAP = 0
#create test image list for this condition
if os.path.isfile('test_images.txt'):
os.remove('test_images.txt')
_, _ =gentestList("")
#run darknet under this conditions and p value
os.system(dir_path+'/darknet detector map dataset_eval.data /home/vickicv/Desktop/Versioning_test/Trials/{}/yolov4-tiny-vicki-1.cfg /home/vickicv/Desktop/Versioning_test/Trials/{}/weights/yolov4-tiny-vicki-1_best.weights -iou_thresh 0.{} ->result.txt'.format(trial,trial,p))
f = open ('result.txt','r')
m = f.readlines()
for line in m:
if ('mean average precision (mAP@0.'+ p + ')') in line:
first = line.split("=")[1]
mAP = float(first.split(',')[0])*100
return mAP
#extract mAP from result.txt for each condition
mAP25,perclist = mAPList(condition, '25')
mAP50,_ = mAPList(condition, '50')
mAP75,_ = mAPList(condition, '75')
#extract mAP from result.txt for each class
mAP_class_25, perclist_class = mAPList_class(classList, '25')
mAP_class_50, _ = mAPList_class(classList, '50')
mAP_class_75, _ = mAPList_class(classList, '75')
path = dir_path+'/output_{}.csv'.format(trial)
#write CSV file
with open(path, 'w', newline = '') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['condition', 'percentage', 'mAP@25' , 'mAP@50', 'mAP@75'])
writer.writerow(['all', '100', mAP_all('25') , mAP_all('50'), mAP_all('75')])
for i in range(len(condition)):
writer.writerow([condition[i], perclist[i] , mAP25[i], mAP50[i], mAP75[i]])
for i in range(len(classList)):
writer.writerow([classList[i], perclist_class[i], mAP_class_25[i], mAP_class_50[i], mAP_class_75[i]])
|
"""inventoryproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from user import views as user_view
from django.contrib.auth import views as auth_views
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('dashboardapp.urls')),
path('register/',user_view.register,name='user-register'),
path('',auth_views.LoginView.as_view(template_name='user/login.html'),name="user-login"),
    # here we use the built-in LoginView class with our own template (user/login.html);
    # inside that template, {{ form }} renders Django's authentication form
path('profile/',user_view.profile,name='user-profile'),
path('logout/',auth_views.LogoutView.as_view(template_name='user/logout.html'),name="user-logout"),
path('password_reset/',auth_views.PasswordResetView.as_view(template_name='user/password_reset.html'),name='password_reset'),
    # here password_reset.html is my own customized template
path('password_reset_done/',auth_views.PasswordResetDoneView.as_view(template_name='user/password_reset_done.html'),name='password_reset_done'),
path('password_reset_confirm/<uidb64>/<token>/',auth_views.PasswordResetConfirmView.as_view(template_name='user/password_reset_confirm.html'),name="password_reset_confirm"),
path('password_reset_complete/',auth_views.PasswordResetCompleteView.as_view(template_name='user/password_reset_complete.html'),name="password_reset_complete")
    # here PasswordResetView is a built-in class-based view from django.contrib.auth
    # if you use Django's built-in password reset, then PasswordResetView, PasswordResetDoneView, PasswordResetConfirmView and PasswordResetCompleteView are all required
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
from models.DiscordExtension import DiscordExtension
from models.ExecutionContext import ExecutionContext
from player import Player
import asyncio
import discord
import logging
class AliasExtension(DiscordExtension):
def __init__(self, configRepo):
self.configRepo = configRepo
super().__init__()
@property
def name(self):
return 'Aliases'
def isserving(self, ctx: ExecutionContext):
return ctx.cmd in ['alias', 'alias-remove']
async def execute(self, ctx: ExecutionContext):
cmd = ctx.cmd
arg = ctx.arg
if (cmd == 'alias'):
separator = arg.find(' ')
if (separator == -1):
await ctx.send_message('Wrong alias syntax. Use "alias <alias> <replacer>"')
return
alias = arg[:separator].strip()
replacer = arg[separator + 1:].strip()
self.configRepo.add_alias(alias, replacer)
await ctx.send_message(f'Alias "{alias}" has been successfully added')
else:
if (ctx.isadmin):
self.configRepo.remove_alias(arg)
await ctx.send_message(f'Alias "{ctx.arg}" was successfully removed')
else:
await ctx.send_message('Only admin users can remove aliases')
def list_commands(self, ctx: ExecutionContext):
array = ['alias <alias> <replacer>']
aliases = 'list: '
for alias in self.configRepo.get_aliases():
aliases += f' {alias[0]}'
array.append(aliases)
array.append('alias-remove <alias>')
return array
def list_emojis(self):
return []
def emoji_to_command(self, emoji: str):
return None
async def initialize(self, bot):
pass
def dispose(self):
pass
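# Example interaction (illustrative, based on the handlers above): a message with
# ctx.cmd == 'alias' and ctx.arg == 'np nowplaying' stores the alias via
# configRepo.add_alias('np', 'nowplaying') and replies
# 'Alias "np" has been successfully added'; 'alias-remove np' deletes it again,
# but only when ctx.isadmin is true.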
|
from ..FeatureExtractor import FeatureExtractor
from numpy import median as med
from common_functions.plot_methods import plot_horizontal_line
class medianextractor(plot_horizontal_line,FeatureExtractor):
active = True
extname = 'median' #extractor's name
def extract(self):
try:
median = float(med(self.flux_data))
except:
self.ex_error("EXCEPT in medianextractor() most likely flux_data=[]")
return(median)
|
"""relaydomains unit tests."""
from django.core.files.base import ContentFile
from django.test import TestCase
from django.urls import reverse
from modoboa.admin import factories as admin_factories, models as admin_models
from modoboa.core.factories import UserFactory
from modoboa.lib.test_utils import MapFilesTestCaseMixin
from modoboa.lib.tests import ModoTestCase
from modoboa.limits import utils as limits_utils
from modoboa.transport import factories as tr_factories, models as tr_models
from . import models
class Operations(object):
def _create_relay_domain(self, name, status=200, **kwargs):
values = {
"name": name,
"create_dom_admin": False,
"type": "relaydomain",
"service": "relay",
"relay_target_host": "external.host.tld",
"relay_target_port": 25,
"enabled": True,
"stepid": "step3",
"quota": 0,
"default_mailbox_quota": 0
}
values.update(kwargs)
return self.ajax_post(
reverse("admin:domain_add"),
values, status
)
def _relay_domain_alias_operation(self, optype, domain, name, status=200):
transport = tr_models.Transport.objects.get(pattern=domain.name)
values = {
"name": domain.name,
"service": "relay",
"relay_target_host": transport._settings["relay_target_host"],
"relay_target_port": transport._settings["relay_target_port"],
"type": "relaydomain",
"quota": domain.quota,
"default_mailbox_quota": domain.default_mailbox_quota
}
aliases = [alias.name for alias in domain.domainalias_set.all()]
if optype == "add":
aliases.append(name)
else:
aliases.remove(name)
for cpt, alias in enumerate(aliases):
fname = "aliases" if not cpt else "aliases_%d" % cpt
values[fname] = alias
return self.ajax_post(
reverse("admin:domain_change",
args=[domain.id]),
values, status
)
def _check_limit(self, name, curvalue, maxvalue):
limit = self.user.userobjectlimit_set.get(name=name)
self.assertEqual(limit.current_value, curvalue)
self.assertEqual(limit.max_value, maxvalue)
class RelayDomainsTestCase(ModoTestCase, Operations):
@classmethod
def setUpTestData(cls): # NOQA:N802
"""Create test data."""
super(RelayDomainsTestCase, cls).setUpTestData()
admin_factories.populate_database()
cls.transport = tr_factories.TransportFactory(
pattern="relaydomain.tld", service="relay",
_settings={
"relay_target_host": "external.host.tld",
"relay_target_port": "25",
"relay_verify_recipients": False
}
)
cls.dom = admin_factories.DomainFactory(
name="relaydomain.tld", type="relaydomain",
transport=cls.transport)
admin_factories.DomainAliasFactory(
name="relaydomainalias.tld", target=cls.dom)
admin_factories.MailboxFactory(
domain=cls.dom, address="local",
user__username="local@relaydomain.tld",
user__groups=("SimpleUsers", )
)
def test_domain_list_view(self):
"""Make sure relaydomain is listed."""
url = reverse("admin:_domain_list")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
content = response.json()
self.assertIn("relaydomain.tld", content["rows"])
self.assertIn("Relay domain", content["rows"])
def test_create_relaydomain(self):
"""Test the creation of a relay domain.
We also check that unique constraints are respected: domain,
relay domain alias.
FIXME: add a check for domain alias.
"""
self._create_relay_domain("relaydomain1.tld")
transport = tr_models.Transport.objects.get(pattern="relaydomain1.tld")
self.assertEqual(
transport._settings["relay_target_host"], "external.host.tld")
self.assertEqual(
transport._settings["relay_verify_recipients"], False)
resp = self._create_relay_domain("test.com", 400)
self.assertEqual(resp["form_errors"]["name"][0],
"Domain with this Name already exists.")
resp = self._create_relay_domain("relaydomainalias.tld", 400)
self.assertEqual(
resp["form_errors"]["name"][0],
"A domain alias with this name already exists"
)
def test_create_relaydomainalias(self):
"""Test the creation of a relay domain alias.
We also check that unique constraints are respected: domain,
relay domain.
FIXME: add a check for domain alias.
"""
self._relay_domain_alias_operation(
"add", self.dom, "relaydomainalias1.tld"
)
resp = self._relay_domain_alias_operation(
"add", self.dom, "test.com", 400
)
self.assertEqual(
resp["form_errors"]["aliases_2"][0],
"A domain with this name already exists"
)
resp = self._relay_domain_alias_operation(
"add", self.dom, self.dom.name, 400
)
self.assertEqual(
resp["form_errors"]["aliases_2"][0],
"A domain with this name already exists"
)
def test_edit_relaydomain(self):
"""Test the modification of a relay domain.
Rename 'relaydomain.tld' domain to 'relaydomain.org'
"""
values = {
"name": "relaydomain.org",
"service": "relay",
"relay_target_host": self.transport._settings["relay_target_host"],
"relay_target_port": 4040,
"relay_verify_recipients": True,
"type": "relaydomain",
"enabled": True,
"quota": 0, "default_mailbox_quota": 0
}
self.ajax_post(
reverse("admin:domain_change", args=[self.dom.id]),
values
)
self.transport.refresh_from_db()
self.assertEqual(
self.transport._settings["relay_target_port"], 4040)
self.assertTrue(
models.RecipientAccess.objects.filter(
pattern=values["name"]).exists())
values["relay_verify_recipients"] = False
self.ajax_post(
reverse("admin:domain_change", args=[self.dom.id]),
values
)
self.assertFalse(
models.RecipientAccess.objects.filter(
pattern=values["name"]).exists())
def test_relaydomain_domain_switch(self):
"""Check domain <-> relaydomain transitions."""
domain_pk = self.dom.pk
values = {
"name": "relaydomain.tld",
"type": "domain",
"quota": 0,
"default_mailbox_quota": 0,
"enabled": True,
"service": "relay",
"relay_target_host": self.transport._settings["relay_target_host"],
"relay_target_port": self.transport._settings["relay_target_port"]
}
self.ajax_post(
reverse("admin:domain_change", args=[domain_pk]), values)
with self.assertRaises(tr_models.Transport.DoesNotExist):
self.transport.refresh_from_db()
self.dom.refresh_from_db()
self.assertEqual(self.dom.type, "domain")
values = {
"name": "relaydomain.tld",
"type": "relaydomain",
"enabled": True,
"quota": 0,
"default_mailbox_quota": 0
}
self.ajax_post(
reverse("admin:domain_change", args=[domain_pk]), values)
self.assertEqual(
admin_models.Domain.objects.get(name="relaydomain.tld").type,
"relaydomain")
def test_edit_relaydomainalias(self):
"""Test the modification of a relay domain alias.
Rename 'relaydomainalias.tld' domain to 'relaydomainalias.net'
"""
values = {
"name": "relaydomain.org",
"service": "relay",
"relay_target_host": self.transport._settings["relay_target_host"],
"relay_target_port": self.transport._settings["relay_target_port"],
"type": "relaydomain",
"aliases": "relaydomainalias.net",
"quota": 0, "default_mailbox_quota": 0
}
self.ajax_post(
reverse("admin:domain_change", args=[self.dom.id]),
values
)
admin_models.DomainAlias.objects.get(name="relaydomainalias.net")
with self.assertRaises(admin_models.DomainAlias.DoesNotExist):
admin_models.DomainAlias.objects.get(name="relaydomainalias.tld")
def test_delete_relaydomain(self):
"""Test the removal of a relay domain."""
self.ajax_post(
reverse("admin:domain_delete", args=[self.dom.id]),
{}
)
with self.assertRaises(tr_models.Transport.DoesNotExist):
self.transport.refresh_from_db()
def test_delete_recipientaccess(self):
"""Test the removal of a recipient access."""
self.transport._settings["relay_verify_recipients"] = True
self.transport.save()
self.ajax_post(
reverse("admin:domain_delete", args=[self.dom.id]),
{}
)
self.assertFalse(
models.RecipientAccess.objects.filter(
pattern=self.transport.pattern).exists())
def test_alias_on_relaydomain(self):
"""Create an alias on a relay domain."""
values = {
"address": "alias@relaydomain.tld",
"recipients": "recipient@relaydomain.tld",
"enabled": True
}
self.ajax_post(reverse("admin:alias_add"), values)
self.assertTrue(
admin_models.Alias.objects.filter(
address="alias@relaydomain.tld").exists())
values = {
"address": "alias2@relaydomain.tld",
"recipients": "local@relaydomain.tld",
"enabled": True
}
self.ajax_post(reverse("admin:alias_add"), values)
self.assertTrue(
admin_models.Alias.objects.filter(
address="alias2@relaydomain.tld").exists())
class ImportTestCase(ModoTestCase):
"""Test import."""
def test_webui_import(self):
"""Check if import from webui works."""
f = ContentFile("relaydomain;relay.com;127.0.0.1;25;relay;True;True",
name="domains.csv")
self.client.post(
reverse("admin:domain_import"), {
"sourcefile": f
}
)
self.assertTrue(
admin_models.Domain.objects.filter(
name="relay.com", type="relaydomain").exists())
class LimitsTestCase(ModoTestCase, Operations):
@classmethod
def setUpTestData(cls): # NOQA:N802
"""Create test data."""
super(LimitsTestCase, cls).setUpTestData()
for name, _definition in limits_utils.get_user_limit_templates():
cls.localconfig.parameters.set_value(
"deflt_user_{0}_limit".format(name), 2, app="limits")
cls.localconfig.save()
cls.user = UserFactory.create(
username="reseller", groups=("Resellers",)
)
def setUp(self):
"""Initialize test."""
super(LimitsTestCase, self).setUp()
self.client.force_login(self.user)
def test_relay_domains_limit(self):
self._create_relay_domain(
"relaydomain1.tld", quota=1, default_mailbox_quota=1)
self._check_limit("domains", 1, 2)
self._create_relay_domain(
"relaydomain2.tld", quota=1, default_mailbox_quota=1)
self._check_limit("domains", 2, 2)
self._create_relay_domain("relaydomain3.tld", 403)
self._check_limit("domains", 2, 2)
domid = admin_models.Domain.objects.get(name="relaydomain2.tld").id
self.ajax_post(
reverse("admin:domain_delete", args=[domid]), {})
self._check_limit("domains", 1, 2)
def test_relay_domain_aliases_limit(self):
self._create_relay_domain(
"relaydomain1.tld", quota=1, default_mailbox_quota=1)
domain = admin_models.Domain.objects.get(name="relaydomain1.tld")
self._relay_domain_alias_operation(
"add", domain, "relay-domain-alias1.tld"
)
self._check_limit("domain_aliases", 1, 2)
self._relay_domain_alias_operation(
"add", domain, "relay-domain-alias2.tld"
)
self._check_limit("domain_aliases", 2, 2)
self._relay_domain_alias_operation(
"add", domain, "relay-domain-alias3.tld", 403
)
self._check_limit("domain_aliases", 2, 2)
self._relay_domain_alias_operation(
"delete", domain, "relay-domain-alias2.tld"
)
self._check_limit("domain_aliases", 1, 2)
class MapFilesTestCase(MapFilesTestCaseMixin, TestCase):
"""Test case for relaydomains."""
MAP_FILES = [
"sql-relaydomains.cf",
"sql-relay-recipient-verification.cf"
]
|
import pykitti
from .data import Downloader
from .kitti_cameras_calibration_factory import KittyCamerasCalibrationFactory
from .poses_dataset_adapter import PosesDatasetAdapter
from .video_dataset_adapter import VideoDatasetAdapter
from ..concat_dataset import ConcatDataset
from ..data_transform_manager import DataTransformManager
from ..unsupervised_depth_data_module import UnsupervisedDepthDataModule
from ..video_dataset import VideoDataset
class KittiDataModuleFactory():
def __init__(self, frames, sequences="08", directory="datasets"):
        if not isinstance(sequences, list):
sequences = [sequences]
self._kitti_datasets = [self.make_kitti_dataset(directory, x, frames) for x in sequences]
@staticmethod
def make_kitti_dataset(directory, sequence, frames):
sequence = Downloader(sequence, directory)
return pykitti.odometry(sequence.main_dir, sequence.sequence_id, frames=frames)
@staticmethod
def make_video_dataset(kitti_dataset):
return VideoDataset(
VideoDatasetAdapter(kitti_dataset, 0),
VideoDatasetAdapter(kitti_dataset, 1),
PosesDatasetAdapter(kitti_dataset)
)
def make_dataset_manager(self,
final_image_size,
transform_manager_parameters,
batch_size=64,
split=(80, 10, 10),
num_workers=4,
device="cpu"):
original_image_size = self._kitti_datasets[0].get_rgb(0)[0].size[::-1]
transform_manager = DataTransformManager(
original_image_size,
final_image_size,
transform_manager_parameters
)
dataset = ConcatDataset([self.make_video_dataset(x) for x in self._kitti_datasets])
cameras_calibration = KittyCamerasCalibrationFactory().make_cameras_calibration(
original_image_size,
final_image_size,
device
)
return UnsupervisedDepthDataModule(dataset,
transform_manager,
cameras_calibration,
batch_size,
num_workers=num_workers,
split=split)
def make_data_module_from_params(self, params):
transform_manager_parameters = {
"filters": params.transform_filters
}
return self.make_dataset_manager(params.image_size, transform_manager_parameters,
params.batch_size, params.split, params.num_workers)
|
from flask import Response
from flask.blueprints import Blueprint
import logging
from flask_login import login_required, current_user
from flask.templating import render_template
from flask.globals import request
from flask.helpers import flash, url_for, make_response
from waitlist.blueprints.settings import add_menu_entry
from waitlist.permissions import perm_manager
from waitlist.storage.database import Ticket
from waitlist.base import db
import flask
from datetime import datetime, timedelta
from sqlalchemy.sql.expression import desc
from flask_babel import gettext, lazy_gettext
logger = logging.getLogger(__name__)
feedback = Blueprint('feedback', __name__)
perm_manager.define_permission('feedback_view')
perm_manager.define_permission('feedback_edit')
perm_view = perm_manager.get_permission('feedback_view')
perm_edit = perm_manager.get_permission('feedback_edit')
@feedback.route("/", methods=["GET"])
@login_required
def index() -> Response:
# get old feedback and input data back
char_id = current_user.get_eve_id()
tickets = db.session.query(Ticket).filter(Ticket.characterID == char_id).all()
return render_template("feedback/index.html", tickets=tickets)
@feedback.route("/", methods=["POST"])
@login_required
def submit() -> Response:
title = request.form['title']
if title is None or len(title) > 50:
        return flask.abort(400, "Title is too long (max 50)")
message = request.form['message']
if message is None:
return flask.abort(400)
char_id = current_user.get_eve_id()
if message != "":
ticket = Ticket(
title=title,
characterID=char_id,
message=message,
state="new"
)
db.session.add(ticket)
db.session.commit()
flash(gettext("Thank You for your feedback!"), "info")
return flask.redirect(url_for('.index'))
@feedback.route("/settings", methods=["GET"])
@perm_view.require(http_exception=401)
def settings() -> Response:
    # only return tickets that are not "closed" and not older than 90 days
time_90days_ago = datetime.utcnow() - timedelta(90)
tickets = db.session.query(Ticket).filter((Ticket.time > time_90days_ago) & (Ticket.state == "new"))\
.order_by(desc(Ticket.time)).all()
return render_template("feedback/settings.html", tickets=tickets)
@feedback.route("/settings", methods=["POST"])
@perm_edit.require(http_exception=401)
def change_status() -> Response:
ticket_id = int(request.form.get('ticketID'))
new_status = request.form.get('ticketStatus')
ticket = db.session.query(Ticket).get(ticket_id)
ticket.state = new_status
db.session.commit()
return make_response("OK")
add_menu_entry('feedback.settings', lazy_gettext('Feedback'), perm_view.can)
|
"""
Created by Alex Wang
On 2018-07-30
Model: https://github.com/yule-li/CosFace
[Configurations]:
lfw_pairs: data/pairs.txt
embedding_size: 1024
model_def: models.inception_resnet_v1
save_model: False
do_flip: False
image_width: 112
lfw_dir: dataset/lfw-112x96
prewhiten: False
lfw_nrof_folds: 10
image_height: 112
lfw_batch_size: 200
image_size: 224
fc_bn: True
model: models/model-20180626-205832.ckpt-60000
network_type: sphere_network
lfw_file_ext: jpg
[End of configuration]
"""
import tensorflow as tf
import tensorflow.contrib.slim as slim
import cv2
import numpy as np
import sphere_network
import utils
image_width = 112
image_height = 112
embedding_size = 1024
# face_threshold = 1.49
# face_threshold = 1.54
face_threshold = 0.95
face_combine_threshold = 0.7
save_threshold_min = 0.5
save_threshold_max = 0.7
class CosFace(object):
"""
"""
def __init__(self, weight_file):
config = tf.ConfigProto(log_device_placement=False)
config.gpu_options.allow_growth = True
self.__graph = tf.Graph()
with self.__graph.as_default():
self.__session = tf.Session(config=config, graph=self.__graph)
self.images_placeholder = tf.placeholder(tf.float32, shape=(
None, image_height, image_width, 3), name='image')
self.phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
prelogits = sphere_network.infer(self.images_placeholder, embedding_size)
prelogits = slim.batch_norm(prelogits,
is_training=self.phase_train_placeholder,
epsilon=1e-5,
scale=True,
scope='softmax_bn')
self.embeddings = tf.identity(prelogits)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)
saver.restore(self.__session, weight_file)
def infer(self, images, do_flip=False):
"""
:param images: utils.py-->load_data
rgb format
resize to (image_height, image_width, 3)
img = img - 127.5
img = img / 128.
:return:
"""
feed_dict = {self.images_placeholder: images, self.phase_train_placeholder: False}
feats = self.__session.run(self.embeddings, feed_dict=feed_dict)
if do_flip:
images_flip = [np.fliplr(image) for image in images]
feed_dict_flip = {self.images_placeholder: images_flip, self.phase_train_placeholder: False}
feats_flip = self.__session.run(self.embeddings, feed_dict=feed_dict_flip)
feats = np.concatenate((feats, feats_flip), axis=1)
feats = utils.l2_normalize(feats)
return feats
def data_preprocess(self, image):
"""
:param image: opencv bgr image
:return:
"""
img_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
height, width = img_rgb.shape[0:2]
img_new = np.zeros((image_height, image_width, 3), dtype=np.float64)
ratio = min(image_height * 1.0 / height, image_width * 1.0 / width)
new_height, new_width = int(height * ratio), int(width * ratio)
height_offset, width_offset = (image_height - new_height) //2, (image_width - new_width) // 2
img_rgb = cv2.resize(img_rgb, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
img_rgb = img_rgb.astype(np.float64)
img_new[height_offset: height_offset + new_height, width_offset: width_offset + new_width, :] = img_rgb
# img_new = cv2.resize(img_rgb, (image_height, image_width), interpolation=cv2.INTER_CUBIC)
# img_new = img_new.astype(np.float64)
img_new -= 127.5
img_new /= 128.
return img_new
def face_dist(self, embedding_one, embedding_two):
diff = np.subtract(embedding_one, embedding_two)
dist = np.sum(np.square(diff))
return dist
def face_dist_multiple(self, embeddings_one, embeddings_two):
diff = np.subtract(embeddings_one, embeddings_two)
dist = np.sum(np.square(diff), 1)
return dist
def __del__(self):
self.__session.close()
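# Minimal usage sketch (illustrative; the image paths are placeholders, the checkpoint path
# is the one listed in the configuration block at the top of this file):
#   model = CosFace('models/model-20180626-205832.ckpt-60000')
#   img_a = model.data_preprocess(cv2.imread('face_a.jpg'))   # BGR image from disk
#   img_b = model.data_preprocess(cv2.imread('face_b.jpg'))
#   feats = model.infer(np.stack([img_a, img_b]))
#   # smaller squared distance means more similar faces
#   same_person = model.face_dist(feats[0], feats[1]) < face_threshold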
|
"""NUI Galway CT5132/CT5148 Programming and Tools for AI (James McDermott)
Skeleton/solution for Assignment 1: Numerical Integration
By writing my name below and submitting this file, I/we declare that
all additions to the provided skeleton file are my/our own work, and
that I/we have not seen any work on this assignment by another
student/group.
Student name(s): Philip O' Connor
Student ID(s): 21249304
"""
import numpy as np
import sympy
import itertools
import math
def numint_py(f, a, b, n):
"""Numerical integration. For a function f, calculate the definite
integral of f from a to b by approximating with n "slices" and the
"left" scheme. This function must use pure Python, no Numpy.
>>> round(numint_py(math.sin, 0, 1, 100), 5)
0.45549
>>> round(numint_py(lambda x: x, 0, 1, 100000), 3 )
0.5
>>> round(numint_py(lambda x: x, 0, 1, 6), 5)
0.41667
>>> round(numint_py(lambda x: 1, 0, 1, 100), 5)
1.0
>>> round(numint_py(lambda x: -1, 0, 1, 100), 5)
-1.0
>>> round(numint_py(math.exp, 1, 2, 100), 5)
4.64746
"""
A = 0
w = (b - a) / float(n) # width of one slice
# STUDENTS ADD CODE FROM HERE TO END OF FUNCTION
    integral = 0  # initialise the integral variable
    x = a  # start at the lower bound a
    for i in range(n):  # one iteration per slice, using the left endpoint of each slice (left scheme)
        integral = (f(x) * w) + integral  # calculate the area of the new slice and add it to the running total
        x += w  # increment x by the slice width w
    return integral
def numint(f, a, b, n, scheme="left"):
"""Numerical integration. For a function f, calculate the definite
integral of f from a to b by approximating with n "slices" and the
given scheme. This function should use Numpy, and no for-loop. Eg
np.linspace() will be useful.
>>> round(numint(np.sin, 0, 1, 100, 'left'), 5)
0.45549
>>> round(numint(lambda x: np.ones_like(x), 0, 1, 100, 'left'), 5)
1.0
>>> round(numint(np.exp, 1, 2, 100, 'left'), 5)
4.64746
>>> round(numint(np.exp, 1, 2, 100, 'midpoint'), 5)
4.67075
>>> round(numint(np.sin, 0, 1, 100, 'midpoint'), 5)
0.4597
>>> round(numint(np.exp, 1, 2, 100, 'right'), 5)
4.69417
"""
# STUDENTS ADD CODE FROM HERE TO END OF FUNCTION
step =(b - a) / float(n)
xrange = np.linspace(a,b-step,n) # Create the linear range for values to be used for x
#step = abs(a-xrange[1]) # Determine the step size in the linear space
if scheme =='left':
return (f(xrange)*step).sum() # Multiply f(x) by the step size to give the area per slice and sum it together excluding the final point as this is the left rectangular scheme
elif scheme == 'midpoint':
xrange2 = np.linspace(a+step,b,n) # Create a new linear space with the step size offset
midpoint = (xrange+xrange2)/2 # Sum the two ranges together elementwise and divide by two to get the mean value
return (f(midpoint)*step).sum() # Multiply f(x) by the step size to give the area per slice and sum it together
elif scheme == 'right':
xrange_right = np.linspace(a+step,b,n)
        return (f(xrange_right)*step).sum() # Multiply f(x) by the step size to give the area per slice and sum it together excluding the first point as this is the right rectangular scheme
def true_integral(fstr, a, b):
"""Using Sympy, calculate the definite integral of f from a to b and
return as a float. Here fstr is an expression in x, as a str. It
should use eg "np.sin" for the sin function.
This function is quite tricky, so you are not expected to
understand it or change it! However, you should understand how to
use it. See the doctest example.
>>> true_integral("np.sin(x)", 0, 2 * np.pi)
0.0
>>> true_integral("x**2", 0, 1)
0.3333333333333333
"""
x = sympy.symbols("x")
# make fsym, a Sympy expression in x, now using eg "sympy.sin"
fsym = eval(fstr.replace("np", "sympy"))
A = sympy.integrate(fsym, (x, a, b)) # definite integral
A = float(A.evalf()) # convert to float
return A
def numint_err(fstr, a, b, n, scheme):
"""For a given function fstr and bounds a, b, evaluate the error
achieved by numerical integration on n points with the given
scheme. Return the true value (given by true_integral),
absolute error, and relative error, as a tuple.
Notice that the absolute error and relative error must both be
positive.
Notice that the relative error will be infinity when the true
value is zero. None of the examples in our assignment will have a
true value of zero.
>>> print("%.4f %.4f %.4f" % numint_err("x**2", 0, 1, 10, 'left'))
0.3333 0.0483 0.1450
>>> print("%.4f %.4f %.4f" % numint_err("-x**2", 0, 1, 10, 'left'))
-0.3333 0.0483 0.1450
>>> print("%.4f %.4f %.4f" % numint_err("x**2", 0, 1, 10, 'left'))
0.3333 0.0483 0.1450
"""
f = eval("lambda x: " + fstr) # f is a Python function
A = true_integral(fstr, a, b)
# STUDENTS ADD CODE FROM HERE TO END OF FUNCTION
integ = numint(f, a,b, n, scheme)
try:
return(A,abs(A-integ), abs((A-integ)/A )) # Calculations for absolute and relative error
except ZeroDivisionError:
print("Curve defined has 0 area - cannot calculate relative error")
def make_table(f_ab_s, ns, schemes):
"""For each function f with associated bounds (a, b), and each value
of n and each scheme, calculate the absolute and relative error of
numerical integration and print out one line of a table. This
function doesn't need to return anything, just print. Each
function and bounds will be a tuple (f, a, b), so the argument
f_ab_s is a list of tuples.
Hint: use print() with the format string
"%s,%.2f,%.2f,%d,%s,%.4g,%.4g,%.4g". Hint 2: consider itertools.
>>> make_table([("x**2", 0, 1), ("np.sin(x)", 0, 1)], [10, 100], ['left', 'midpoint'])
x**2,0.00,1.00,10,left,0.3333,0.04833,0.145
x**2,0.00,1.00,10,midpoint,0.3333,0.0008333,0.0025
x**2,0.00,1.00,100,left,0.3333,0.004983,0.01495
x**2,0.00,1.00,100,midpoint,0.3333,8.333e-06,2.5e-05
np.sin(x),0.00,1.00,10,left,0.4597,0.04246,0.09236
np.sin(x),0.00,1.00,10,midpoint,0.4597,0.0001916,0.0004168
np.sin(x),0.00,1.00,100,left,0.4597,0.004211,0.009161
np.sin(x),0.00,1.00,100,midpoint,0.4597,1.915e-06,4.167e-06
"""
# STUDENTS ADD CODE FROM HERE TO END OF FUNCTION
for (fstr, a, b), n, scheme in itertools.product(f_ab_s, ns, schemes): # Unpack the inputs given and assign to their respective variables.
error = numint_err(fstr, a, b, n, scheme)
print(f"{fstr:s},{a:0.2f},{b:0.2f},{n:d},{scheme:s},{error[0]:.4g},{error[1]:0.4g},{error[2]:0.4g}") # Print the output in the format requested
def main():
"""Call make_table() as specified in the pdf."""
# STUDENTS ADD CODE FROM HERE TO END OF FUNCTION
make_table([("np.cos(x)", 0, math.pi), ("np.sin(2*x)", 0, 1), ('np.exp(x)', 0, 1)], [10, 100, 1000], ['left', 'midpoint'])
"""
INTERPRETATION: STUDENTS ADD TEXT HERE TO INTERPRET main() results.
Results from main() show that the midpoint scheme consistently gave the closest approximations of the true integral across all f().
With all functions run, and the minimum relative errors compared for each function, midpoint is at least 3 orders of magnitude lower than the left method, with the largest delta being 13 orders of magnitude lower.
As n increases for sin(x) and exp(x) there is a non-linear decrease in observed absolute error. cos(x) actually shows an increase in absolute error at n=100 with the midpoint method; this increase declines again at n=1000.
"""
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
|
from django.urls import path
from .views import QuestionsList,QuestionDetailView
urlpatterns = [
path("",QuestionsList.as_view()),
path("<int:pk>/",QuestionDetailView.as_view()),
]
|
from django.conf.urls.defaults import patterns, include, url
from tastypie.api import Api
from api import *
from django.contrib import admin
admin.autodiscover()
v1_api = Api(api_name='v1')
v1_api.register(UserResource())
v1_api.register(CurrentAccountResource())
urlpatterns = patterns('',
url(r'^$', include('currentaccount.urls')),
url(r'^current_account/', include('currentaccount.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^api/', include(v1_api.urls)),
url(r'^accounts/login/',
'django.contrib.auth.views.login',
{ 'template_name' : 'currentaccount/login.html'}
),
)
|
"""possys URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from rest_framework import routers
from . import views as possys_views
urlpatterns = [
path('', possys_views.StoreView.as_view(), name="store"),
path('history/', possys_views.HistoryView.as_view(), name="history"),
path('api/add_transaction/<idm>/<int:product_id>/', possys_views.add_transaction_with_product),
path('api/add_transaction/<idm>/<int:price>/', possys_views.add_transaction_without_product),
]
router = routers.SimpleRouter()
router.register(r'possys/products', possys_views.ProductViewSet)
router.register(r'possys/categories', possys_views.CategoryViewSet)
|
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os, sys
import msger
import pluginbase
from mic.conf import configmgr
from mic.utils import errors
__all__ = ['PluginMgr', 'pluginmgr']
PLUGIN_TYPES = ["imager", "backend"] # TODO "hook"
class PluginMgr(object):
plugin_dirs = {}
# make the manager class as singleton
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(PluginMgr, cls).__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self):
self.plugin_dir = configmgr.common['plugin_dir']
def append_dirs(self, dirs):
for path in dirs:
self._add_plugindir(path)
# load all the plugins AGAIN
self._load_all()
def _add_plugindir(self, path):
path = os.path.abspath(os.path.expanduser(path))
if not os.path.isdir(path):
msger.warning("Plugin dir is not a directory or does not exist: %s"\
% path)
return
if path not in self.plugin_dirs:
self.plugin_dirs[path] = False
# the value True/False means "loaded"
def _load_all(self):
for (pdir, loaded) in self.plugin_dirs.iteritems():
if loaded: continue
sys.path.insert(0, pdir)
for mod in [x[:-3] for x in os.listdir(pdir) if x.endswith(".py")]:
if mod and mod != '__init__':
if mod in sys.modules:
#self.plugin_dirs[pdir] = True
msger.warning("Module %s already exists, skip" % mod)
else:
try:
pymod = __import__(mod)
self.plugin_dirs[pdir] = True
msger.debug("Plugin module %s:%s imported"\
% (mod, pymod.__file__))
except ImportError, e:
msger.warning('Loading failed, skip plugin %s/%s'\
% (os.path.basename(pdir), mod))
del(sys.path[0])
def get_plugins(self, ptype):
""" the return value is dict of name:class pairs """
if ptype not in PLUGIN_TYPES:
            raise errors.CreatorError('%s is not a valid plugin type' % ptype)
self._add_plugindir(os.path.join(self.plugin_dir, ptype))
self._load_all()
return pluginbase.get_plugins(ptype)
pluginmgr = PluginMgr()
|
from django.shortcuts import render
# Create your views here.
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django.contrib.auth.models import User
from rest_framework_simplejwt.tokens import RefreshToken
from rest_framework import status
from .serializers import RegisterSerializer,ChangePasswordSerializer
from rest_framework import generics, permissions
from django.http import JsonResponse
from django.core.exceptions import ObjectDoesNotExist
class RegisterView(generics.GenericAPIView):
serializer_class = RegisterSerializer
def post(self, request):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
if (serializer.is_valid()):
user = serializer.save()
# id = '1236'
# username = 'abd'
# email = 'abd@gmail.com'
# password = request.data['password']
# user = User(id=id,username=username,email=email)
# user.set_password(password)
# user.save()
refresh = RefreshToken.for_user(user)
return JsonResponse(
{
"status": "success",
'user_id': user.id,
'refresh': str(refresh),
'access': str(refresh.access_token)
})
class ChangePasswordView(generics.UpdateAPIView):
queryset = User.objects.all()
permission_classes = (IsAuthenticated,)
serializer_class = ChangePasswordSerializer
#@detail_route(methods='PUT')
# def perform_update(self, serializer):
# try:
# if request.method == "PUT":
# serializer.save(user=self.request.user.id)
# except film.DoesNotExist:
# return Response(status=status.HTTP_404_NOT_FOUND)
class personalInfo(APIView):
def get(self, request):
token = request.META.get('HTTP_AUTHORIZATION', " ").split(' ')[1]
data = request.data
user={'id':request.user.id,'username':request.user.username,'email':request.user.email}
return Response(user)
#
|
import sys
d =dict()
for line in open(sys.argv[1]):
sp = line.strip().split(" ")
d[sp[2]] = d.get(sp[2], list())
d[sp[2]].append((int(sp[0]), sp[1]))
for key in d.keys():
d[key] = sorted(d[key], key=lambda x:-x[0])
for i in range(0, max([len(x) for x in d.values()])):
for key in d.keys():
if len(d[key]) <= i:
continue
print d[key][i][0], d[key][i][1], key
|
# https://splinter.readthedocs.io/en/latest/drivers/chrome.html
from splinter import Browser
from bs4 import BeautifulSoup
executable_path = {'executable_path': 'chromedriver'}
browser = Browser('chrome', **executable_path, headless=True)
url = 'http://quotes.toscrape.com/'
browser.visit(url)
for x in range(1, 6):
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
quotes = soup.find_all('span', class_='text')
for quote in quotes:
print('page:', x, '-------------')
print(quote.text)
browser.click_link_by_partial_text('Next')
|
import sys
sys.path.append('C:\\Users\\nikit\\AppData\\Local\\Programs\\Python\\python38\\lib\\site-packages')
import NBodyPlotter as nbp
from NBodyPlotter import NBodySolver
from NBodyPlotter import Body
import matplotlib.pyplot as plt
import numpy as np
#Define scale values to keep close to unity
mass_scale = 1e30 #Kg
dist_scale = 1e11 #m
vel_scal = 1000 #m/s (such that inputted units are in Km/s)
orbit_period = 356*24*60*60 #s
solver = NBodySolver()
solver.SetSolverRelativeValues(mass_scale, dist_scale, vel_scal, orbit_period)
#Create figures
fig = plt.figure(figsize=(20,8))
ax1 = fig.add_subplot(151, projection='3d')
ax2 = fig.add_subplot(152, projection='3d')
ax3 = fig.add_subplot(153, projection='3d')
ax4 = fig.add_subplot(154, projection='3d')
ax5 = fig.add_subplot(155, projection='3d')
t = 10
time_span=np.linspace(0,t,t*10000)
ip1 = np.array([-1,0,0])
ip2 = -ip1
ip3 = np.array([0,0,0])
vx1 = 0.30689 * 10
vy1 = 0.12551 * 10
iv1 = np.array([vx1,vy1, 0])
iv2 = np.array([vx1, vy1, 0])
iv3 = np.array([-2*vx1, -2*vy1, 0])
solver.AddBody(Body("1", 0.1, ip1, iv1))
solver.AddBody(Body("2", 0.1, ip2, iv2))
solver.AddBody(Body("3", 0.1, ip3, iv3))
solver.SolveNBodyProblem(time_span)
solver.PlotNBodySolution(ax=ax1, show=False)
ax1.set_title("Stable butterfly system (" + str(t) + " years)")
solver.bodies.clear()
solver.AddBody(Body("1", 0.1, ip1, iv1*1.5))
solver.AddBody(Body("2", 0.1, ip2, iv2))
solver.AddBody(Body("3", 0.1, ip3, iv3))
solver.SolveNBodyProblem(time_span)
solver.PlotNBodySolution(ax=ax2, show=False, legend=False)
ax2.set_title(r"$v_{1_{new}}=1.5v_1$ - Stable (" + str(t) + " years)")
solver.bodies.clear()
solver.AddBody(Body("1", 0.1, ip1, iv1*3))
solver.AddBody(Body("2", 0.1, ip2, iv2))
solver.AddBody(Body("3", 0.1, ip3, iv3))
solver.SolveNBodyProblem(time_span)
solver.PlotNBodySolution(ax=ax3, show=False, legend=False)
ax3.set_title(r"$v_{1_{new}}=3v_1$ - Chaotic (" + str(t) + " years)")
t = 30
time_span=np.linspace(0,t,t*10000)
solver.bodies.clear()
solver.AddBody(Body("1", 0.15, ip1, iv1))
solver.AddBody(Body("2", 0.1, ip2, iv2))
solver.AddBody(Body("3", 0.1, ip3, iv3))
solver.SolveNBodyProblem(time_span)
solver.PlotNBodySolution(ax=ax4, show=False, legend=False)
ax4.set_title(r"$m_{1_{new}}=1.5m_1$ - Stable (" + str(t) + " years)")
solver.bodies.clear()
t = 10
time_span=np.linspace(0,t,t*10000)
solver.AddBody(Body("1", 0.18, ip1, iv1))
solver.AddBody(Body("2", 0.1, ip2, iv2))
solver.AddBody(Body("3", 0.1, ip3, iv3))
solver.SolveNBodyProblem(time_span)
solver.PlotNBodySolution(ax=ax5, show=False, legend=False)
ax5.set_title(r"$m_{1_{new}}=1.8m_1$ - Chaotic (" + str(t) + " years)")
fig.suptitle("Butterfly system with varying initial velocities and masses")
fig.savefig("butterfly_varying_mass_vel.png", dpi = fig.dpi)
plt.show()
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Low-level USB transciever gateware -- control request components. """
import unittest
import functools
import operator
from amaranth import Signal, Module, Elaboratable, Cat
from amaranth.hdl.rec import Record, DIR_FANOUT
from . import USBSpeed
from .packet import USBTokenDetector, USBDataPacketDeserializer, USBPacketizerTest
from .packet import DataCRCInterface, USBInterpacketTimer, TokenDetectorInterface
from .packet import InterpacketTimerInterface, HandshakeExchangeInterface
from ..stream import USBInStreamInterface, USBOutStreamInterface
from ..request import SetupPacket
from ...utils.bus import OneHotMultiplexer
from ...test import usb_domain_test_case
class RequestHandlerInterface:
""" Record representing a connection between a control endpoint and a request handler.
Components (I = input to request handler; O = output to control interface):
*: setup -- Carries the most recent setup request to the handler.
*: tokenizer -- Carries information about any incoming token packets.
# Control request status signals.
I: data_requested -- Pulsed to indicate that a data-phase IN token has been issued,
and it's now time to respond (post-inter-packet delay).
I: status_requested -- Pulsed to indicate that a response to our status phase has been
requested.
# Address / configuration connections.
O: address_changed -- Strobe; pulses high when the device's address should be changed.
O: new_address[7] -- When `address_changed` is high, this field contains the address that
should be adopted.
I: active_config -- The configuration number of the active configuration.
O: config_changed -- Strobe; pulses high when the device's configuration should be changed.
O: new_config[8] -- When `config_changed` is high, this field contains the configuration that
should be applied.
# Data rx signals.
*: rx -- The receive stream for any data packets received.
I: handshakes_in -- Inputs that indicate when handshakes are detected from the host.
I: rx_ready_for_response -- Strobe that indicates that we're ready to respond to a complete transmission.
Indicates that an interpacket delay has passed after an `rx_complete` strobe.
I: rx_invalid: -- Strobe that indicates an invalid data receipt. Indicates that the most recently
received packet was corrupted; and should be discarded as invalid.
# Data tx signals.
*: tx -- The transmit stream for any packets generated by the handler.
O: handshakes_out -- Carries handshake generation requests.
"""
def __init__(self):
self.setup = SetupPacket()
self.tokenizer = TokenDetectorInterface()
self.data_requested = Signal()
self.status_requested = Signal()
self.address_changed = Signal()
self.new_address = Signal(7)
self.active_config = Signal(8)
self.config_changed = Signal()
self.new_config = Signal(8)
self.rx = USBOutStreamInterface()
self.rx_expected = Signal()
self.rx_ready_for_response = Signal()
self.rx_invalid = Signal()
self.tx = USBInStreamInterface()
self.handshakes_out = HandshakeExchangeInterface(is_detector=True)
self.handshakes_in = HandshakeExchangeInterface(is_detector=False)
self.tx_data_pid = Signal(reset=1)
class USBRequestHandler(Elaboratable):
""" Base class for USB request handler modules.
I/O port:
*: interface -- The RequestHandlerInterface we'll use.
"""
def __init__(self):
#
# I/O port:
#
self.interface = RequestHandlerInterface()
def send_zlp(self):
""" Returns the statements necessary to send a zero-length packet."""
tx = self.interface.tx
# Send a ZLP along our transmit interface.
# Our interface accepts 'valid' and 'last' without 'first' as a ZLP.
return [
tx.valid .eq(1),
tx.last .eq(1)
]
class USBSetupDecoder(Elaboratable):
""" Gateware responsible for detecting Setup transactions.
I/O port:
*: data_crc -- Interface to the device's data-CRC generator.
*: tokenizer -- Interface to the device's token detector.
*: timer -- Interface to the device's interpacket timer.
I: speed -- The device's current operating speed. Should be a USBSpeed
enumeration value -- 0 for high, 1 for full, 2 for low.
*: packet -- The SetupPacket record carrying our parsed output.
I: ack -- True when we're requesting that an ACK be generated.
"""
SETUP_PID = 0b1101
def __init__(self, *, utmi, standalone=False):
"""
        Parameters:
utmi -- The UTMI bus we'll monitor for data. We'll consider this read-only.
standalone -- Debug parameter. If true, this module will operate without external components;
i.e. without an internal data-CRC generator, or tokenizer. In this case, tokenizer
and timer should be set to None; and will be ignored.
"""
self.utmi = utmi
self.standalone = standalone
#
# I/O port.
#
self.data_crc = DataCRCInterface()
self.tokenizer = TokenDetectorInterface()
self.timer = InterpacketTimerInterface()
self.speed = Signal(2)
self.packet = SetupPacket()
self.ack = Signal()
def elaborate(self, platform):
m = Module()
# If we're standalone, generate the things we need.
if self.standalone:
# Create our tokenizer...
m.submodules.tokenizer = tokenizer = USBTokenDetector(utmi=self.utmi)
m.d.comb += tokenizer.interface.connect(self.tokenizer)
# ... and our timer.
m.submodules.timer = timer = USBInterpacketTimer()
timer.add_interface(self.timer)
m.d.comb += timer.speed.eq(self.speed)
# Create a data-packet-deserializer, which we'll use to capture the
# contents of the setup data packets.
m.submodules.data_handler = data_handler = \
USBDataPacketDeserializer(utmi=self.utmi, max_packet_size=8, create_crc_generator=self.standalone)
m.d.comb += self.data_crc.connect(data_handler.data_crc)
# Instruct our interpacket timer to begin counting when we complete receiving
# our setup packet. This will allow us to track interpacket delays.
m.d.comb += self.timer.start.eq(data_handler.new_packet)
# Keep our output signals de-asserted unless specified.
m.d.usb += [
self.packet.received .eq(0),
]
with m.FSM(domain="usb"):
# IDLE -- we haven't yet detected a SETUP transaction directed at us
with m.State('IDLE'):
pid_matches = (self.tokenizer.pid == self.SETUP_PID)
                # If we've just received a new SETUP token addressed to us,
# the next data packet is going to be for us.
with m.If(pid_matches & self.tokenizer.new_token):
m.next = 'READ_DATA'
# READ_DATA -- we've just seen a SETUP token, and are waiting for the
# data payload of the transaction, which contains the setup packet.
with m.State('READ_DATA'):
# If we receive a token packet before we receive a DATA packet,
# this is a PID mismatch. Bail out and start over.
with m.If(self.tokenizer.new_token):
m.next = 'IDLE'
# If we have a new packet, parse it as setup data.
with m.If(data_handler.new_packet):
# If we got exactly eight bytes, this is a valid setup packet.
with m.If(data_handler.length == 8):
# Collect the signals that make up our bmRequestType [USB2, 9.3].
request_type = Cat(self.packet.recipient, self.packet.type, self.packet.is_in_request)
m.d.usb += [
# Parse the setup data itself...
request_type .eq(data_handler.packet[0]),
self.packet.request .eq(data_handler.packet[1]),
self.packet.value .eq(Cat(data_handler.packet[2], data_handler.packet[3])),
self.packet.index .eq(Cat(data_handler.packet[4], data_handler.packet[5])),
self.packet.length .eq(Cat(data_handler.packet[6], data_handler.packet[7])),
# ... and indicate that we have new data.
self.packet.received .eq(1),
]
# We'll now need to wait a receive-transmit delay before initiating our ACK.
# Per the USB 2.0 and ULPI 1.1 specifications:
# - A HS device needs to wait 8 HS bit periods before transmitting [USB2, 7.1.18.2].
# Each ULPI cycle is 8 HS bit periods, so we'll only need to wait one cycle.
# - We'll use our interpacket delay timer for everything else.
with m.If(self.timer.tx_allowed | (self.speed == USBSpeed.HIGH)):
# If we're a high speed device, we only need to wait for a single ULPI cycle.
# Processing delays mean we've already met our interpacket delay; and we can ACK
# immediately.
m.d.comb += self.ack.eq(1)
m.next = "IDLE"
# For other cases, handle the interpacket delay by waiting.
with m.Else():
m.next = "INTERPACKET_DELAY"
                # Otherwise, this isn't a valid setup packet, and we should ignore it. [USB2, 8.5.3]
with m.Else():
m.next = "IDLE"
# INTERPACKET -- wait for an inter-packet delay before responding
with m.State('INTERPACKET_DELAY'):
# ... and once it equals zero, ACK and return to idle.
with m.If(self.timer.tx_allowed):
m.d.comb += self.ack.eq(1)
m.next = "IDLE"
return m
class USBSetupDecoderTest(USBPacketizerTest):
FRAGMENT_UNDER_TEST = USBSetupDecoder
FRAGMENT_ARGUMENTS = {'standalone': True}
def initialize_signals(self):
# Assume high speed.
yield self.dut.speed.eq(USBSpeed.HIGH)
def provide_reference_setup_transaction(self):
""" Provide a reference SETUP transaction. """
# Provide our setup packet.
yield from self.provide_packet(
0b00101101, # PID: SETUP token.
0b00000000, 0b00010000 # Address 0, endpoint 0, CRC
)
# Provide our data packet.
yield from self.provide_packet(
0b11000011, # PID: DATA0
0b0_10_00010, # out vendor request to endpoint
12, # request number 12
0xcd, 0xab, # value 0xABCD (little endian)
0x23, 0x01, # index 0x0123
0x78, 0x56, # length 0x5678
0x3b, 0xa2, # CRC
)
@usb_domain_test_case
def test_valid_sequence_receive(self):
dut = self.dut
# Before we receive anything, we shouldn't have a new packet.
self.assertEqual((yield dut.packet.received), 0)
# Simulate the host sending basic setup data.
yield from self.provide_reference_setup_transaction()
# We're high speed, so we should be ACK'ing immediately.
self.assertEqual((yield dut.ack), 1)
# We now should have received a new setup request.
yield
self.assertEqual((yield dut.packet.received), 1)
# Validate that its values are as we expect.
self.assertEqual((yield dut.packet.is_in_request), 0 )
self.assertEqual((yield dut.packet.type), 0b10 )
self.assertEqual((yield dut.packet.recipient), 0b00010 )
self.assertEqual((yield dut.packet.request), 12 )
self.assertEqual((yield dut.packet.value), 0xabcd )
self.assertEqual((yield dut.packet.index), 0x0123 )
self.assertEqual((yield dut.packet.length), 0x5678 )
@usb_domain_test_case
def test_fs_interpacket_delay(self):
dut = self.dut
# Place our DUT into full speed mode.
yield dut.speed.eq(USBSpeed.FULL)
# Before we receive anything, we shouldn't have a new packet.
self.assertEqual((yield dut.packet.received), 0)
# Simulate the host sending basic setup data.
yield from self.provide_reference_setup_transaction()
# We shouldn't ACK immediately; we'll need to wait our interpacket delay.
yield
self.assertEqual((yield dut.ack), 0)
# After our minimum interpacket delay, we should see an ACK.
yield from self.advance_cycles(10)
self.assertEqual((yield dut.ack), 1)
@usb_domain_test_case
def test_short_setup_packet(self):
dut = self.dut
# Before we receive anything, we shouldn't have a new packet.
self.assertEqual((yield dut.packet.received), 0)
# Provide our setup packet.
yield from self.provide_packet(
0b00101101, # PID: SETUP token.
0b00000000, 0b00010000 # Address 0, endpoint 0, CRC
)
# Provide our data packet; but shorter than expected.
yield from self.provide_packet(
0b11000011, # PID: DATA0
0b00100011, 0b01000101, 0b01100111, 0b10001001, # DATA
0b00011100, 0b00001110 # CRC
)
# This shouldn't count as a valid setup packet.
yield
self.assertEqual((yield dut.packet.received), 0)
class USBRequestHandlerMultiplexer(Elaboratable):
""" Multiplexes multiple RequestHandlers down to a single interface.
Interfaces are added using .add_interface().
I/O port:
*: shared -- The post-multiplexer RequestHandler interface.
"""
def __init__(self):
#
# I/O port
#
self.shared = RequestHandlerInterface()
#
# Internals
#
self._interfaces = []
def add_interface(self, interface: RequestHandlerInterface):
""" Adds a RequestHandlerInterface to the multiplexer.
Arbitration is not performed; it's expected only one handler will be
driving requests at a time.
"""
self._interfaces.append(interface)
def _multiplex_signals(self, m, *, when, multiplex, sub_bus=None):
""" Helper that creates a simple priority-encoder multiplexer.
        Parameters:
            when      -- The name of the interface signal that indicates that the `multiplex` signals
                         should be selected for output. If this signal itself should be multiplexed,
                         include it in `multiplex`.
multiplex -- The names of the interface signals to be multiplexed.
"""
def get_signal(interface, name):
""" Fetches an interface signal by name / sub_bus. """
if sub_bus:
bus = getattr(interface, sub_bus)
return getattr(bus, name)
else:
return getattr(interface, name)
# We're building an if-elif tree; so we should start with an If entry.
conditional = m.If
for interface in self._interfaces:
condition = get_signal(interface, when)
with conditional(condition):
# Connect up each of our signals.
for signal_name in multiplex:
# Get the actual signals for our input and output...
driving_signal = get_signal(interface, signal_name)
target_signal = get_signal(self.shared, signal_name)
# ... and connect them.
m.d.comb += target_signal .eq(driving_signal)
# After the first element, all other entries should be created with Elif.
conditional = m.Elif
def elaborate(self, platform):
m = Module()
shared = self.shared
#
# Pass through signals being routed -to- our pre-mux interfaces.
#
for interface in self._interfaces:
m.d.comb += [
shared.setup .connect(interface.setup),
shared.tokenizer .connect(interface.tokenizer),
interface.data_requested .eq(shared.data_requested),
interface.status_requested .eq(shared.status_requested),
shared.handshakes_in .connect(interface.handshakes_in),
interface.active_config .eq(shared.active_config),
shared.rx .connect(interface.rx),
interface.rx_ready_for_response .eq(shared.rx_ready_for_response),
interface.rx_invalid .eq(shared.rx_invalid),
]
#
# Multiplex the signals being routed -from- our pre-mux interface.
#
self._multiplex_signals(m,
when='address_changed',
multiplex=['address_changed', 'new_address']
)
self._multiplex_signals(m,
when='config_changed',
multiplex=['config_changed', 'new_config']
)
# Connect up our transmit interface.
m.submodules.tx_mux = tx_mux = OneHotMultiplexer(
interface_type=USBInStreamInterface,
mux_signals=('payload',),
or_signals=('valid', 'first', 'last'),
pass_signals=('ready',)
)
tx_mux.add_interfaces(i.tx for i in self._interfaces)
m.d.comb += self.shared.tx.stream_eq(tx_mux.output)
# Pass through the relevant PID from our data source.
for i in self._interfaces:
with m.If(i.tx.valid):
m.d.comb += self.shared.tx_data_pid.eq(i.tx_data_pid)
# OR together all of our handshake-generation requests.
any_ack = functools.reduce(operator.__or__, (i.handshakes_out.ack for i in self._interfaces))
any_nak = functools.reduce(operator.__or__, (i.handshakes_out.nak for i in self._interfaces))
any_stall = functools.reduce(operator.__or__, (i.handshakes_out.stall for i in self._interfaces))
m.d.comb += [
shared.handshakes_out.ack .eq(any_ack),
shared.handshakes_out.nak .eq(any_nak),
shared.handshakes_out.stall .eq(any_stall),
]
return m
class StallOnlyRequestHandler(Elaboratable):
""" Simple gateware request handler that only conditionally stalls requests.
I/O port:
*: interface -- The RequestHandlerInterface used to handle requests.
See its record definition for signal definitions.
"""
def __init__(self, stall_condition):
"""
Parameters:
            stall_condition -- A function that accepts a SetupPacket record, and returns
an Amaranth conditional indicating whether we should stall.
"""
self.condition = stall_condition
#
# I/O port
#
self.interface = RequestHandlerInterface()
def elaborate(self, platform):
m = Module()
# If we have an opportunity to stall...
with m.If(self.interface.data_requested | self.interface.status_requested):
# ... and our stall condition is met ...
with m.If(self.condition(self.interface.setup)):
# ... do so.
m.d.comb += self.interface.handshakes_out.stall.eq(1)
return m
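# A hypothetical usage sketch (not part of this module): stall every request
# whose bmRequestType 'type' field is non-zero, i.e. anything that is not a
# standard request. The callable receives the interface's SetupPacket and
# returns an Amaranth expression used as the stall condition.
#
#   handler = StallOnlyRequestHandler(lambda setup: setup.type != 0)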
if __name__ == "__main__":
unittest.main(warnings="ignore")
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forwarding Rules Rule Scanner Test"""
import unittest
import mock
from tests.unittest_utils import ForsetiTestCase
from google.cloud.security.scanner.scanners import forwarding_rule_scanner
from google.cloud.security.scanner.audit import forwarding_rule_rules_engine as fre
from tests.unittest_utils import get_datafile_path
from google.cloud.security.common.gcp_type import forwarding_rule as fr
class ForwardingRule(object):
"""Represents ForwardRule resource."""
class ForwardingRuleScannerTest(ForsetiTestCase):
def test_forwarding_rules_scanner_all_match(self):
rules_local_path = get_datafile_path(__file__,
'forward_rule_test_1.yaml')
scanner = forwarding_rule_scanner.ForwardingRuleScanner({}, {}, '', rules_local_path)
gcp_forwarding_rules_resource_data = [
{
"project_id": "abc-123",
"resource_id": "46",
"creation_timestamp": "2017-06-01 04:19:37",
"name": "abc-123",
"description": "",
"region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
"ip_address": "198.51.100.99",
"ip_protocol": "UDP",
"port_range": "4500-4500",
"ports": "[]",
"target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
"load_balancing_scheme": "EXTERNAL",
},
{
"project_id": "abc-123",
"resource_id": "23",
"creation_timestamp": "2017-06-01 04:19:37",
"name": "abc-123",
"description": "",
"region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
"ip_address": "198.51.100.23",
"ip_protocol": "TCP",
"ports": "[8080]",
"target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
"load_balancing_scheme": "INTERNAL",
},
{
"project_id": "abc-123",
"resource_id": "46",
"creation_timestamp": "2017-06-01 04:19:37",
"name": "abc-123",
"description": "",
"region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
"ip_address": "198.51.100.46",
"ip_protocol": "ESP",
"ports": "[]",
"target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
"load_balancing_scheme": "EXTERNAL",
},
{
"project_id": "abc-123",
"resource_id": "46",
"creation_timestamp": "2017-06-01 04:19:37",
"name": "abc-123",
"description": "",
"region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
"ip_address": "198.51.100.35",
"ip_protocol": "TCP",
"port_range": "4500-4500",
"target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
"load_balancing_scheme": "EXTERNAL",
}
]
gcp_forwarding_rules_resource_objs = []
for gcp_forwarding_rule_resource_data in gcp_forwarding_rules_resource_data:
gcp_forwarding_rules_resource_objs.append(
fr.ForwardingRule(**gcp_forwarding_rule_resource_data)
)
violations = scanner._find_violations(gcp_forwarding_rules_resource_objs)
self.assertEqual(0, len(violations))
def test_forwarding_rules_scanner_no_match(self):
rules_local_path = get_datafile_path(__file__,
'forward_rule_test_1.yaml')
scanner = forwarding_rule_scanner.ForwardingRuleScanner({}, {}, '', rules_local_path)
gcp_forwarding_rules_resource_data = [
{
"project_id": "abc-123",
"resource_id": "46",
"creation_timestamp": "2017-06-01 04:19:37",
"name": "abc-123",
"description": "",
"region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
"ip_address": "198.51.100.99",
"ip_protocol": "TCP",
"port_range": "4500-4500",
"ports": "[]",
"target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
"load_balancing_scheme": "EXTERNAL",
},
{
"project_id": "abc-123",
"resource_id": "23",
"creation_timestamp": "2017-06-01 04:19:37",
"name": "abc-123",
"description": "",
"region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
"ip_address": "198.51.100.23",
"ip_protocol": "TCP",
"ports": "[8081]",
"target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
"load_balancing_scheme": "INTERNAL",
},
{
"project_id": "abc-123",
"resource_id": "46",
"creation_timestamp": "2017-06-01 04:19:37",
"name": "abc-123",
"description": "",
"region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
"ip_address": "198.51.101.46",
"ip_protocol": "ESP",
"ports": "[]",
"target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
"load_balancing_scheme": "EXTERNAL",
},
{
"project_id": "abc-123",
"resource_id": "46",
"creation_timestamp": "2017-06-01 04:19:37",
"name": "abc-123",
"description": "",
"region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
"ip_address": "198.51.100.35",
"ip_protocol": "TCP",
"port_range": "4400-4500",
"target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
"load_balancing_scheme": "EXTERNAL",
}
]
gcp_forwarding_rules_resource_objs = []
for gcp_forwarding_rule_resource_data in gcp_forwarding_rules_resource_data:
gcp_forwarding_rules_resource_objs.append(
fr.ForwardingRule(**gcp_forwarding_rule_resource_data)
)
violations = scanner._find_violations(gcp_forwarding_rules_resource_objs)
self.assertEqual(4, len(violations))
if __name__ == '__main__':
unittest.main()
|
import save
import yande_re
import log
import phttp
import time
import datetime
class Download:
    def do_download(self, pic_size: dict, page, max_page, pic_type, path):
        # Default save path: 'C:/Yandere/'
wr = save.Save(path)
yande = yande_re.Yande()
lg = log.Log()
resp = phttp.Http()
wr.create_folder()
        flag_id = int(wr.get('flag_id.data'))  # ID of the first image from the previous crawl; stop once it is reached again
        i = 0  # index of the current image
        end = False  # whether this crawl has finished
while True:
            # Only crawl while the stop page is 0 or the stop page has not yet been reached
if max_page == 0 or page <= max_page:
                # Fetch the page content
                lg.add('Reading page ' + str(page) + '...')
html = yande.get_html(page)
                # Process each li element on the page
for li in yande.get_li(html):
i += 1
info = yande.get_info(li)[0] # (id, img_url, width, height)
width = int(info[2])
height = int(info[3])
                    # Store last_start_id
if i == 1:
if len(info) == 4:
wr.clog('flag_id.data', info[0], True)
else:
                            # The very first li is already malformed, so last_start_id cannot be stored
exit()
                    # Is the data structure malformed?
if len(info) != 4:
                        lg.add(str(i) + ' malformed, skipping')
continue
                    # Reached where the previous crawl started, and the stop page is 0: this crawl is finished
if int(info[0]) == flag_id and max_page == 0:
end = True
break
                    download = False  # should this image be downloaded?
                    # Check the image orientation (kept as a plain if/elif chain)
if pic_type == 0:
download = True
elif pic_type == 1 and width > height:
download = True
elif pic_type == 2 and width < height:
download = True
elif pic_type == 3 and width == height:
download = True
else:
                        lg.add('Image orientation does not match, skipping')
continue
                    # Check the image dimensions
if width >= pic_size['min']['width'] and height >= pic_size['min']['height']:
if pic_size['max']['width'] and width > pic_size['max']['width']:
download = False
if pic_size['max']['height'] and height > pic_size['max']['height']:
download = False
else:
download = False
                    # Check the image aspect ratio
proportion = width / height
if proportion < pic_size['min']['proportion'] or (
pic_size['max']['proportion'] and proportion > pic_size['max']['proportion']):
download = False
if not download:
                        lg.add('Image size does not match, skipping')
continue
if download:
                        # Get the file name
                        # Do not URL-decode here, because some file names contain characters like '*'
                        file_name = info[1].split('/')[-1]
                        # A timestamp-based naming format is available below, but it may break the "already exists" check
                        # file_name = datetime.datetime.now().strftime('%Y%m%d') + "-" + info[0] + "-" + file_name
                        # Does the file already exist?
if wr.exists(file_name):
                            lg.add(info[0] + ' already exists, skipping')
continue
                        lg.add(str(i) + ' - ' + info[0] + ' starting download...')
ts = time.time()
img = resp.get(info[1], {'Host': 'files.yande.re',
'Referer': 'https://yande.re/post/show/' + info[0]}).content
                        lg.add('Download finished. Took ' + str(int(time.time() - ts)) + 's')
wr.write(file_name, img)
if end:
break
else:
break
page += 1
        lg.add('Crawl finished')
wr.clog('log_' + str(int(time.time())) + '.txt', lg.get())
exit(200)
|
# r  means read
# a  means append (add to the end of the file)
# w  means write (overwrite everything in the file)
# r+ means read and write (modify the file)
employee_file = open("employee.txt", "r")
print(employee_file.readable())
print(" ")
print(employee_file.read())        # read() consumes the whole file, leaving the cursor at the end
print(" ")
employee_file.seek(0)              # rewind, otherwise the calls below return empty results
print(employee_file.readline())    # first line only
print(" ")
employee_file.seek(0)
print(employee_file.readlines())   # list of all lines
print(" ")
employee_file.seek(0)
for employee in employee_file.readlines():
    print(employee)
employee_file.close()
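# A minimal sketch of the write/append modes listed at the top of this file;
# "employee_copy.txt" is a hypothetical scratch file, not part of the original example.
with open("employee_copy.txt", "w") as scratch_file:  # "w" creates or overwrites the file
    scratch_file.write("Jim - Salesman\n")
with open("employee_copy.txt", "a") as scratch_file:  # "a" appends to the end of the file
    scratch_file.write("Toby - Human Resources\n")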
|
import os
import sys
## If you get "module not found" errors for librf, these path tweaks help (the PyCharm IDE does not need them).
# Add .. and . to sys.path; depending on what the cwd was when the python process was created, one of these helps find librf.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
#print sys.path
import os
import unittest
from testutils import get_file_hash, make_file_with_random_content
from librf import estream
class TestEscapeStream(unittest.TestCase):
# This will run once for all tests in this class, setUp will run once before every testcase
@classmethod
def setUpClass(cls):
super(TestEscapeStream, cls).setUpClass()
# cls.static_resource = get_some_resource()
print "---------------------- setUpClass() called"
make_file_with_random_content('1k.rand_bin', 1024)
make_file_with_random_content('1MB.rand_bin', 1024 * 1024)
make_file_with_random_content('10MB.rand_bin', 10 * 1024 * 1024)
# print get_file_hash('1k.rand_bin')
@classmethod
def tearDownClass(cls):
super(TestEscapeStream, cls).tearDownClass()
print "---------------------- tearDownClass() called"
files = ['1k.rand_bin', '1k.rand_bin.escaped', '1k.rand_bin.escaped.unescaped',
'1MB.rand_bin', '1MB.rand_bin.escaped', '1MB.rand_bin.escaped.unescaped',
'10MB.rand_bin', '10MB.rand_bin.escaped', '10MB.rand_bin.escaped.unescaped']
for file in files:
try:
os.remove(file)
except OSError:
pass
def setUp(self):
pass
def tearDown(self):
pass
def test_1k(self):
rand_filename = '1k.rand_bin'
rand_filename_esc = rand_filename + '.escaped'
rand_filename_unesc = rand_filename_esc + '.unescaped'
print " ------------------------------------------------ testing " + rand_filename
orig_hash = get_file_hash(rand_filename)
esc_filesz = 0
unesc_filesz = 0
es = estream.EscapeStream()
esc_filesz = es.escape_and_save(rand_filename, rand_filename_esc )
unesc_filesz = es.unescape_and_save(rand_filename_esc, rand_filename_unesc)
escaped_then_unescaped = get_file_hash(rand_filename_unesc)
self.assertEqual(orig_hash, escaped_then_unescaped)
self.assertTrue(esc_filesz >= unesc_filesz)
print "esc_filesz: " + str(esc_filesz)
print "unesc_filesz: " + str(unesc_filesz)
def test_1M(self):
rand_filename = '1MB.rand_bin'
rand_filename_esc = rand_filename + '.escaped'
rand_filename_unesc = rand_filename_esc + '.unescaped'
print " ------------------------------------------------ testing " + rand_filename
orig_hash = get_file_hash(rand_filename)
es = estream.EscapeStream()
es.escape_and_save(rand_filename, rand_filename_esc )
es.unescape_and_save(rand_filename_esc, rand_filename_unesc)
escaped_then_unescaped = get_file_hash(rand_filename_unesc)
self.assertEqual(orig_hash, escaped_then_unescaped)
def test_10M(self):
rand_filename = '10MB.rand_bin'
rand_filename_esc = rand_filename + '.escaped'
rand_filename_unesc = rand_filename_esc + '.unescaped'
print " ------------------------------------------------ testing " + rand_filename
orig_hash = get_file_hash(rand_filename)
es = estream.EscapeStream()
es.escape_and_save(rand_filename, rand_filename_esc )
es.unescape_and_save(rand_filename_esc, rand_filename_unesc)
escaped_then_unescaped = get_file_hash(rand_filename_unesc)
self.assertEqual(orig_hash, escaped_then_unescaped)
def test_flag_byte_should_not_exist_in_escaped_file(self):
print " ------------------------------------------------ testing flag byte existence in escaped file"
rand_filename = '1k.rand_bin'
rand_filename_esc = rand_filename + '.escaped'
es = estream.EscapeStream()
es.escape_and_save(rand_filename, rand_filename_esc)
with open(rand_filename_esc, 'rb') as fin:
src_br = bytearray(fin.read(es.READ_SIZE))
while len(src_br) > 0:
for b in src_br:
self.assertNotEqual(b, es.FLAG_BYTE, "flag byte found in escaped file")
src_br = bytearray(fin.read(es.READ_SIZE))
def test_3(self):
self.assertTrue(True, "hi")
if __name__ == '__main__':
unittest.main()
|
from django import forms
from .models import Article, Category
choices = Category.objects.all().values_list('name', 'name')
choice_list = []
for item in choices:
choice_list.append(item)
class ArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = ('title', 'title_tag', 'image', 'author', 'category', 'content')
widgets = {
'title': forms.TextInput(attrs={'class': 'form-control'}),
'title_tag': forms.TextInput(attrs={'class': 'form-control'}),
'category': forms.Select(choices = choice_list, attrs={'class': 'form-control'}),
'author': forms.Select(attrs={'class': 'form-control'}),
'content': forms.Textarea(attrs={'class': 'form-control'}),
}
class EditArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = ('title', 'title_tag', 'content')
widgets = {
'title': forms.TextInput(attrs={'class': 'form-control'}),
'title_tag': forms.TextInput(attrs={'class': 'form-control'}),
'content': forms.Textarea(attrs={'class': 'form-control'}),
}
|
#!/usr/bin/env python3
# coding: utf-8
import math
class MaxProbCut:
def __init__(self,word_count_path,word_trans_path):
        self.word_dict = {}  # word probabilities, 1-gram
        self.word_dict_count = {}  # word counts, 1-gram
        self.trans_dict = {}  # transition probabilities, 2-gram
        self.trans_dict_count = {}  # transition counts, 2-gram
        self.max_wordlen = 0  # length of the longest word in the dictionary
        self.all_freq = 0  # total frequency of all words, 1-gram
word_count_path = word_count_path
word_trans_path = word_trans_path
self.init(word_count_path, word_trans_path)
    # Load the dictionaries
def init(self, word_count_path, word_trans_path):
self.word_dict_count = self.load_model(word_count_path)
        self.all_freq = sum(self.word_dict_count.values())  # total frequency of all words
self.max_wordlen = max(len(key) for key in self.word_dict_count.keys())
for key in self.word_dict_count:
self.word_dict[key] = math.log(self.word_dict_count[key] / self.all_freq)
        # Compute the transition probabilities
        Trans_dict = self.load_model(word_trans_path)
        for pre_word, post_info in Trans_dict.items():
            for post_word, count in post_info.items():
                word_pair = pre_word + ' ' + post_word
                self.trans_dict_count[word_pair] = float(count)
                if pre_word in self.word_dict_count.keys():
                    self.trans_dict[word_pair] = math.log(count / self.word_dict_count[pre_word])  # natural log, normalised
                else:
                    self.trans_dict[word_pair] = self.word_dict[post_word]
    # Load a pre-trained model file
def load_model(self, model_path):
f = open(model_path, 'r')
a = f.read()
word_dict = eval(a)
f.close()
return word_dict
    # Estimate the probability of an unseen word; smoothing approach taken from the book Beautiful Data
def get_unknow_word_prob(self, word):
return math.log(1.0 / (self.all_freq ** len(word)))
    # Get the probability of a candidate word
def get_word_prob(self, word):
        if word in self.word_dict.keys():  # the dictionary contains this word
prob = self.word_dict[word]
else:
prob = self.get_unknow_word_prob(word)
return prob
    # Get the transition probability
def get_word_trans_prob(self, pre_word, post_word):
trans_word = pre_word + " " + post_word
if trans_word in self.trans_dict_count.keys():
trans_prob = math.log(self.trans_dict_count[trans_word] / self.word_dict_count[pre_word])
else:
trans_prob = self.get_word_prob(post_word)
return trans_prob
    # Find the best predecessor of `node` by examining every possible preceding segment
def get_best_pre_node(self, sentence, node, node_state_list):
        # If node is smaller than the maximum word length, cap the segment length at node
max_seg_length = min([node, self.max_wordlen])
        pre_node_list = []  # list of candidate predecessor nodes
        # Collect every possible preceding segment and record its accumulated probability
for segment_length in range(1, max_seg_length + 1):
segment_start_node = node - segment_length
            segment = sentence[segment_start_node:node]  # the candidate segment
            pre_node = segment_start_node  # taking this segment implies this predecessor node
if pre_node == 0:
                # If the segment starts at the very beginning of the sequence,
                # the probability is the transition from <S> to the current word
segment_prob = self.get_word_trans_prob("<BEG>", segment)
            else:  # otherwise, not a sequence-start node: use the bigram probability
                # Get the word preceding this segment
pre_pre_node = node_state_list[pre_node]["pre_node"]
pre_pre_word = sentence[pre_pre_node:pre_node]
segment_prob = self.get_word_trans_prob(pre_pre_word, segment)
            pre_node_prob_sum = node_state_list[pre_node]["prob_sum"]  # accumulated probability of the predecessor node
            # Accumulated probability for this candidate of the current node
candidate_prob_sum = pre_node_prob_sum + segment_prob
pre_node_list.append((pre_node, candidate_prob_sum))
        # Pick the candidate with the highest accumulated probability
(best_pre_node, best_prob_sum) = max(pre_node_list, key=lambda d: d[1])
return best_pre_node, best_prob_sum
    # Main segmentation routine
def cut_main(self, sentence):
sentence = sentence.strip()
        # Initialisation
        node_state_list = []  # best predecessor per node; the index is the position
        # The initial node, i.e. information for node 0
        ini_state = {}
        ini_state["pre_node"] = -1  # previous node
        ini_state["prob_sum"] = 0  # accumulated probability so far
node_state_list.append(ini_state)
        # The string probability is a bigram model: P(a b c) = P(a|<S>)P(b|a)P(c|b)
        # Find the best predecessor for each node in turn
for node in range(1, len(sentence) + 1):
            # Find the best predecessor and record the current maximum accumulated probability
(best_pre_node, best_prob_sum) = self.get_best_pre_node(sentence, node, node_state_list)
            # Append to the list
cur_node = {}
cur_node["pre_node"] = best_pre_node
cur_node["prob_sum"] = best_prob_sum
node_state_list.append(cur_node)
# print "cur node list",node_state_list
# step 2, 获得最优路径,从后到前
best_path = []
        node = len(sentence)  # the last node
best_path.append(node)
while True:
pre_node = node_state_list[node]["pre_node"]
if pre_node == -1:
break
node = pre_node
best_path.append(node)
best_path.reverse()
        # Step 3: build the segmentation from the path
word_list = []
for i in range(len(best_path) - 1):
left = best_path[i]
right = best_path[i + 1]
word = sentence[left:right]
word_list.append(word)
return word_list
    # Convenience interface for testing
def cut(self, sentence):
return self.cut_main(sentence)
if __name__ == "__main__":
cuter = MaxProbCut("./word_dict.model",'./trans_dict.model')
sentence = "今天我不知道你为什么会这个样子"
seg_sentence = cuter.cut(sentence)
print("original sentence: ", sentence)
print("segment result: ", seg_sentence)
|
# -*- coding=utf-8 -*-
import os
import subprocess
import sys
import string
import csv
cloudfront_domain = "dusqglx8g3hsd.cloudfront.net"
trailers_cloudfront_domain = 'd14q6vju7s12ir.cloudfront.net'
s3_destination_bucket = 'adso-vod-workflow-template-destination-d25pp6byo9pp'
PROFILE_STREAMS = (
{
'bitrate': '7800k',
'bitrate_max': '7820k',
'audio_bitrate': '128k',
'buffer_size': '7800k',
'resolution_16_9': '1920x1080',
'resolution_4_3': '1440x1080',
'profile': 'high -bsf:v h264_mp4toannexb -preset slow',
'video_codec': 'libx264',
'name_append': '_1080p'
},
{
'bitrate': '4500k',
'bitrate_max': '4510k',
'audio_bitrate': '128k',
'buffer_size': '4500k',
'resolution_16_9': '1280x720',
'resolution_4_3': '960x720',
'profile': 'high -level:v 4.0 -preset slow',
'video_codec': 'libx264',
'name_append': '_720p'
},
{
'bitrate': '2000k',
'bitrate_max': '2240k',
'audio_bitrate': '96k',
'buffer_size': '2000k',
'resolution_16_9': '1024x576',
'resolution_4_3': '720x576',
'profile': 'main -level:v 4.0 -preset slow',
'video_codec': 'libx264',
'name_append': '_576'
},
{
'bitrate': '700k',
'bitrate_max': '840k',
'audio_bitrate': '64k',
'buffer_size': '700k',
'resolution_16_9': '640x360',
'resolution_4_3': '512x384',
'profile': 'main -level:v 4.0 -preset slow',
'video_codec': 'libx264',
'name_append': '_360'
},
{
'bitrate': '365k',
'bitrate_max': '400k',
'audio_bitrate': '64k',
'buffer_size': '365k',
'resolution_16_9': '480x272',
'resolution_4_3': '480x250',
'profile': 'main -level:v 4.0 -preset slow',
'video_codec': 'libx264',
'name_append': '_272'
}
)
variants = ['365k', '700k', '2000k', '4500k', '7800k']
def main():
#below lines need to be fixed
# with open('out_urls.csv', 'wb') as f:
# added_scenes = csv.writer(f)
# added_scenes.writerow(
# ['nombre', 'url_master.m3u8', 'duration'])
for subdir, dirs, files in os.walk('./'):
for file in files:
filepath = subdir + os.sep + file
if filepath.endswith(".mp4"):
format_filename(file)
hls_encode(file)
def format_filename(s):
"""
Take a string and return a valid filename constructed from the string.
Uses a whitelist approach: any characters not present in valid_chars are
removed. Also spaces are replaced with underscores.
Note: this method may produce invalid filenames such as ``, `.` or `..`
When I use this method I prepend a date string like '2009_01_15_19_46_32_'
and append a file extension like '.txt', so I avoid the potential of using
an invalid filename.
    Instead of returning the sanitized name, this function renames the file on disk.
"""
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
filename = ''.join(c for c in s if c in valid_chars)
filename = filename.replace(' ', '_')
os.rename(s, filename)
def hls_encode(file):
base = os.path.basename(file)
out_name = os.path.splitext(base)[0]
directory = os.path.abspath(os.getcwd()) + '/' + out_name
thumbs_out = os.path.join(directory, 'thumbnails')
trailer_out = os.path.join('trailer')
try:
aspect_ratio = mediainfo_get_prop_value(
file, 'Video', 'DisplayAspectRatio_Original/String')
if not aspect_ratio:
aspect_ratio = mediainfo_get_prop_value(
file, "Video", "DisplayAspectRatio/String")
except ValueError as e:
raise ValueError(e)
try:
width = mediainfo_get_prop_value(file, "Video", "Width")
except ValueError as e:
raise ValueError(e)
try:
duration = mediainfo_get_prop_value(file, "Video", "Duration")
except ValueError as e:
raise ValueError(e)
try:
duration_TC = mediainfo_get_prop_value(
file, "Video", "Duration/String3")
except ValueError as e:
raise ValueError(e)
def thumbnails_time_divider(duration, jumps, jump_miliseconds):
jump_size = int(duration)/jumps
pointer = 0
points = []
for i in range(jumps):
try:
points.append(pointer)
pointer += jump_size - jump_miliseconds
except:
pass
return points
def trailer_cmd(times, file, scene_duration):
'''
Returns this ffmpeg command that will create a mini trailer of the input video file
# 'ffmpeg -i multi_audio_test_no_audio.mp4 -vf "select='between(t, 4, 6.5)+between(t, 17, 26)+between(t, 74, 91)', setpts=N/FRAME_RATE/TB" tes.mp4'
        The number of seconds of each scene can be changed via the scene_duration argument.
'''
cmd = 'ffmpeg -i {} -an '.format(file)
cmd += "-vf "
cmd += '"select='
cmd += "'"
counter = 0
scene_number_of_seconds = scene_duration
for time in times:
if counter == 0:
cmd += 'between(t,{},{})'.format(time/1000,
time/1000 + scene_number_of_seconds)
else:
cmd += '+between(t,{},{})'.format(time/1000,
time/1000 + scene_number_of_seconds)
counter += 1
cmd += "'"
cmd += ', setpts=N/FRAME_RATE/TB" '
cmd += "-s {} ".format("426x240")
cmd += "{}/{}_trailer.mp4".format(trailer_out, out_name)
return cmd
def get_profile(variant, aspect_ratio, PROFILE_STREAMS):
for profile in PROFILE_STREAMS:
if aspect_ratio == '4:3':
profile_resol = profile['resolution_4_3']
profile_width = profile_resol.split("x")
if int(width) >= int(profile_width[0]) and profile['bitrate'] == variant:
                        return profile, profile_resol  # return the same (profile, resolution) pair as the 16:9 branch
else:
profile_resol = profile['resolution_16_9']
profile_width = profile_resol.split("x")
if int(width) >= int(profile_width[0]) and profile['bitrate'] == variant:
return profile, profile_resol
def create_command(directory, file, stream):
"""
Creates the ffmpeg single pass commands to generate the variants before creating the .m3u8 master playlist.
:param args:
:param kwargs:
:return: the cmd as string
"""
output_name = out_name + stream[0]['name_append'] + '.mp4'
output = os.path.join(directory, output_name)
cmd = 'ffmpeg -y '
cmd += '-i {} -c:v libx264 -profile:v {} -s {} '.format(
file, stream[0]['profile'], stream[1])
cmd += '-g 50 -keyint_min 50 -sc_threshold 0 -bf 3 -b_strategy 2 -refs 2 -coder 0 '
cmd += '-b:v {} -maxrate {} -bufsize {} -pix_fmt yuv420p '.format(
stream[0]['bitrate'], stream[0]['bitrate_max'], stream[0]['buffer_size'])
cmd += '-c:a aac -b:a {} -ar 44100 -movflags faststart -map 0:v -map 0:a {}'.format(
stream[0]['audio_bitrate'], output)
return cmd
def create_master_playlist():
"""
Create the command to generate the master playlist (master.m3u8) for the profile.
Run the 'mp4-hls.py' script
:param args:
:param kwargs:
:return:
"""
mezzanines = []
for subdir, dirs, files in os.walk(directory):
for file in files:
filepath = subdir + os.sep + file
if filepath.endswith(".mp4"):
if not filepath.endswith("trailer.mp4"):
mezza = ''.join(file)
mezzanines.append("'%s/%s' " % (directory, mezza))
cmd = "python3 scripts/utils/mp4-hls.py -o '{outputs}' -f {mezzanines}".format(
outputs=directory,
mezzanines=''.join(mezzanines)
)
return cmd
def aes_128_key():
try:
os.remove('enc.key')
        except OSError as e:
            print('KEY_ERROR:', e)
pass
cmd='openssl rand 16 > enc.key'
return cmd
def create_master_playlist_with_subtitles_and_encryption():
"""
mp4-hls.py command with subtitles and encryption
"""
mezzanines = []
subtitles = []
current_file_name = ""
# generate key
run(aes_128_key())
for subdir, dirs, files in os.walk(directory):
for file in files:
filepath = subdir + os.sep + file
if filepath.endswith(".mp4"):
if not filepath.endswith("trailer.mp4"):
current_file_name = file[:-8]
# print("FILE:", current_file_name)
# print("*"*50)
mezza = ''.join(file)
mezzanines.append("'%s/%s' " % (directory, mezza))
for subdir, dirs, files in os.walk(os.getcwd()):
for file in files:
filepath = subdir + os.sep + file
if filepath.endswith(".srt") and file[:-7] == current_file_name :
subtitles.append(subtitles_assembler(file))
cmd = 'python scripts/utils/mp4-hls.py --encryption-key=$(hexdump -e {x} enc.key) --output-encryption-key {mezzanines} {subtitles}'.format(
x="""'"%x"'""",
mezzanines=''.join(mezzanines),
subtitles=''.join(subtitles)
)
return cmd
def move_bento4_output_into_movie_folder():
cmd = 'mv output {directory}'.format(
directory=directory
)
run(cmd)
def create_output_folders(directory):
if not os.path.exists(directory):
os.makedirs(directory)
        if len(sys.argv) > 2:
            if sys.argv[2] == 'trailer':
trailer = os.path.join(directory, "trailer")
os.makedirs(trailer)
def run(cmd, shell=False):
print("cmd START:", cmd)
p = subprocess.check_output(cmd, shell=True)
print("cmd finished")
selected_streams = []
commands = []
create_output_folders(directory)
def subtitles_assembler(in_file):
"""
        Looks for .srt subtitles with the same base name as the file being processed, extracts the
        two-letter ISO language code, converts them to .vtt, and returns the subtitle string needed
        by the mp4-hls.py script.
"""
base_name = os.path.splitext(in_file)[0]
subtitle_string_for_mp42hls = ""
filepath = os.path.join(os.getcwd(),in_file)
file_base = os.path.splitext(in_file)[0]
size = len(file_base)
if base_name == file_base:
output_vtt = os.path.join(os.getcwd() , os.path.splitext(os.path.basename(filepath))[0] + '.vtt')
language = os.path.splitext(os.path.basename(filepath))[0][-2:]
# convert srt into vtt
cmd = 'ffmpeg -y -i {filepath} {output}'.format(
filepath=filepath,
output=output_vtt
)
run(cmd)
single_subt_string = '[+format=webvtt,+language={language}]{output_vtt} '.format(
language=language,
output_vtt=output_vtt
)
subtitle_string_for_mp42hls += single_subt_string
return subtitle_string_for_mp42hls
def s3_upload():
"""
Upload output folder into aws s3 bucket using sync command
"""
cmd = 'aws s3 sync {variants_folder}/output/ s3://{destination_bucket}/{out_name}'.format(
variants_folder=directory,
destination_bucket=s3_destination_bucket,
out_name=out_name
)
print ('SYNC_COMMAND:', cmd)
return cmd
# add trailer support on arguments
# if len(sys.argv)>1:
# if sys.argv[2] == 'trailer':
# times = thumbnails_time_divider(duration, 12, 1000)
# trailer = trailer_cmd(times, file, 0.8)
# run(trailer)
for variant in variants:
stream = get_profile(variant, aspect_ratio, PROFILE_STREAMS)
if stream:
selected_streams.append(stream)
for stream in selected_streams:
commands.append(create_command(directory, file, stream))
for cmd in commands:
run(cmd)
m3u8_cmd = create_master_playlist_with_subtitles_and_encryption()
try:
run(m3u8_cmd)
except subprocess.CalledProcessError as e:
print("ERROR:",e.output)
try:
os.rmdir("output")
run(m3u8_cmd)
except subprocess.CalledProcessError as e:
print("ERROR_2:",e.output)
pass
move_bento4_output_into_movie_folder()
#upload output folder into s3 bucket
if len(sys.argv)>1:
if sys.argv[1] == 'sync_bucket':
sync_cmd = s3_upload()
run(sync_cmd)
# fill csv with master and trailer url
cloudfront_url = os.path.join(cloudfront_domain, out_name, 'master.m3u8')
with open('out_urls.csv', 'a') as f:
added_scenes = csv.writer(f)
added_scenes.writerow(
[out_name, cloudfront_url, duration_TC])
def mediainfo_get_prop_value(fq_file_name, ptype, prop_name):
"""
Get the value of a single mediainfo property.
Run 'mediainfo --Info-Parameters' to get an exhaustive list.
Example: 'mediainfo --Inform=Video;%Duration% /home/myuser/test_file.avi'
:param fq_file_name:
:ptype: mediainfo property type (e.g. 'Video', 'Audio',...)
:param prop_name: mediainfo property name (e.g. 'Count', 'Title, 'Language',...)
:return:
"""
cmd = ['mediainfo', '--Inform=%s;%%%s%%\\n' %
(ptype, prop_name), fq_file_name]
pout = subprocess.Popen(cmd, stdout=subprocess.PIPE)
prop_value = pout.communicate()[0].strip()
return prop_value
if __name__ == "__main__":
main()
# %%
|
""" Functions for converting coordinates """
import utm
from pyproj import CRS
def utm_crs_from_latlon(lat, lon):
""" Determines the UTM CRS from a given lat lon point
:param lat: The latitude
:type lat: float
:param lon: The longitude
:type lon: float
:return: A coordinate system for the associated UTM zone
    :rtype: class:`pyproj.CRS`
"""
_, _, zone, _ = utm.from_latlon(lat, lon)
    if lat >= 0:
        epsg = '326' + str(zone).zfill(2)
    else:
        epsg = '327' + str(zone).zfill(2)
return CRS.from_user_input(int(epsg))
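# A minimal usage sketch: New York City (lat 40.7, lon -74.0) lies in UTM zone 18 north,
# so the returned CRS should correspond to EPSG:32618.
if __name__ == "__main__":
    crs = utm_crs_from_latlon(40.7, -74.0)
    print(crs.to_epsg())  # expected: 32618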
|
txt1 = 'A tale that was not right'
txt2 = '이 또한 지나가리라.'
print(txt1[24])
print(txt2[-2])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# otra_app56.py
#
from Tkinter import *
def Call():  # Define the callback function
    lab = Label(root, text = 'Usted presiono\nel boton')
    lab.pack()
    boton['bg'] = 'blue'   # Button background turns blue when pressed
    boton['fg'] = 'white'  # Button text turns white
root = Tk()  # Root window
root.geometry('100x110+350+70')  # Window geometry
boton = Button(root, text = 'Presionar', command = Call)
boton.pack()
root.mainloop()
|
#!/usr/bin/env python
#
# Copyright (c) 2007-2008, Corey Goldberg (corey@goldb.org)
#
# license: GNU LGPL
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#Note: The code was adjusted to always extract the stock price of any given date between 1995-2016 and the stock prices of the two previous days. (the end_date is the chosen date)
#from pandas.tseries.holiday import get_calendar, HolidayCalendarFactory, GoodFriday
import urllib
'''
This is the "ystockquote" module.
This module provides a Python API for retrieving stock data from Yahoo Finance.
This module contains the following functions:
get_all(symbol)
get_price(symbol)
get_change(symbol)
get_volume(symbol)
get_avg_daily_volume(symbol)
get_stock_exchange(symbol)
get_market_cap(symbol)
get_book_value(symbol)
get_ebitda(symbol)
get_dividend_per_share(symbol)
get_dividend_yield(symbol)
get_earnings_per_share(symbol)
get_52_week_high(symbol)
get_52_week_low(symbol)
get_50day_moving_avg(symbol)
get_200day_moving_avg(symbol)
get_price_earnings_ratio(symbol)
get_price_earnings_growth_ratio(symbol)
get_price_sales_ratio(symbol)
get_price_book_ratio(symbol)
get_short_ratio(symbol)
get_historical_prices(symbol, start_yyyymmdd, end_yyyymmdd)
sample usage:
>>> import ystockquote
>>> print ystockquote.get_price('GOOG')
529.46
'''
def __request(symbol, stat):
url = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (symbol, stat)
return urllib.urlopen(url).read().strip().strip('"')
def get_all(symbol):
"""
Get all available quote data for the given ticker symbol.
Returns a dictionary.
"""
values = __request(symbol, 'l1c1va2xj1b4j4dyekjm3m4rr5p5p6s7').split(',')
data = {}
data['price'] = values[0]
data['change'] = values[1]
data['volume'] = values[2]
data['avg_daily_volume'] = values[3]
data['stock_exchange'] = values[4]
data['market_cap'] = values[5]
data['book_value'] = values[6]
data['ebitda'] = values[7]
data['dividend_per_share'] = values[8]
data['dividend_yield'] = values[9]
data['earnings_per_share'] = values[10]
data['52_week_high'] = values[11]
data['52_week_low'] = values[12]
data['50day_moving_avg'] = values[13]
data['200day_moving_avg'] = values[14]
data['price_earnings_ratio'] = values[15]
data['price_earnings_growth_ratio'] = values[16]
data['price_sales_ratio'] = values[17]
data['price_book_ratio'] = values[18]
    data['short_ratio'] = values[19]
    return data
def get_price(symbol):
return __request(symbol, 'l1')
def get_change(symbol):
return __request(symbol, 'c1')
def get_volume(symbol):
return __request(symbol, 'v')
def get_avg_daily_volume(symbol):
return __request(symbol, 'a2')
def get_stock_exchange(symbol):
return __request(symbol, 'x')
def get_market_cap(symbol):
return __request(symbol, 'j1')
def get_book_value(symbol):
return __request(symbol, 'b4')
def get_ebitda(symbol):
return __request(symbol, 'j4')
def get_dividend_per_share(symbol):
return __request(symbol, 'd')
def get_dividend_yield(symbol):
return __request(symbol, 'y')
def get_earnings_per_share(symbol):
return __request(symbol, 'e')
def get_52_week_high(symbol):
return __request(symbol, 'k')
def get_52_week_low(symbol):
return __request(symbol, 'j')
def get_50day_moving_avg(symbol):
return __request(symbol, 'm3')
def get_200day_moving_avg(symbol):
return __request(symbol, 'm4')
def get_price_earnings_ratio(symbol):
return __request(symbol, 'r')
def get_price_earnings_growth_ratio(symbol):
return __request(symbol, 'r5')
def get_price_sales_ratio(symbol):
return __request(symbol, 'p5')
def get_price_book_ratio(symbol):
return __request(symbol, 'p6')
def get_short_ratio(symbol):
return __request(symbol, 's7')
#Adjusted to receive two inputs: symbol and chosen_date instead of symbol, start_date, end_date
def get_historical_prices(symbol, chosen_date):
"""
Get historical prices for the given ticker symbol.
Date format is 'YYYY-MM-DD'
Returns a nested list.
"""
import datetime
from datetime import date
from datetime import timedelta
#Test
# >>> chosen_date = '2016-05-10'
# >>> year = int(chosen_date[:4])
# >>> month = int(chosen_date[5:7])
# >>> day = int(chosen_date[8:])
# >>> end_date = datetime.date(year, month, day)
# >>> start_date = str(end_date - datetime.timedelta(days=2))
past_n_days = 10 #fixed because we only care about the stock price of the chosen day and the stock prices of the two previous days
year = int(chosen_date[:4])
month = int(chosen_date[5:7])
day = int(chosen_date[8:])
end_date = datetime.date(year, month, day)
if end_date > datetime.date.today():
statement = "Choose any date before today: " + str(datetime.date.today())
d0 = end_date
d1 = datetime.date.today()
delta = d0 - d1
past_n_days += delta.days
# from datetime import date
# d0 = date(2008, 8, 18)
# d1 = date(2008, 9, 26)
# delta = d0 - d1
# print delta.days
if end_date == datetime.date.today():
past_n_days += 1
#assert end_date < datetime.date.today(), "chosen date must be any previous day from today: %r" % end_date
#assert num == 4, "len of set is not 4: %r" % num #example
#List of dates:
date_list = [end_date - datetime.timedelta(days=x) for x in range(0, 3)]
# >>> date_list = [end_date - datetime.timedelta(days=x) for x in range(0, 3)]
# >>> print date_list
# [datetime.date(2016, 5, 10), datetime.date(2016, 5, 9), datetime.date(2016, 5, 8)]
#start_date = str(end_date - datetime.timedelta(days=past_n_days)) #doesn't work when we previously put from datetime import datetime
start_date = str(end_date - timedelta(days=past_n_days)) #code is always functional
end_date = chosen_date
# #month, day and year
url = 'http://ichart.yahoo.com/table.csv?s=%s&' % symbol + \
'd=%s&' % str(int(end_date[5:7]) - 1) + \
'e=%s&' % str(int(end_date[8:10])) + \
'f=%s&' % str(int(end_date[0:4])) + \
'g=d&' + \
'a=%s&' % str(int(start_date[5:7]) - 1) + \
'b=%s&' % str(int(start_date[8:10])) + \
'c=%s&' % str(int(start_date[0:4])) + \
'ignore=.csv'
print "url"
print url
days = urllib.urlopen(url).readlines()
data = [day[:-2].split(',') for day in days]
return data
if __name__ == "__main__":
print get_historical_prices('GOOG', '2016-05-30')
#output
##if __name__ == "__main__":
## print get_historical_prices('GOOG', '2016-05-30')
# Andreas-MacBook-Pro-2:StockPredictor andreamelendezcuesta$ python ystockquote_edited.py
# url
# http://ichart.yahoo.com/table.csv?s=GOOG&d=4&e=30&f=2016&g=d&a=4&b=16&c=2016&ignore=.csv
# [['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Adj Clos'],
# ['2016-05-25', '720.76001', '727.51001', '719.705017', '725.27002', '1629200', '725.2700'],
# ['2016-05-24', '706.859985', '720.969971', '706.859985', '720.090027', '1920400', '720.09002'],
# ['2016-05-23', '706.530029', '711.478027', '704.179993', '704.23999', '1320900', '704.2399'],
# ['2016-05-20', '701.619995', '714.580017', '700.52002', '709.73999', '1816000', '709.7399'],
# ['2016-05-19', '702.359985', '706.00', '696.799988', '700.320007', '1656300', '700.32000'],
# ['2016-05-18', '703.669983', '711.599976', '700.630005', '706.630005', '1763400', '706.63000'],
# ['2016-05-17', '715.98999', '721.52002', '704.109985', '706.22998', '1999500', '706.2299'],
# ['2016-05-16', '709.130005', '718.47998', '705.650024', '716.48999', '1316200', '716.4899']]
## if __name__ == "__main__":
## print get_historical_prices('GOOG', '2016-05-26')
# Andreas-MacBook-Pro-2:StockPredictor andreamelendezcuesta$ python ystockquote_edited.py
# url
# http://ichart.yahoo.com/table.csv?s=GOOG&d=4&e=26&f=2016&g=d&a=4&b=15&c=2016&ignore=.csv
# [['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Adj Clos'],
# ['2016-05-25', '720.76001', '727.51001', '719.705017', '725.27002', '1629200', '725.2700'],
# ['2016-05-24', '706.859985', '720.969971', '706.859985', '720.090027', '1920400', '720.09002'],
# ['2016-05-23', '706.530029', '711.478027', '704.179993', '704.23999', '1320900', '704.2399'],
# ['2016-05-20', '701.619995', '714.580017', '700.52002', '709.73999', '1816000', '709.7399'],
# ['2016-05-19', '702.359985', '706.00', '696.799988', '700.320007', '1656300', '700.32000'],
# ['2016-05-18', '703.669983', '711.599976', '700.630005', '706.630005', '1763400', '706.63000'],
# ['2016-05-17', '715.98999', '721.52002', '704.109985', '706.22998', '1999500', '706.2299'],
# ['2016-05-16', '709.130005', '718.47998', '705.650024', '716.48999', '1316200', '716.4899']]
##if __name__ == "__main__":
## print get_historical_prices('GOOG', '2016-05-10')
# Andreas-MacBook-Pro-2:StockPredictor andreamelendezcuesta$ python ystockquote_edited.py
# url
# http://ichart.yahoo.com/table.csv?s=GOOG&d=4&e=10&f=2016&g=d&a=3&b=30&c=2016&ignore=.csv
# [['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Adj Clos'],
# ['2016-05-10', '716.75', '723.50', '715.719971', '723.179993', '1563100', '723.17999'],
# ['2016-05-09', '712.00', '718.710022', '710.00', '712.900024', '1508400', '712.90002'],
# ['2016-05-06', '698.380005', '711.859985', '698.106995', '711.119995', '1826100', '711.11999'],
# ['2016-05-05', '697.700012', '702.320007', '695.719971', '701.429993', '1677400', '701.42999'],
# ['2016-05-04', '690.48999', '699.75', '689.01001', '695.700012', '1688600', '695.70001'],
# ['2016-05-03', '696.869995', '697.840027', '692.00', '692.359985', '1531000', '692.35998'],
# ['2016-05-02', '697.630005', '700.640015', '691.00', '698.210022', '1644100', '698.21002']]
#original function ########################################
# def get_historical_prices(symbol, start_date, end_date):
# """
# Get historical prices for the given ticker symbol.
# Date format is 'YYYYMMDD'
# Returns a nested list.
# """
# url = 'http://ichart.yahoo.com/table.csv?s=%s&' % symbol + \
# 'd=%s&' % str(int(end_date[4:6]) - 1) + \
# 'e=%s&' % str(int(end_date[6:8])) + \
# 'f=%s&' % str(int(end_date[0:4])) + \
# 'g=d&' + \
# 'a=%s&' % str(int(start_date[4:6]) - 1) + \
# 'b=%s&' % str(int(start_date[6:8])) + \
# 'c=%s&' % str(int(start_date[0:4])) + \
# 'ignore=.csv'
# print "url"
# print url
# days = urllib.urlopen(url).readlines()
# data = [day[:-2].split(',') for day in days]
# #print data
# return data
# if __name__ == "__main__":
# print get_historical_prices('GOOG', '20160321', '20160323')
# Andreas-MBP-2:StockPredictor andreamelendezcuesta$ python ystockquote.py
# url
# http://ichart.yahoo.com/table.csv?s=GOOG&d=2&e=23&f=2016&g=d&a=2&b=21&c=2016&ignore=.csv
# [['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Adj Clos'],
# ['2016-03-23', '742.359985', '745.719971', '736.150024', '738.059998', '1421900', '738.05999'],
# ['2016-03-22', '737.460022', '745.00', '737.460022', '740.75', '1264400', '740.7'],
# ['2016-03-21', '736.50', '742.50', '733.515991', '742.090027', '1831800', '742.09002']]
#######Alternative function, argument: past_n_days ###################
# from datetime import datetime, timedelta
# def get_historical_prices(symbol, past_n_days):
# """
# Get historical prices for the given ticker symbol.
# Date format is 'YYYY-MM-DD'
# Returns a nested list.
# """
# end_date = str(datetime.now())[:10]
# start_date= str(datetime.now() - timedelta(days=past_n_days))[:10]
# # #month, day and year
# url = 'http://ichart.yahoo.com/table.csv?s=%s&' % symbol + \
# 'd=%s&' % str(int(end_date[5:7]) - 1) + \
# 'e=%s&' % str(int(end_date[8:10])) + \
# 'f=%s&' % str(int(end_date[0:4])) + \
# 'g=d&' + \
# 'a=%s&' % str(int(start_date[5:7]) - 1) + \
# 'b=%s&' % str(int(start_date[8:10])) + \
# 'c=%s&' % str(int(start_date[0:4])) + \
# 'ignore=.csv'
# print "url"
# print url
# days = urllib.urlopen(url).readlines()
# data = [day[:-2].split(',') for day in days]
# return data
# if __name__ == "__main__":
# print get_historical_prices('GOOG', 2)
# Andreas-MacBook-Pro-2:StockPredictor andreamelendezcuesta$ python ystockquote_prev.py
# url
# http://ichart.yahoo.com/table.csv?s=GOOG&d=2&e=16&f=2016&g=d&a=2&b=14&c=2016&ignore=.csv
# [['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Adj Clos'],
# ['2016-03-16', '726.369995', '737.469971', '724.51001', '736.090027', '1572300', '736.09002'],
# ['2016-03-15', '726.919983', '732.289978', '724.77002', '728.330017', '1720100', '728.33001'],
# ['2016-03-14', '726.809998', '735.50', '725.150024', '730.48999', '1716900', '730.4899']]
|
'''
Inherit ParmEd's Structure class
'''
from parmed import Structure as _Structure
import parmed as pmd
import numpy as np
def load_file(*args, **kwd):
p_struct = pmd.load_file(*args, **kwd)
new_struct = Structure()
new_struct.atoms = p_struct.atoms
new_struct.residues = p_struct.residues
new_struct.bonds = p_struct.bonds
new_struct.angles = p_struct.angles
new_struct.dihedrals = p_struct.dihedrals
new_struct.rb_torsions = p_struct.rb_torsions
new_struct.urey_bradleys = p_struct.urey_bradleys
new_struct.impropers = p_struct.impropers
new_struct.cmaps = p_struct.cmaps
new_struct.trigonal_angles = p_struct.trigonal_angles
new_struct.out_of_plane_bends = p_struct.out_of_plane_bends
new_struct.pi_torsions = p_struct.pi_torsions
new_struct.stretch_bends = p_struct.stretch_bends
new_struct.torsion_torsions = p_struct.torsion_torsions
new_struct.chiral_frames = p_struct.chiral_frames
new_struct.multipole_frames = p_struct.multipole_frames
new_struct.adjusts = p_struct.adjusts
new_struct.acceptors = p_struct.acceptors
new_struct.donors = p_struct.donors
return new_struct
class Structure(_Structure):
@property
def charge(self):
return np.array([atom.charge for atom in self.atoms])
@charge.setter
def charge(self, values):
for atom, c in zip(self.atoms, values):
atom.charge = c
@property
def mass(self):
return np.array([atom.mass for atom in self.atoms])
    @mass.setter
def mass(self, values):
for atom, m in zip(self.atoms, values):
atom.mass = m
|
import os, sys, csv
import numpy as np
data = []
with open("wifiscan_1.csv", "rt") as csvfile:
reader = csv.reader(csvfile, delimiter=";")
for row in reader:
data.append(row[1:-1])
data = np.asarray(data).astype(float)
print( data.shape )
#print data[0, :]
mean = np.mean(data, axis=0)
print( "mean=", mean.shape )
#max = np.max(data, axis=0)
#min = np.min(data, axis=0)
std = np.std(data, axis=0) + 1e-10
#print "std=", std.shape
normalized = (data - mean)/std
print( "normalized max", np.min(normalized,axis=0))
np.save("mean", mean)
np.save("std", std)
np.save("points", normalized)
|
import subprocess
import os
import shutil
from subprocess import call, check_output, Popen, PIPE
import json
from os.path import join as pjoin
from micompy.common.tools.tool import Tool
class BBmap(Tool):
def __init__(self, executable = "bbmap.sh", jni=True , kmer = 13):
Tool.__init__(self, name = "BBMap", executable = executable)
self.jni = "jni=t" if jni else "jni=f"
self.kmer = "k=%d" % (kmer)
def make_index(self, genome):
with open(os.devnull, 'w') as handle:
call([self.executable, "ref="+genome.genome, "path="+genome.path, self.kmer], stdout=handle, stderr=handle)
def get_ANI(self, genome, reads, target_id = 0.1):
FNULL = open(os.devnull, 'w')
cmd = Popen([self.executable, "ref="+genome.genome, "path="+genome.path, "in="+reads, "out=/dev/null", "minid=" +str(target_id), self.jni, self.kmer], stderr = PIPE, stdout=PIPE)
out_lines = cmd.stderr.readlines()
FNULL.close()
coverage = [float(l.split("\t")[1].replace("%","")) for l in out_lines if "mapped:" in l][0]
ANI = [float(l.split("\t")[3].replace("%","")) for l in out_lines if "Match Rate:" in l][0]
return {'ANI' : ANI, 'coverage' : coverage}
|
#coding:utf-8
import cv2 as cv
import numpy as np
def video_demo():
capture = cv.VideoCapture(0)
while(True):
ret,frame = capture.read()
frame = cv.flip(frame,1)
cv.imshow('video',frame)
c = cv.waitKey(50)
if c == 27:
break
def get_image_info(image):
print(type(image))
print(image.shape)
print(image.size)
print(image.dtype)
pixel_data = np.array(image)
print(pixel_data)
src = cv.imread("D:\python_file\Opencv3_study_file\images\!face.png")
cv.namedWindow("NO.1 image",cv.WINDOW_AUTOSIZE)
cv.imshow("NO.1 image",src)
gray = cv.cvtColor(src,cv.COLOR_BGR2GRAY)
# Note: do not use a directory containing non-ASCII (e.g. Chinese) characters, otherwise cv.imwrite cannot save the file
cv.imwrite("D:/python_file/Opencv3_study_file/images/face_gray.png",gray)
cv.waitKey(0)
cv.destroyAllWindows()
print("Hi,Python!")
|
import logging
import simpy
class Restaurant:
def __init__(self, env, id, kitchencount, x, y):
self.id = id
self.env = env
self.name = "RE-%d" % id
self.x = x
self.y = y
self.orderStore = simpy.FilterStore(env)
self.kitchen = simpy.Resource(env, kitchencount)
logging.debug("%s: Setup at X:%d,Y:%d" % (self.name, x, y))
def prepare_food(self, order):
logging.debug("%s (O%d): RECEIVED at %s" % (self.name, order.id, self.env.now))
with self.kitchen.request() as req:
yield req
logging.info("%s (O%d): PREP_STARTED at %s" % (self.name, order.id, self.env.now))
yield self.env.timeout(order.dish.prep_time())
yield self.orderStore.put(order)
logging.info("%s (O%d): #COOKED# at %s" % (self.name, order.id, self.env.now))
def handover_food(self, order):
yield self.orderStore.get(lambda o: o.id == order.id)
logging.debug("%s (O%d): HANDED_OVER at %s" % (self.name, order.id, self.env.now))
|
def solution(n):
Fibo = [0, 1]
for i in range(2, n + 1):
Fibo.append(Fibo[i - 1] + Fibo[i - 2])
return Fibo[n] % 1234567
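# Quick sanity check: solution(5) -> 5, solution(10) -> 55.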
|
""""
Soumil Nitin Shah
Bachelor in Electronic Engineering
Master in Electrical Engineering
Master in Computer Engineering
Graduate Teaching/Research Assistant
Python Developer
soushah@my.bridgeport.edu
"""
import sqlite3
def my_database():
"""
:return: Nothing
"""
# define the connection
conn = sqlite3.connect("eg1.db")
#define the cursor
cursor = conn.cursor()
# create Tables
cursor.execute("""
CREATE TABLE IF NOT EXISTS my_table (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
temperature TEXT,
humidity TEXT)""")
# Add data to database
cursor.execute("""
INSERT INTO my_table (temperature,humidity) VALUES (4,3)""")
    # commit the transaction, then close the cursor and connection
conn.commit()
cursor.close()
conn.close()
if __name__ == '__main__':
my_database()
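# A minimal read-back sketch (assumes eg1.db was created by my_database above); call
# show_rows() after my_database() to verify the inserted row.
def show_rows():
    conn = sqlite3.connect("eg1.db")
    for row in conn.execute("SELECT id, temperature, humidity FROM my_table"):
        print(row)
    conn.close()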
|
class MyChatRole:
def __init__(self, role_name: str, msg_header: str = None):
self.__role_name = role_name
self.__msg_header = role_name if msg_header is None else msg_header
@property
def role_name(self):
return self.__role_name
@property
def msg_header(self):
return self.__msg_header
def role_msg(self, msg: str, end: str = '\n'):
return f'{self.__msg_header} : {msg}{end}'
role_sys = MyChatRole('system', 'sys')
role_user = MyChatRole('user', 'usr')
role_anonym = MyChatRole('matrix', 'neo')
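# e.g. role_user.role_msg('hi') returns 'usr : hi\n'; role_anonym.role_msg('wake up', end='') returns 'neo : wake up'.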
|
# keeping bot alive on repl.it https://www.codementor.io/@garethdwyer/building-a-discord-bot-with-python-and-repl-it-miblcwejz
from flask import Flask, request
from threading import Thread
import json
app = Flask('')
@app.route('/')
def home():
return "I'm alive"
@app.route('/refresh', methods = ['POST'])
def refresh():
print(f"repl.deploy{json.loads(request.get_data())}{request.headers.get('Signature')}")
deploy = input()
print("repl.deploy-success")
return json.loads(deploy)
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
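# e.g. call keep_alive() before starting the Discord bot's blocking run loop (client.run(TOKEN)
# in the tutorial linked above), so this Flask thread keeps the repl awake.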
|
'''
Retrieve REST API endpoints for different services
'''
from . import credentials
def get(service_type, endpoint_type='publicURL'):
""" Retrieve the service endpoint URL """
ks = credentials.keystone()
return ks.service_catalog.url_for(service_type=service_type,
endpoint_type=endpoint_type)
def image():
""" Retrieve the image service (glance) endpoint """
return get('image')
def identity():
""" Retrieve the identity service (keystone) endpoint """
return get('identity')
def compute():
""" Retrieve the compute service (nova) endpoint """
return get('compute')
def network():
""" Retrieve the network service (neutron) endpoint """
return get('network')
def volume():
""" Retrieve the volume service (cinder) endpoint """
return get('volume')
def object_store():
""" Retrieve the object store (swift) endpoint """
return get('object-store')
|
# Generated by Django 3.0.2 on 2020-02-01 14:40
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('osmcal', '0013_remove_event_created_by'),
]
operations = [
migrations.AlterField(
model_name='event',
name='description',
field=models.TextField(blank=True, help_text='Tell people what the event is about and what they can expect. You may use Markdown in this field.', null=True),
),
migrations.CreateModel(
name='ParticipationQuestion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('answer_type', models.CharField(choices=[('TEXT', 'Text Field'), ('CHOI', 'Choice'), ('BOOL', 'Boolean')], max_length=4)),
('mandatory', models.BooleanField(default=True)),
('quota', models.PositiveIntegerField(blank=True, null=True)),
('choices', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
('event', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='questions', to='osmcal.Event')),
],
),
]
|
import os
import sqlite3
import uuid
import config
def CreateDatasetExportDatabse(datasetId: str):
filename = datasetId + ".sqlite"
dbpath = os.path.join(config.ExportDatasetPath, filename)
conn = sqlite3.connect(dbpath)
cur = conn.cursor()
create_table_schema = """CREATE TABLE IF NOT EXISTS DOMO_Schema (
Id text PRIMARY KEY,
CreatedDate text);"""
create_table_schema_detail = """CREATE TABLE IF NOT EXISTS DOMO_Schema_Detail (
Id text PRIMARY KEY,
Schema_Id text,
Label text,
Type text,
FOREIGN KEY (Schema_Id) REFERENCES DOMO_Schema(Id));"""
create_table_data = """CREATE TABLE IF NOT EXISTS DOMO_Data (
Id text PRIMARY KEY,
Import_Date text,
Row_Id text,
Col_Id text,
Value text,
FOREIGN KEY(Col_Id) REFERENCES DOMO_Schema_Detail(Id));"""
    if conn is not None:
        cur.execute(create_table_schema)
        cur.execute(create_table_schema_detail)
        cur.execute(create_table_data)
        conn.commit()
        conn.close()
    else:
        print("Cannot connect to sqlite database at: " + dbpath)
|
# Sum of the series 1 + 11 + 111 + 1111 + 11111 + ... (first n terms)
def fun(n):
sum = 0
x = 0
    for i in range(1, n + 1):  # n terms; e.g. fun(6) prints 123456
x = x*10 +1
sum += x
print(sum)
fun(6)
|
import numpy as np
def numpy_vstack_2d_default(array1, array2, default_value=np.nan):
if len(array1) == 0:
return array2
if len(array2) == 0:
return array1
if np.ndim(array1) == 1:
array1 = np.reshape(array1, (1,len(array1)))
if np.ndim(array2) == 1:
array2 = np.reshape(array2, (1,len(array2)))
shape1 = np.shape(array1)
shape2 = np.shape(array2)
if shape1[1] == shape2[1]:
return np.vstack((array1, array2))
elif shape1[1] > shape2[1]:
# add default values to array1
new_values = np.ones((shape2[0], shape1[1] - shape2[1]))
new_values[:] = default_value
return np.vstack((array1, np.hstack((array2, new_values))))
else:
# add default values to array2
new_values = np.ones((shape1[0], shape2[1] - shape1[1]))
new_values[:] = default_value
return np.vstack((np.hstack((array1, new_values)), array2))
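# A quick usage sketch: stacking rows of different widths pads the narrower one with the default value.
if __name__ == "__main__":
    a = np.array([[1.0, 2.0, 3.0]])
    b = np.array([4.0, 5.0])
    print(numpy_vstack_2d_default(a, b))
    # [[ 1.  2.  3.]
    #  [ 4.  5. nan]]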
|
import sys
import sdl2.ext
RESOURCES = sdl2.ext.Resources(__file__, "resources")
sdl2.ext.init()
window = sdl2.ext.Window("Hello World!", size=(640, 480))
window.show()
factory = sdl2.ext.SpriteFactory(sdl2.ext.SOFTWARE)
sprite = factory.from_image(RESOURCES.get_path("hello.bmp"))
spriterenderer = factory.create_sprite_render_system(window)
spriterenderer.render(sprite)
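# Note: as written the script exits (and the window closes) immediately after rendering.
# A common way to keep it open is PySDL2's built-in test loop, assuming it is available:
# processor = sdl2.ext.TestEventProcessor()
# processor.run(window)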
|
import pymongo
from menu import Menu
from models.post import Post
from models.blog import Blog
from database import Database
Database.initialize()
menu = Menu()
menu.run_menu()
|
# -*- coding: utf-8 -*-
try:
    import pycurl
from cStringIO import StringIO
curl = True
except Exception, e:
import urllib2
curl = False
def request(url):
"""蜘蛛抓取"""
if curl:
return _curlRequest(url)
else:
return _urllibRequest(url)
def _curlRequest(url):
"""curl下载函数"""
c = pycurl.Curl()
c.setopt(pycurl.URL, url)
sio = StringIO()
c.setopt(pycurl.WRITEFUNCTION, sio.write)
c.setopt(pycurl.FOLLOWLOCATION, 1)
c.setopt(pycurl.MAXREDIRS, 5)
# c.setopt(pycurl.ENCODING, '')
c.setopt(pycurl.USERAGENT, 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7')
# c.setopt(pycurl.PROXY, '192.168.0.138:8888')
# c.setopt(pycurl.PROXY, '127.0.0.1:9050')
# c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5)
c.perform()
c.close()
return sio.getvalue()
def _urllibRequest(url):
"""urllib下载函数"""
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7')
return urllib2.build_opener().open(req).read()
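# e.g. html = request('http://example.com')  # uses pycurl when available, otherwise falls back to urllib2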
|
# dp[i] indicates whether the player to move can win when i stones have been taken
# If i is a perfect square, the player to move obviously wins (take all remaining stones)
# If dp[i - j*j] is a guaranteed loss, the player to move also wins (take j*j stones)
from math import sqrt
class Solution:
def winnerSquareGame(self, n: int) -> bool:
sq, dp = sqrt(n), [False] * (n+1)
for i in range(0, n+1):
if not dp[i]:
for j in range(1, int(sq) + 1):
if i + j*j <= n:
dp[i + j*j] = True
return dp[n]
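# e.g. Solution().winnerSquareGame(7) -> False: removing 1 or 4 always leaves the opponent a winning position.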
|
'''API wrapper class'''
# pylint: disable=I0011,C0103
import json
import os
import pprint
import requests
class WeatherAPI(object):
"""docstring for WeatherAPI"""
@staticmethod
def get_weather(apikey, location):
'''Wrapper function for getting weather by zipcode'''
# OpenWeatherMap call
# uri = "http://api.openweathermap.org/data/2.5/weather?zip={},us{}".format(location, apikey)
# Wunderground call
# uri = "http://api.wunderground.com/api/{}/conditions/q/{}.json".format(apikey, location)
# OpenWeatherMap 2020 call
uri = "https://community-open-weather-map.p.rapidapi.com/weather"
callback = "test"
# print("WeatherAPI uri: ", uri)
# querystring = {"lat":"0","lon":"0","callback":"test","id":"2172797","lang":"null","units":"imperial","q":"London,uk"}
querystring = {"lat":"0","lon":"0","callback":callback,"id":"2172797","lang":"null","units":"imperial","q":location}
headers = {
'x-rapidapi-host': "community-open-weather-map.p.rapidapi.com",
'x-rapidapi-key': apikey
}
resp = requests.request("GET", uri, headers=headers, params=querystring)
return json.loads(resp.text.strip(callback).strip('(').rstrip(')'))
#resp = resp.strip([''.join(callback,'(')])
#resp = resp.rstrip(')')
# Function stub for testing
# This is a hardcoded sample json response from the API endpoint
# response = 'test({"coord":{"lon":-0.13,"lat":51.51},"weather":[{"id":804,"main":"Clouds","description":"overcast clouds","icon":"04n"}],"base":"stations","main":{"temp":62.26,"feels_like":54.03,"temp_min":61,"temp_max":64.4,"pressure":1000,"humidity":82},"visibility":10000,"wind":{"speed":18.34,"deg":230},"clouds":{"all":90},"dt":1604270626,"sys":{"type":1,"id":1414,"country":"GB","sunrise":1604213674,"sunset":1604248411},"timezone":0,"id":2643743,"name":"London","cod":200})'
# pp.pprint(type(response))
# pp.pprint(response)
# return json.loads(resp)
if __name__ == '__main__':
apikey = os.environ.get("OPENWEATHER_API_KEY","")
pp = pprint.PrettyPrinter()
# resp = WeatherAPI.get_weather(apikey,'Palo Alto, ca, usa')
# pp.pprint(resp)
# resp = WeatherAPI.get_weather(apikey,'Alameda,ca,usa')
# pp.pprint(resp)
resp = WeatherAPI.get_weather(apikey,'london, uk')
pp.pprint(type(resp))
pp.pprint(resp)
pp.pprint(resp["main"]["feels_like"])
|
from __future__ import absolute_import, division, print_function
from math import sqrt, log
import pygame
import random
import copy
import heapq
#Feel free to add extra classes and functions
class State:
# State constructor to initialize grid, player, parent, current coordinate, and
# options
def __init__(self, grid, player, parent, coord, options):
self.grid = grid
self.player = player
self.parent = parent
self.children = []
self.Q = 0
self.N = 0
self.coord = coord
self.counter = 0
self.grid_count = 11
self.options = options
if(coord is None):
self.terminalVal = False
else:
val = self.check_win()
self.terminalVal = val
# Adds child to states children
def add_child(self, child):
self.children.append(child)
# Gets continuous count in a specific direction
def get_continuous_count(self, r, c, dr, dc):
piece = self.grid[r][c]
result = 0
i = 1
while True:
new_r = r + dr * i
new_c = c + dc * i
if 0 <= new_r < self.grid_count and 0 <= new_c < self.grid_count:
if self.grid[new_r][new_c] == piece:
result += 1
else:
break
else:
break
i += 1
return result
# Check if this current board is a winner
def check_win(self):
grid = self.grid
r, c = self.coord
n_count = self.get_continuous_count(r, c, -1, 0)
s_count = self.get_continuous_count(r, c, 1, 0)
e_count = self.get_continuous_count(r, c, 0, 1)
w_count = self.get_continuous_count(r, c, 0, -1)
se_count = self.get_continuous_count(r, c, 1, 1)
nw_count = self.get_continuous_count(r, c, -1, -1)
ne_count = self.get_continuous_count(r, c, -1, 1)
sw_count = self.get_continuous_count(r, c, 1, -1)
if (n_count + s_count + 1 >= 5) or (e_count + w_count + 1 >= 5) or \
(se_count + nw_count + 1 >= 5) or (ne_count + sw_count + 1 >= 5):
return True
return False
class MCTS:
# Constructor for MCTS
def __init__(self, grid, player, r, c, first):
self.first = first
self.grid = grid
self.game_over = False
self.player = player
self.maxrc = len(grid)-1
self.winner = None
self.winner_m = None
self.piece = player
self.grid_count = 11
self.root = State(grid, player, None, (r, c), self.get_options_ibounds(grid, r, c))
self.root.counter = 1
self.grid_size = 46
self.start_x, self.start_y = 38, 55
self.edge_size = self.grid_size // 2
self.counter = 1
def uct_search(self):
i = 0
        # Computational budget: 800 iterations
while i < 800:
s = self.selection(self.root)
# Retrieve simulation for current board
winner = self.simulation(s)
            # Backpropagate to root
self.backpropagation(s, winner)
i += 1
if(self.first and i > 5):
break
max = 0
maxNode = None
# Retrieve node with max Q/N
for node in self.root.children:
val = node.Q/node.N
if(val > max):
max = val
maxNode = node
return maxNode.coord
def selection(self, state):
# Go down to terminal val
while not state.terminalVal:
# Check whether state is expanded
expanded = self.isFullyExpanded(state)
if not expanded:
return self.expansion(state)
else: # If not fully expanded then return the best child
newChild = self.best_child(state)
if(newChild is None):
return state
state = newChild
return state
    # Every node tracks its remaining untried options, so a node with 0 options left
    # has been fully expanded (all of its moves already have child nodes)
def isFullyExpanded(self, state):
return len(state.options) == 0
# Expand one node
def expansion(self, state):
newGrid = copy.deepcopy(state.grid)
# Pop the option with the highest get_continuous value
opt = heapq.heappop(state.options)
r, c = opt[1]
playerVal = ''
if state.player == 'b':
playerVal = 'w'
else:
playerVal = 'b'
if newGrid[r][c] == '.':
newGrid[r][c] = state.player
# Create a child with new options
newChildOptions = self.get_options_ibounds(newGrid, r, c)
newChild = State(newGrid, playerVal, state, (r,c), newChildOptions)
newChild.counter = self.counter
self.counter += 1
state.add_child(newChild)
return newChild
else:
# Create a child with new options
newChildOptions = self.get_options_ibounds(newGrid, r, c)
newChild = State(newGrid, playerVal, state, (r,c), newChildOptions)
newChild.counter = self.counter
self.counter += 1
# Set terminal val to true since this game results in a tie.
newChild.terminalVal = True
state.add_child(newChild)
return newChild
def best_child(self, state):
maxNode = None
maxVal = 0
# Choose the child with the greatest Q'/N' + sqrt(log(N)/N')
for child in state.children:
tmp = child.Q/child.N + 2.0*sqrt(log(state.N)/child.N)
if(tmp > maxVal):
maxNode = child
maxVal = tmp
return maxNode
# Simulate a game until it finishes
def simulation(self, state):
return self.rollout_m(copy.deepcopy(state.grid))
# Set a particular piece to the current player
def set_piece(self, state, r, c):
grid = state.grid
if grid[r][c] == '.':
grid[r][c] = state.player
if state.player == 'b':
state.player = 'w'
else:
state.player = 'b'
return True
return False
# Get best white piece, has most consecutive in a line
def get_best_white(self, grid):
maxWhite = 0
whitePos = 5, 5
for r in range(len(grid)):
for c in range(len(grid)):
if(grid[r][c] == 'w'):
# For every white piece find the max in-a-row value
n_count = self.get_continuous_count_m(grid, r, c, -1, 0)
s_count = self.get_continuous_count_m(grid, r, c, 1, 0)
e_count = self.get_continuous_count_m(grid, r, c, 0, 1)
w_count = self.get_continuous_count_m(grid, r, c, 0, -1)
se_count = self.get_continuous_count_m(grid, r, c, 1, 1)
nw_count = self.get_continuous_count_m(grid, r, c, -1, -1)
ne_count = self.get_continuous_count_m(grid, r, c, -1, 1)
sw_count = self.get_continuous_count_m(grid, r, c, 1, -1)
maxCount = max((n_count + s_count), (e_count + w_count), (se_count + nw_count), (ne_count + sw_count))
# If you found a white piece that has more in-a-row pieces then update your whitePos piece
if(maxCount > maxWhite):
maxWhite = maxCount
whitePos = (r, c)
return whitePos
def get_options_ibounds(self, grid, row, col):
# Create a bounding box around the best white piece
row, col = self.get_best_white(grid)
current_pcs = []
optimal_pcs = []
bottom = 0
top = 0
left = 0
right = 0
if(row - 2 < 0): # If box hits bottom
bottom = 0
top = 5
elif(row + 3 > len(grid)): # If box hits top
top = len(grid) - 1
bottom = len(grid) - 6
else: # Otherwise can set upper and lower bounds normally
bottom = row - 2
top = row + 2
if(col - 2 < 0): # If box hits left wall
left = 0
right = 5
elif(col + 3 > len(grid)): # If box hits right wall
right = len(grid) - 1
left = len(grid) - 6
else: # Otherwise can set left and right bounds normally
left = col - 2
right = col + 2
for r in range(bottom, top):
for c in range(left, right):
if(grid[r][c] == '.'):
# For each option in the bounding box see which option would give you the max in-a-row pieces
grid[r][c] = 'w'
n_count = self.get_continuous_count_m(grid, r, c, -1, 0)
s_count = self.get_continuous_count_m(grid, r, c, 1, 0)
e_count = self.get_continuous_count_m(grid, r, c, 0, 1)
w_count = self.get_continuous_count_m(grid, r, c, 0, -1)
se_count = self.get_continuous_count_m(grid, r, c, 1, 1)
nw_count = self.get_continuous_count_m(grid, r, c, -1, -1)
ne_count = self.get_continuous_count_m(grid, r, c, -1, 1)
sw_count = self.get_continuous_count_m(grid, r, c, 1, -1)
maxCount = max((n_count + s_count), (e_count + w_count), (se_count + nw_count), (ne_count + sw_count))
grid[r][c] = '.'
# Add the option to the priority queue, with the priority value being the negative of the in-a-row count
current_pcs.append((-maxCount, (r, c)))
# If at least one piece next to it add to optimal pieces
if(maxCount > 2):
optimal_pcs.append((-maxCount, (r, c)))
# If there were any pieces next to one another return those
if(len(optimal_pcs) > 0):
heapq.heapify(optimal_pcs)
current_pcs = optimal_pcs
else: # Otherwise just return the options
heapq.heapify(current_pcs)
return current_pcs
def get_options(self, grid):
#Collect all occupied spots
current_pcs = []
for r in range(len(grid)):
for c in range(len(grid)):
if not grid[r][c] == '.':
current_pcs.append((r,c))
        #At the beginning of the game, current_pcs is empty
if not current_pcs:
return [(self.maxrc//2, self.maxrc//2)]
#Reasonable moves should be close to where the current pieces are
#Think about what these calculations are doing
#Note: min(list, key=lambda x: x[0]) picks the element with the min value on the first dimension
min_r = max(0, min(current_pcs, key=lambda x: x[0])[0]-1)
max_r = min(self.maxrc, max(current_pcs, key=lambda x: x[0])[0]+1)
min_c = max(0, min(current_pcs, key=lambda x: x[1])[1]-1)
max_c = min(self.maxrc, max(current_pcs, key=lambda x: x[1])[1]+1)
#Options of reasonable next step moves
options = []
for i in range(min_r, max_r+1):
for j in range(min_c, max_c+1):
if not (i, j) in current_pcs:
options.append((i,j))
if len(options) == 0:
#In the unlikely event that no one wins before board is filled
#Make white win since black moved first
self.game_over = True
self.winner = 'w'
return options
# Gets continuous count in a specific direction
def get_continuous_count(self, grid, r, c, dr, dc):
piece = grid[r][c]
result = 0
i = 1
while True:
new_r = r + dr * i
new_c = c + dc * i
if 0 <= new_r < self.grid_count and 0 <= new_c < self.grid_count:
if self.grid[new_r][new_c] == piece:
result += 1
else:
break
else:
break
i += 1
return result
    # Backpropagate and update parent nodes
def backpropagation(self, state, result):
while state is not None:
if(len(result) == 0):
state.Q += 0.5
elif result[state.player] == 0:
state.Q += 1
state.N += 1
state = state.parent
def rollout_m(self, grid):
self.game_over = False
simReward = {}
while not self.game_over:
r,c = self.make_move_m(grid)
self.set_piece_m(grid,r,c)
self.check_win_m(grid,r,c)
#assign rewards
if self.winner_m == 'b':
simReward['b'] = 0
simReward['w'] = 1
elif self.winner_m == 'w':
simReward['b'] = 1
simReward['w'] = 0
return simReward
def get_continuous_count_m(self, grid, r, c, dr, dc):
piece = grid[r][c]
result = 0
i = 1
while True:
new_r = r + dr * i
new_c = c + dc * i
if 0 <= new_r < self.grid_count and 0 <= new_c < self.grid_count:
if grid[new_r][new_c] == piece:
result += 1
else:
break
else:
break
i += 1
return result
def set_piece_m(self, grid, r, c):
if grid[r][c] == '.':
grid[r][c] = self.piece
if self.piece == 'b':
self.piece = 'w'
else:
self.piece = 'b'
return True
return False
def check_win_m(self, grid, r, c):
n_count = self.get_continuous_count_m(grid, r, c, -1, 0)
s_count = self.get_continuous_count_m(grid, r, c, 1, 0)
e_count = self.get_continuous_count_m(grid, r, c, 0, 1)
w_count = self.get_continuous_count_m(grid, r, c, 0, -1)
se_count = self.get_continuous_count_m(grid, r, c, 1, 1)
nw_count = self.get_continuous_count_m(grid, r, c, -1, -1)
ne_count = self.get_continuous_count_m(grid, r, c, -1, 1)
sw_count = self.get_continuous_count_m(grid, r, c, 1, -1)
if (n_count + s_count + 1 >= 5) or (e_count + w_count + 1 >= 5) or \
(se_count + nw_count + 1 >= 5) or (ne_count + sw_count + 1 >= 5):
self.winner_m = grid[r][c]
self.game_over = True
def make_move_m(self, grid):
options = self.get_options(grid)
if len(options) == 0:
return -1, -1
return random.choice(options)
|
import io
import os
from tqdm import tqdm
class ProgressReportingReader(io.BufferedReader):
def __init__(self, file_path, *, tqdm_instance=None):
super().__init__(open(file_path, 'rb'))
self._filename = os.path.basename(file_path)
if tqdm_instance is None:
self._owns_tqdm = True
self.tqdm = tqdm(
unit='bytes',
unit_scale=True,
total=os.path.getsize(file_path),
)
else:
self._owns_tqdm = False
self.tqdm = tqdm_instance
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
if self._owns_tqdm:
self.tqdm.close()
self.close()
def read(self, *args, **kwargs):
chunk = super().read(*args, **kwargs)
self.tqdm.set_postfix(file=self._filename, refresh=False)
self.tqdm.update(len(chunk))
return chunk
def read1(self, *args, **kwargs):
chunk = super().read1(*args, **kwargs)
self.tqdm.set_postfix(file=self._filename, refresh=False)
self.tqdm.update(len(chunk))
return chunk
    def readinto(self, *args, **kwargs):
        count = super().readinto(*args, **kwargs)
        self.tqdm.set_postfix(file=self._filename, refresh=False)
        self.tqdm.update(count)
        return count
    def readinto1(self, *args, **kwargs):
        count = super().readinto1(*args, **kwargs)
        self.tqdm.set_postfix(file=self._filename, refresh=False)
        self.tqdm.update(count)
        return count
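# A minimal usage sketch (hypothetical file name): stream a file in 1 MiB chunks while reporting progress.
if __name__ == "__main__":
    with ProgressReportingReader("example.bin") as reader:
        while reader.read(1 << 20):
            pass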
|
'''
CSCI 677
Homework 2-b) Watershed Segmentor
Dixith Reddy Gomari
3098766483
gomari@usc.edu
References: Double click function:
http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_gui/py_mouse_handling/py_mouse_handling.html
'''
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('img3.jpg')
h,w,r=img.shape # Dimensions of the source image
markers = np.zeros((h,w), np.int32) # Markers image to use it in watershed with same size as the image
# Double click left button for marker location
print("Double click for giving the markers")
print("Press 'Esc' once done giving the marker loction")
def double_click(event,x,y,flags,param):
if event == cv2.EVENT_LBUTTONDBLCLK:
        markers[y][x] = np.random.randint(0,1000) # Store a random label for every marker location so each segment gets its own color
print("Marker location: "+str(y)+","+str(x))
cv2.namedWindow('Image')
cv2.setMouseCallback('Image',double_click)
while(1):
cv2.imshow('Image',img)
if cv2.waitKey(20) & 0xFF == 27:
break
markers = cv2.watershed(img, markers)
# Assigns random colors to the markers for segmentation
for i in range(0,1000):
img[markers == i] = [np.random.randint(0,255), np.random.randint(0,255),np.random.randint(0,255)]
cv2.imshow('Segmented Image', img)
cv2.imwrite('Segmented_Image.png', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
class Direction():
North = 0
East = 1
South = 2
West = 3
class Board():
def __init__(self, maxx, maxy, obstacles=list()):
self.maxx = maxx
self.maxy = maxy
self.obstacles = dict()
for x,y in obstacles:
if x not in self.obstacles:
self.obstacles[x] = dict()
self.obstacles[x][y] = True
def checkObstacle(self, x,y):
if x not in self.obstacles:
return False
if y not in self.obstacles[x]:
return False
return True
class Rover():
dx = [0, 1, 0, -1]
dy = [1, 0, -1, 0]
def __init__(self, board, x, y, direction):
self.board = board
self.x, self.y = x, y
self.direction = direction
def handleCommands(self, commands):
print(commands)
for cmd in commands:
newx, newy = self.x, self.y
if cmd == "F":
newx = (newx + Rover.dx[ self.direction ]) % self.board.maxx
newy = (newy + Rover.dy[ self.direction ]) % self.board.maxy
elif cmd == "B":
newx = (newx - Rover.dx[ self.direction ]) % self.board.maxx
newy = (newy - Rover.dy[ self.direction ]) % self.board.maxy
elif cmd == "L":
self.direction = (self.direction -1) % 4
elif cmd == "R":
self.direction = (self.direction +1) % 4
if self.board.checkObstacle(newx, newy):
print("ERROR", self.x, self.y)
break
self.x = newx
self.y = newy
print(cmd, self.x, self.y, self.direction)
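# A minimal usage sketch (hypothetical grid size and obstacle): drive a rover on a 10x10 wrapping board.
if __name__ == "__main__":
    board = Board(10, 10, obstacles=[(0, 3)])
    rover = Rover(board, 0, 0, Direction.North)
    rover.handleCommands("FFRFF")  # ends at (2, 2) facing East; "FFF" from the start would stop at the (0, 3) obstacle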
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import sys
from gbpclient.gbp.v2_0 import groupbasedpolicy as gbp
from gbpclient.tests.unit import test_cli20
class CLITestV20ExternalPolicyJSON(test_cli20.CLITestV20Base):
LOG = logging.getLogger(__name__)
def setUp(self):
super(CLITestV20ExternalPolicyJSON, self).setUp()
def test_create_external_policy_with_mandatory_params(self):
"""external-policy-create with all mandatory params."""
resource = 'external_policy'
cmd = gbp.CreateExternalPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'my-name'
tenant_id = 'my-tenant'
my_id = 'my-id'
args = ['--tenant-id', tenant_id,
name]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
tenant_id=tenant_id)
def test_create_external_policy_with_all_params(self):
"""external-policy-create with all params."""
resource = 'external_policy'
cmd = gbp.CreateExternalPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
tenant_id = 'mytenant'
description = 'My External Policy'
my_id = 'someid'
provided_policy_rule_sets = "prs1=true,prs2=true"
consumed_policy_rule_sets = "prs3=true,prs4=true"
external_segments = "ES1,ES2"
shared = 'true'
args = ['--tenant-id', tenant_id,
'--description', description,
'--provided-policy-rule-sets', provided_policy_rule_sets,
'--consumed-policy-rule-sets', consumed_policy_rule_sets,
'--external-segments', external_segments,
'--shared', shared,
name]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
tenant_id=tenant_id,
description=description,
provided_policy_rule_sets=
{'prs1': 'true', 'prs2': 'true'},
consumed_policy_rule_sets=
{'prs3': 'true', 'prs4': 'true'},
external_segments=
['ES1', 'ES2'],
shared=shared)
def test_list_external_policies(self):
"""external-policy-list."""
resource = 'external_policies'
cmd = gbp.ListExternalPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resource, cmd, True)
def test_show_external_policy_name(self):
"""external-policy-show."""
resource = 'external_policy'
cmd = gbp.ShowExternalPolicy(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args, ['id'])
def test_update_external_policy(self):
"external-policy-update myid --name myname --tags a b."
resource = 'external_policy'
cmd = gbp.UpdateExternalPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname',
'--tags', 'a', 'b'],
{'name': 'myname', 'tags': ['a', 'b'], })
def test_update_external_policy_with_all_params(self):
resource = 'external_policy'
cmd = gbp.UpdateExternalPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
description = 'My External Policy'
my_id = 'someid'
provided_policy_rule_sets = "prs1=true,prs2=true"
consumed_policy_rule_sets = "prs3=true,prs4=true"
external_segments = "ES1,ES2"
shared = 'true'
args = ['--name', name,
'--description', description,
'--provided-policy-rule-sets', provided_policy_rule_sets,
'--consumed-policy-rule-sets', consumed_policy_rule_sets,
'--external-segments', external_segments,
'--shared', shared,
my_id]
params = {
'name': name,
'description': description,
'provided_policy_rule_sets': {'prs1': 'true', 'prs2': 'true'},
'consumed_policy_rule_sets': {'prs3': 'true', 'prs4': 'true'},
'external_segments': ['ES1', 'ES2'],
'shared': shared
}
self._test_update_resource(resource, cmd, my_id, args, params)
def test_update_external_policy_unset_external_segment(self):
resource = 'external_policy'
cmd = gbp.UpdateExternalPolicy(test_cli20.MyApp(sys.stdout), None)
my_id = 'someid'
external_segments = ""
args = ['--external-segments', external_segments, my_id]
params = {'external_segments': []}
self._test_update_resource(resource, cmd, my_id, args, params)
def test_update_external_policy_unset_prs(self):
resource = 'external_policy'
cmd = gbp.UpdateExternalPolicy(test_cli20.MyApp(sys.stdout), None)
my_id = 'someid'
provided_policy_rule_sets = ""
consumed_policy_rule_sets = ""
args = ['--provided-policy-rule-sets', provided_policy_rule_sets,
'--consumed-policy-rule-sets', consumed_policy_rule_sets,
my_id]
params = {
'provided_policy_rule_sets': {},
'consumed_policy_rule_sets': {},
}
self._test_update_resource(resource, cmd, my_id, args, params)
def test_delete_external_policy_name(self):
"""external-policy-delete."""
resource = 'external_policy'
cmd = gbp.DeleteExternalPolicy(test_cli20.MyApp(sys.stdout), None)
my_id = 'my-id'
args = [my_id]
self._test_delete_resource(resource, cmd, my_id, args)
|
class A:
def __init__(self, a):
self._x = a
self.y = self._x
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2015, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Functions for configuring Bokeh output.
'''
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
# Stdlib imports
import logging
logger = logging.getLogger(__name__)
import io
import json
import os
import warnings
import tempfile
import uuid
# Bokeh imports
from .core.state import State
from .document import Document
from .embed import autoload_server, notebook_div, file_html
from .layouts import gridplot, GridSpec ; gridplot, GridSpec
from .resources import INLINE
import bokeh.util.browser as browserlib # full import needed for test mocking to work
from .util.dependencies import import_required, detect_phantomjs
from .util.deprecation import deprecated
from .util.notebook import get_comms, load_notebook, publish_display_data, watch_server_cells
from .util.string import decode_utf8
from .util.serialization import make_id
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
_new_param = {'tab': 2, 'window': 1}
_state = State()
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class _CommsHandle(object):
_json = {}
def __init__(self, comms, doc, json):
self._cellno = None
try:
from IPython import get_ipython
ip = get_ipython()
hm = ip.history_manager
p_prompt = list(hm.get_tail(1, include_latest=True))[0][1]
self._cellno = p_prompt
except Exception as e:
logger.debug("Could not get Notebook cell number, reason: %s", e)
self._comms = comms
self._doc = doc
self._json[doc] = json
def _repr_html_(self):
if self._cellno is not None:
return "<p><code><Bokeh Notebook handle for <strong>In[%s]</strong>></code></p>" % str(self._cellno)
else:
return "<p><code><Bokeh Notebook handle></code></p>"
@property
def comms(self):
return self._comms
@property
def doc(self):
return self._doc
@property
def json(self):
return self._json[self._doc]
def update(self, doc, json):
self._doc = doc
self._json[doc] = json
def output_file(filename, title="Bokeh Plot", mode="cdn", root_dir=None):
'''Configure the default output state to generate output saved
to a file when :func:`show` is called.
Does not change the current Document from curdoc(). File and notebook
output may be active at the same time, so e.g., this does not clear the
effects of ``output_notebook()``.
Args:
filename (str) : a filename for saving the HTML document
title (str, optional) : a title for the HTML document (default: "Bokeh Plot")
mode (str, optional) : how to include BokehJS (default: ``'cdn'``)
One of: ``'inline'``, ``'cdn'``, ``'relative(-dev)'`` or
``'absolute(-dev)'``. See :class:`bokeh.resources.Resources` for more details.
root_dir (str, optional) : root directory to use for 'absolute' resources. (default: None)
This value is ignored for other resource types, e.g. ``INLINE`` or
``CDN``.
Returns:
None
.. note::
Generally, this should be called at the beginning of an interactive
session or the top of a script.
.. warning::
This output file will be overwritten on every save, e.g., each time
show() or save() is invoked.
'''
_state.output_file(
filename,
title=title,
mode=mode,
root_dir=root_dir
)
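# e.g. output_file("lines.html", title="demo")  # a later show(plot) or save(plot) writes lines.html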
def output_notebook(resources=None, verbose=False, hide_banner=False, load_timeout=5000, notebook_type='jupyter'):
''' Configure the default output state to generate output in
Jupyter/Zeppelin notebook cells when :func:`show` is called.
Args:
resources (Resource, optional) :
How and where to load BokehJS from (default: CDN)
verbose (bool, optional) :
whether to display detailed BokehJS banner (default: False)
hide_banner (bool, optional):
whether to hide the Bokeh banner (default: False)
load_timeout (int, optional) :
Timeout in milliseconds when plots assume load timed out (default: 5000)
notebook_type (string, optional):
Notebook type (default: jupyter)
Returns:
None
.. note::
Generally, this should be called at the beginning of an interactive
session or the top of a script.
'''
# verify notebook_type first in _state.output_notebook
_state.output_notebook(notebook_type)
load_notebook(resources, verbose, hide_banner, load_timeout, notebook_type)
def set_curdoc(doc):
'''Configure the current document (returned by curdoc()).
Args:
doc (Document) : Document we will output.
Returns:
None
.. warning::
Calling this function will replace any existing document.
'''
_state.document = doc
def curdoc():
''' Return the document for the current default state.
Returns:
Document : the current default document object.
'''
return _state.document
def curstate():
''' Return the current State object
Returns:
State : the current default State object
'''
return _state
def show(obj, browser=None, new="tab", notebook_handle=False, notebook_url="localhost:8888"):
''' Immediately display a Bokeh object or application.
Args:
obj (LayoutDOM or Application) :
A Bokeh object to display.
Bokeh plots, widgets, layouts (i.e. rows and columns) may be
passed to ``show`` in order to display them. When ``output_file``
has been called, the output will be to an HTML file, which is also
opened in a new browser window or tab. When ``output_notebook``
has been called in a Jupyter notebook, the output will be inline
in the associated notebook output cell.
In a Jupyter notebook, a Bokeh application may also be passed.
The application will be run and displayed inline in the associated
notebook output cell.
browser (str, optional) :
Specify the browser to use to open output files(default: None)
For file output, the **browser** argument allows for specifying
which browser to display in, e.g. "safari", "firefox", "opera",
"windows-default". Not all platforms may support this option, see
the documentation for the standard library webbrowser_ module for
more information
new (str, optional) :
Specify the browser mode to use for output files (default: "tab")
For file output, opens or raises the browser window showing the
current output file. If **new** is 'tab', then opens a new tab.
If **new** is 'window', then opens a new window.
notebook_handle (bool, optional) :
Whether to create a notebook interaction handle (default: False)
For notebook output, toggles whether a handle which can be used
with ``push_notebook`` is returned. Note that notebook handles
only apply to standalone plots, layouts, etc. They do not apply
when showing Applications in the notebook.
notebook_url (URL, optional) :
Location of the Jupyter notebook page (default: "localhost:8888")
When showing Bokeh applications, the Bokeh server must be
explicitly configured to allow connections originating from
different URLs. This parameter defaults to the standard notebook
            host and port. If you are running on a different location, you
will need to supply this value for the application to display
properly.
It is also possible to pass ``notebook_url="*"`` to disable the
standard checks, so that applications will display regardless of
the current notebook location, however a warning will appear.
Some parameters are only useful when certain output modes are active:
* The ``browser`` and ``new`` parameters only apply when ``output_file``
is active.
* The ``notebook_handle`` parameter only applies when ``output_notebook``
      is active, and non-Application objects are being shown. It is only supported
      for Jupyter notebooks; an exception is raised for other notebook types when it is True.
* The ``notebook_url`` parameter only applies when showing Bokeh
Applications in a Jupyter notebook.
Returns:
When in a Jupyter notebook (with ``output_notebook`` enabled)
and ``notebook_handle=True``, returns a handle that can be used by
``push_notebook``, None otherwise.
.. _webbrowser: https://docs.python.org/2/library/webbrowser.html
'''
# This ugliness is to prevent importing bokeh.application (which would bring
# in Tornado) just in order to show a non-server object
if getattr(obj, '_is_a_bokeh_application_class', False):
return _show_notebook_app_with_state(obj, _state, "/", notebook_url)
if obj not in _state.document.roots:
_state.document.add_root(obj)
return _show_with_state(obj, _state, browser, new, notebook_handle=notebook_handle)
def _show_notebook_app_with_state(app, state, app_path, notebook_url):
if state.notebook_type == 'zeppelin':
raise ValueError("Zeppelin doesn't support show bokeh app.")
if not state.watching_cells:
watch_server_cells(_destroy_server_js)
state.watching_cells = True
logging.basicConfig()
from IPython.display import HTML, display
from tornado.ioloop import IOLoop
from .server.server import Server
loop = IOLoop.current()
server = Server({app_path: app}, io_loop=loop, port=0, allow_websocket_origin=[notebook_url])
server.start()
script = autoload_server(url='http://127.0.0.1:%d%s' % (server.port, app_path))
display(HTML(_server_cell(server, script)))
def _show_with_state(obj, state, browser, new, notebook_handle=False):
controller = browserlib.get_browser_controller(browser=browser)
comms_handle = None
shown = False
if state.notebook:
if state.notebook_type == 'jupyter':
comms_handle = _show_jupyter_with_state(obj, state, notebook_handle)
else:
comms_handle = _show_zeppelin_with_state(obj, state, notebook_handle)
shown = True
if state.file or not shown:
_show_file_with_state(obj, state, new, controller)
return comms_handle
def _show_file_with_state(obj, state, new, controller):
filename = save(obj, state=state)
controller.open("file://" + filename, new=_new_param[new])
def _show_jupyter_with_state(obj, state, notebook_handle):
comms_target = make_id() if notebook_handle else None
publish_display_data({'text/html': notebook_div(obj, comms_target)})
if comms_target:
handle = _CommsHandle(get_comms(comms_target), state.document,
state.document.to_json())
state.last_comms_handle = handle
return handle
def _show_zeppelin_with_state(obj, state, notebook_handle):
if notebook_handle:
raise ValueError("Zeppelin doesn't support notebook_handle.")
print("%html " + notebook_div(obj))
return None
def save(obj, filename=None, resources=None, title=None, state=None, **kwargs):
''' Save an HTML file with the data for the current document.
Will fall back to the default output state (or an explicitly provided
:class:`State` object) for ``filename``, ``resources``, or ``title`` if they
are not provided. If the filename is not given and not provided via output state,
it is derived from the script name (e.g. ``/foo/myplot.py`` will create
``/foo/myplot.html``)
Args:
obj (LayoutDOM object) : a Layout (Row/Column), Plot or Widget object to display
filename (str, optional) : filename to save document under (default: None)
If None, use the default state configuration.
resources (Resources, optional) : A Resources config to use (default: None)
If None, use the default state configuration, if there is one.
otherwise use ``resources.INLINE``.
title (str, optional) : a title for the HTML document (default: None)
If None, use the default state title value, if there is one.
Otherwise, use "Bokeh Plot"
state (State, optional) :
A :class:`State` object. If None, then the current default
implicit state is used. (default: None).
Returns:
str: the filename where the HTML file is saved.
'''
if 'validate' in kwargs:
deprecated((0, 12, 5), 'The `validate` keyword argument', 'None', """
The keyword argument has been removed and the document will always be validated.""")
if state is None:
state = _state
filename, resources, title = _get_save_args(state, filename, resources, title)
_save_helper(obj, filename, resources, title)
return os.path.abspath(filename)
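# A minimal usage sketch for ``save`` (illustrative only; the file name is hypothetical):
#
#     from bokeh.plotting import figure
#     from bokeh.io import save
#     from bokeh.resources import CDN
#     p = figure()
#     p.line([1, 2, 3], [4, 6, 5])
#     saved_path = save(p, filename="example.html", resources=CDN, title="Example")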
def _detect_filename(ext):
""" Detect filename from the name of the script being run. Returns
temporary file if the script could not be found or the location of the
script does not have write permission (e.g. interactive mode).
"""
import inspect
from os.path import dirname, basename, splitext, join, curdir
frame = inspect.currentframe()
while frame.f_back and frame.f_globals.get('name') != '__main__':
frame = frame.f_back
filename = frame.f_globals.get('__file__')
if filename is None or not os.access(dirname(filename) or curdir, os.W_OK | os.X_OK):
return tempfile.NamedTemporaryFile(suffix="." + ext).name
name, _ = splitext(basename(filename))
return join(dirname(filename), name + "." + ext)
def _get_save_args(state, filename, resources, title):
warn = True
if filename is None and state.file:
filename = state.file['filename']
if filename is None:
warn = False
filename = _detect_filename("html")
if resources is None and state.file:
resources = state.file['resources']
if resources is None:
if warn:
warnings.warn("save() called but no resources were supplied and output_file(...) was never called, defaulting to resources.CDN")
from .resources import CDN
resources = CDN
if title is None and state.file:
title = state.file['title']
if title is None:
if warn:
warnings.warn("save() called but no title was supplied and output_file(...) was never called, using default title 'Bokeh Plot'")
title = "Bokeh Plot"
return filename, resources, title
def _save_helper(obj, filename, resources, title):
html = file_html(obj, resources, title=title)
with io.open(filename, mode="w", encoding="utf-8") as f:
f.write(decode_utf8(html))
def push_notebook(document=None, state=None, handle=None):
''' Update Bokeh plots in a Jupyter notebook output cells with new data
or property values.
    When working in the notebook, the ``show`` function can be passed the
argument ``notebook_handle=True``, which will cause it to return a
handle object that can be used to update the Bokeh output later. When
``push_notebook`` is called, any property updates (e.g. plot titles or
data source values, etc.) since the last call to ``push_notebook`` or
the original ``show`` call are applied to the Bokeh output in the
previously rendered Jupyter output cell.
Several example notebooks can be found in the GitHub repository in
the :bokeh-tree:`examples/howto/notebook_comms` directory.
Args:
document (Document, optional) :
A :class:`~bokeh.document.Document` to push from. If None,
uses ``curdoc()``. (default: None)
state (State, optional) :
A :class:`State` object. If None, then the current default
state (set by ``output_file``, etc.) is used. (default: None)
Returns:
None
Examples:
        Typical usage is similar to this:
.. code-block:: python
from bokeh.plotting import figure
from bokeh.io import output_notebook, push_notebook, show
output_notebook()
plot = figure()
plot.circle([1,2,3], [4,6,5])
handle = show(plot, notebook_handle=True)
# Update the plot title in the earlier cell
plot.title = "New Title"
push_notebook(handle=handle)
'''
if state is None:
state = _state
if not document:
document = state.document
if not document:
warnings.warn("No document to push")
return
if handle is None:
handle = state.last_comms_handle
if not handle:
warnings.warn("Cannot find a last shown plot to update. Call output_notebook() and show(..., notebook_handle=True) before push_notebook()")
return
to_json = document.to_json()
if handle.doc is not document:
msg = dict(doc=to_json)
else:
msg = Document._compute_patch_between_json(handle.json, to_json)
handle.comms.send(json.dumps(msg))
handle.update(document, to_json)
def reset_output(state=None):
''' Clear the default state of all output modes.
Returns:
None
'''
_state.reset()
def _remove_roots(subplots):
doc = _state.document
for sub in subplots:
if sub in doc.roots:
doc.remove_root(sub)
def _server_cell(server, script):
''' Wrap a script returned by ``autoload_server`` in a div that allows cell
destruction/replacement to be detected.
'''
divid = uuid.uuid4().hex
_state.uuid_to_server[divid] = server
div_html = "<div class='bokeh_class' id='{divid}'>{script}</div>"
return div_html.format(script=script, divid=divid)
_destroy_server_js = """
var cmd = "from bokeh import io; io._destroy_server('<%= destroyed_id %>')";
var command = _.template(cmd)({destroyed_id:destroyed_id});
Jupyter.notebook.kernel.execute(command);
"""
def _destroy_server(div_id):
''' Given a UUID id of a div removed or replaced in the Jupyter
    notebook, destroy the corresponding server's sessions and stop the server.
'''
server = _state.uuid_to_server.get(div_id, None)
if server is None:
logger.debug("No server instance found for uuid: %r" % div_id)
return
try:
for session in server.get_sessions():
session.destroy()
except Exception as e:
logger.debug("Could not destroy server for id %r: %s" % (div_id, e))
def _wait_until_render_complete(driver):
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
script = """
// add private window prop to check that render is complete
window._bokeh_render_complete = false;
function done() {
window._bokeh_render_complete = true;
}
var doc = window.Bokeh.documents[0];
if (doc.is_idle)
done();
else
doc.idle.connect(done);
"""
driver.execute_script(script)
def is_bokeh_render_complete(driver):
return driver.execute_script('return window._bokeh_render_complete;')
try:
WebDriverWait(driver, 5, poll_frequency=0.1).until(is_bokeh_render_complete)
except TimeoutException:
logger.warn("The webdriver raised a TimeoutException while waiting for \
a 'bokeh:idle' event to signify that the layout has rendered. \
Something may have gone wrong.")
finally:
browser_logs = driver.get_log('browser')
severe_errors = [l for l in browser_logs if l.get('level') == 'SEVERE']
if len(severe_errors) > 0:
logger.warn("There were severe browser errors that may have affected your export: {}".format(severe_errors))
def _crop_image(image, left=0, top=0, right=0, bottom=0, **kwargs):
'''Crop the border from the layout'''
cropped_image = image.crop((left, top, right, bottom))
return cropped_image
def _get_screenshot_as_png(obj):
webdriver = import_required('selenium.webdriver',
'To use bokeh.io.export_png you need selenium ' +
'("conda install -c bokeh selenium" or "pip install selenium")')
Image = import_required('PIL.Image',
'To use bokeh.io.export_png you need pillow ' +
'("conda install pillow" or "pip install pillow")')
# assert that phantomjs is in path for webdriver
detect_phantomjs()
html_path = tempfile.NamedTemporaryFile(suffix=".html").name
save(obj, filename=html_path, resources=INLINE, title="")
driver = webdriver.PhantomJS()
driver.get("file:///" + html_path)
## resize for PhantomJS compat
driver.execute_script("document.body.style.width = '100%';")
_wait_until_render_complete(driver)
png = driver.get_screenshot_as_png()
bounding_rect_script = "return document.getElementsByClassName('bk-root')[0].children[0].getBoundingClientRect()"
b_rect = driver.execute_script(bounding_rect_script)
driver.quit()
image = Image.open(io.BytesIO(png))
cropped_image = _crop_image(image, **b_rect)
return cropped_image
def export_png(obj, filename=None):
''' Export the LayoutDOM object as a PNG.
If the filename is not given, it is derived from the script name
(e.g. ``/foo/myplot.py`` will create ``/foo/myplot.png``)
Args:
obj (LayoutDOM object) : a Layout (Row/Column), Plot or Widget object to display
filename (str, optional) : filename to save document under (default: None)
        If None, it is derived from the script name.
Returns:
filename (str) : the filename where the static file is saved.
.. warning::
Responsive sizing_modes may generate layouts with unexpected size and
aspect ratios. It is recommended to use the default ``fixed`` sizing mode.
.. warning::
Glyphs that are rendered via webgl won't be included in the generated PNG.
'''
image = _get_screenshot_as_png(obj)
if filename is None:
filename = _detect_filename("png")
image.save(filename)
return os.path.abspath(filename)
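# A minimal usage sketch for ``export_png`` (illustrative only; it requires the
# selenium, pillow and phantomjs dependencies checked above, and the file name
# is hypothetical):
#
#     from bokeh.plotting import figure
#     from bokeh.io import export_png
#     p = figure()
#     p.circle([1, 2, 3], [4, 6, 5])
#     png_path = export_png(p, filename="example.png")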
def _get_svgs(obj):
webdriver = import_required('selenium.webdriver',
'To use bokeh.io.export_svgs you need selenium ' +
'("conda install -c bokeh selenium" or "pip install selenium")')
# assert that phantomjs is in path for webdriver
detect_phantomjs()
html_path = tempfile.NamedTemporaryFile(suffix=".html").name
save(obj, filename=html_path, resources=INLINE, title="")
driver = webdriver.PhantomJS()
driver.get("file:///" + html_path)
_wait_until_render_complete(driver)
svg_script = """
var serialized_svgs = [];
var svgs = document.getElementsByClassName('bk-root')[0].getElementsByTagName("svg");
for (var i = 0; i < svgs.length; i++) {
var source = (new XMLSerializer()).serializeToString(svgs[i]);
serialized_svgs.push(source);
};
return serialized_svgs
"""
svgs = driver.execute_script(svg_script)
driver.quit()
return svgs
def export_svgs(obj, filename=None):
''' Export the SVG-enabled plots within a layout. Each plot will result
in a distinct SVG file.
If the filename is not given, it is derived from the script name
(e.g. ``/foo/myplot.py`` will create ``/foo/myplot.svg``)
Args:
obj (LayoutDOM object) : a Layout (Row/Column), Plot or Widget object to display
filename (str, optional) : filename to save document under (default: None)
        If None, it is derived from the script name.
Returns:
filenames (list(str)) : the list of filenames where the SVGs files
are saved.
.. warning::
Responsive sizing_modes may generate layouts with unexpected size and
aspect ratios. It is recommended to use the default ``fixed`` sizing mode.
'''
svgs = _get_svgs(obj)
if len(svgs) == 0:
logger.warn("No SVG Plots were found.")
return
if filename is None:
filename = _detect_filename("svg")
filenames = []
for i, svg in enumerate(svgs):
if i == 0:
filename = filename
else:
idx = filename.find(".svg")
filename = filename[:idx] + "_{}".format(i) + filename[idx:]
with io.open(filename, mode="w", encoding="utf-8") as f:
f.write(svg)
filenames.append(filename)
return filenames
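# A minimal usage sketch for ``export_svgs`` (illustrative only; plots must be
# created with ``output_backend="svg"``, otherwise no SVGs are found):
#
#     from bokeh.plotting import figure
#     from bokeh.io import export_svgs
#     p = figure(output_backend="svg")
#     p.circle([1, 2, 3], [4, 6, 5])
#     svg_paths = export_svgs(p, filename="example.svg")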
|
#!/proj/sot/ska3/flight/bin/python
#############################################################################################
# #
# run_glimmon_trend_data_update.py: update trend data with limits in glimmon database #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Feb 01, 2021 #
# #
#############################################################################################
import os
import sys
import re
import string
import time
import numpy
import argparse
import getpass
import astropy.io.fits as pyfits
from astropy.io.fits import Column
import Ska.engarchive.fetch as fetch
import Chandra.Time
#
#--- reading directory list
#
path = '/data/mta/Script/MTA_limit_trends/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
data = [line.strip() for line in f.readlines()]
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
#
#--- append path to a private folder
#
sys.path.append(bin_dir)
sys.path.append("/data/mta4/Script/Python3.10/MTA")
#
#--- import several functions
#
import mta_common_functions as mcf #---- contains other functions commonly used in MTA scripts
import envelope_common_function as ecf #---- contains other functions commonly used in envelope
import fits_operation as mfo #---- fits operation collection
import read_limit_table as rlt #---- read limit table and create msid<--> limit dict
#
#--- other path setting
#
#limit_dir = '/data/mta/Script/MSID_limit/Trend_limit_data/'
limit_dir = '/data/mta/Script/MSID_limit/Trend_limit_data/'
#
#--- fits generation related lists
#
col_names = ['time', 'msid', 'med', 'std', 'min', 'max',
'ylower', 'yupper', 'rlower', 'rupper', 'dcount',
'ylimlower', 'ylimupper', 'rlimlower', 'rlimupper', 'state']
col_format = ['D', '20A', 'D', 'D','D','D','D','D','D','D', 'I', 'D', 'D', 'D', 'D', '10A']
a_month = 86400 * 30
#--------------------------------------------------------------------------------
#-- run_glimmon_trend_data_update: update trend data with limits in glimmon database
#--------------------------------------------------------------------------------
def run_glimmon_trend_data_update():
"""
update trend data with limits in glimmon database
input: none
    output: <data_dir>/<category>/<msid>_<dtype>_data.fits
"""
#
#--- create msid <---> category dict
#
catg_dict = create_category_dict()
#
#--- multi state data --- no more distinction (Jan 22, 2020)
#
run_data_update('m', catg_dict)
#
#--- no state data
#
# run_data_update('n', catg_dict)
#--------------------------------------------------------------------------------
#-- run_data_update: extract data for the specified limit category type ---
#--------------------------------------------------------------------------------
def run_data_update(mtype, catg_dict):
"""
extract data for the specified limit category type
input: mtype --- limit state type; m: multi state/n: no state
            catg_dict --- a dictionary of msid <---> category
output: updated data fits files
"""
[lim_dict, cnd_dict] = rlt.get_limit_table()
# if mtype == 'm':
# ifile = limit_dir + 'Limit_data/multi_switch_limit'
# else:
# ifile = limit_dir + 'Limit_data/trend_limit'
ifile = limit_dir + 'Limit_data/op_limits_new.db'
#
#--- first find which msids are in that category, and extract data
#
data = mcf.read_data_file(ifile)
for ent in data:
if ent[0] == '#':
continue
atemp = re.split('\s+', ent)
msid = atemp[0]
catg = catg_dict[msid]
#
#--- just in case the data category directory does not exist
#
cmd = 'mkdir -p ' + data_dir + atemp[1]
os.system(cmd)
print("MSID: " + catg + '/' + msid)
#
#--- three different data length
#
for dtype in ['week', 'short', 'long']:
#
#--- set data period
#
[dfile, start, stop] = find_data_collection_period(msid, catg, dtype)
#
#--- extract new data part; saved as a local fits file
#
alimit = lim_dict[msid]
cnd_msid = cnd_dict[msid]
out = extract_data_from_ska(msid, start, stop, dtype, alimit, cnd_msid)
#
#--- update the main fits file, either move the local file or append the new part
#
if out == True:
update_data_file(dfile, msid, dtype)
#--------------------------------------------------------------------------------
#-- run_for_msid_list: extract data from ska database for a given msid_list ---
#--------------------------------------------------------------------------------
def run_for_msid_list(msid_list, dtype):
"""
extract data from ska database for a given msid_list
    input: msid_list --- the file name of the msid_list
dtype --- data type , week, short, or long
output: updated data fits files
"""
[lim_dict, cnd_dict] = rlt.get_limit_table()
ifile = house_keeping + msid_list
data = mcf.read_data_file(ifile)
for ent in data:
if ent[0] == '#':
continue
elif ent.strip() == '':
continue
atemp = re.split('\s+', ent)
msid = atemp[0].strip()
catg = atemp[1].strip()
print("MSID: " + catg + '/' + msid)
#
#--- just in case the data category directory does not exist
#
cmd = 'mkdir -p ' + data_dir + atemp[1]
os.system(cmd)
#
#--- set data period
#
[dfile, start, stop] = find_data_collection_period(msid, catg, dtype)
#
#--- extract new data part; saved as a local fits file
#
try:
alimit = lim_dict[msid]
cnd_msid = cnd_dict[msid]
#
#--- if the collection time is larger than a month, extract data for 30 day chunk
#
diff = stop - start
if diff > a_month:
mcnt = int(diff / a_month)
for m in range(0, mcnt):
mstart = start + a_month * m
mstop = mstart + a_month
lstart = "%4.2f" % mcf.chandratime_to_fraq_year(mstart)
lstop = "%4.2f" % mcf.chandratime_to_fraq_year(mstop)
print("Computing: " + str(lstart) + '<-->' + str(lstop))
#
#--- extract data and make a local fits file
#
out = extract_data_from_ska(msid, mstart, mstop, dtype, alimit, cnd_msid)
#
#--- update the main fits file, either move the local file or append the new part
#
if out == True:
update_data_file(dfile, msid, dtype)
out = extract_data_from_ska(msid, mstop, stop, dtype, alimit, cnd_msid)
if out == True:
update_data_file(dfile, msid, dtype)
#
#--- the data collection period is < 30 days
#
else:
out = extract_data_from_ska(msid, start, stop, dtype, alimit, cnd_msid)
if out == True:
update_data_file(dfile, msid, dtype)
except:
#print(msid + ' is not in glimmon database')
print(msid + ' is not in ska fetch database')
continue
#--------------------------------------------------------------------------------
#-- find_data_collection_period: set start and stop time of data collection period
#--------------------------------------------------------------------------------
def find_data_collection_period(msid, catg, dtype):
"""
set start and stop time of data collection period
input: msid --- msid
catg --- category name of the msid
dtype --- data type: week, short, long
output: dfile --- data file name
stime --- starting time in seconds from 1998.1.1
etime --- stopping time in seconds from 1998.1.1
"""
#
#--- set today's date as the ending time
#
etime = today_date_chandra()
#
#--- week data are always extracted from two weeks ago up to today
#
if dtype == 'week':
dfile = data_dir + catg + '/' + msid + '_week_data.fits'
stime = etime - 86400 * 14
#
#--- for others, find the last entry time from the existing fits data file
#
elif dtype == 'short':
dfile = data_dir + catg + '/' + msid + '_short_data.fits'
stime = find_last_entry_time(dfile, dtype, etime)
else:
dfile = data_dir + catg + '/' + msid + '_data.fits'
stime = find_last_entry_time(dfile, dtype, etime)
return [dfile, stime, etime]
#--------------------------------------------------------------------------------
#-- today_date_chandra: get today's time (0 hr) in seconds from 1998.1.1 ---
#--------------------------------------------------------------------------------
def today_date_chandra():
"""
get today's time (0 hr) in seconds from 1998.1.1
input: none
output: stime --- today's date (0 hr) in seconds from 1998.1.1
"""
today = time.strftime('%Y:%j:00:00:00', time.gmtime())
stime = Chandra.Time.DateTime(today).secs
return stime
#--------------------------------------------------------------------------------
#-- find_last_entry_time: find the last entry time --
#--------------------------------------------------------------------------------
def find_last_entry_time(dfile, dtype, today):
"""
find the last entry time
input: dfile --- fits data file name
dtype --- data type: week, short, long
today --- today's time in seconds from 1998.1.1
output: tend --- the last entry time in seconds from 1998.1.1
                    if the past file does not exist, a standard time is given
                    (two weeks ago for week data, 1.5 years ago for short,
                    and 1999:201 for long)
"""
#
#--- check whether the previous fits data file exists; if it does, find the last entry time
#
if os.path.isfile(dfile):
hdout = pyfits.open(dfile)
data = hdout[1].data
dtime = data['time']
tend = dtime[-1]
hdout.close()
#
#--- otherwise, set a standard starting time
#
else:
if dtype == 'week':
tend = today - 86400 * 14 #--- two weeks ago
elif dtype == 'short':
tend = today - 86400 * 548 #--- 1.5 years ago
else:
tend = 48815999.0 #--- 1999.201
return tend
#--------------------------------------------------------------------------------
#-- update_data_file: update data file ---
#--------------------------------------------------------------------------------
def update_data_file(dfile, msid, dtype):
"""
update data file
input: dfile --- fits data file name
msid --- msid
dtype --- data type: week, short or long
output: dfile --- updated fits data file
"""
#
#--- the name of the fits file containing the new data section
#
if dtype == 'week':
lfile = msid + '_week_data.fits'
elif dtype == 'short':
lfile = msid + '_short_data.fits'
#
#--- for the short time data, remove data older than 1.5 years
#--- before appending the new data
#
if os.path.isfile(dfile):
today = today_date_chandra()
cut = today - 86400 * 548
remove_old_data_from_fits(dfile, cut)
else:
lfile = msid + '_data.fits'
#
#--- week data is just replaced, but others are appended if the past data exists
#
if (dtype != 'week') and os.path.isfile(dfile):
mcf.rm_files('./ztemp.fits')
mfo.appendFitsTable(dfile, lfile, './ztemp.fits')
cmd = 'mv -f ./ztemp.fits ' + dfile
os.system(cmd)
mcf.rm_files(lfile)
else:
cmd = 'mv ' + lfile + ' ' + dfile
os.system(cmd)
#--------------------------------------------------------------------------------
#-- extract_data_from_ska: extract data from ska database and create data fits file
#--------------------------------------------------------------------------------
def extract_data_from_ska(msid, start, stop, dtype, alimit, cnd_msid):
"""
    extract data from ska database and create data fits file
input: msid --- msid
start --- period starting time in seconds from 1998.1.1
stop --- period ending time in seconds from 1998.1.1
dtype --- data type: week, short or long (blank is fine)
    output: <msid>_<dtype>_data.fits
"""
period = dtype_to_period(dtype)
fdata = run_condtion_msid(msid, start, stop, period, alimit, cnd_msid)
if fdata != []:
create_fits_file(msid, fdata, dtype)
return True
else:
return False
#--------------------------------------------------------------------------------
#-- dtype_to_period: set data average interval period for a given data type --
#--------------------------------------------------------------------------------
def dtype_to_period(dtype):
"""
set data average interval period for a given data type
input: dtype --- data type: week, short or others
    output: period --- time period in seconds
"""
if dtype == 'week':
period = 300.0
elif dtype == 'short':
period = 3600.0
else:
period = 86400.0
return period
#--------------------------------------------------------------------------------
#-- run_condtion_msid: extract data from ska database and analyze data --
#--------------------------------------------------------------------------------
def run_condtion_msid(msid, start, stop, period, alimit, cnd_msid):
"""
extract data from ska database and analyze data
input: msid --- msid
start --- starting time in seconds from 1998.1.1
stop --- stopping time in seconds from 1998.1.1
period --- data collection interval in seconds (e.g. 300, 3600, or 86400)
alimit --- a list of lists of limits
cnd_msid ---- msid which tells which limit set to use for given time
output: save --- a list of list of data:
time, average, median, std, min, max,
ratio of yellow lower violation,
ratio of yellow upper violation,
                    ratio of red lower violation,
ratio of red upper violation,
total data in the period,
yellow lower limit, yellow upper limit,
red lower limit, red upper limit
state
"""
#
#--- extract data with ska fetch for the given time period
#
out = fetch.MSID(msid, start, stop)
ok = ~out.bads
dtime = out.times[ok]
if len(dtime) < 1:
return []
tdata = out.vals[ok]
tmax = dtime[-1]
#
#--- for the case this is multi limit case
#
if cnd_msid != 'none':
out = fetch.MSID(cnd_msid, start, stop)
mtime = out.times
mdata = out.vals
mlen = len(mdata)
#
#--- for the case this is single limit case
#
else:
mdata = ['none'] * len(dtime)
#
#--- there are 16 elements to keep in the output data
#
save = []
for k in range(0, 16):
save.append([])
#
#--- compute how many data collection periods exist for a given data period
#
n_period = int((stop - start) / period) + 1
#
#--- collect data in each time period and compute statistics
#
for k in range(0, n_period):
begin = start + k * period
end = begin + period
ctime = begin + 0.5 * period
#
#--- find the state of condition msid for this period of time
#
if cnd_msid == 'none':
mkey = 'none'
else:
pos = int(mlen * begin /tmax) - 1
if pos < 0:
pos = 0
if pos >= mlen:
pos = mlen -1
mkey = mdata[pos].lower()
#
#--- set limit range only once at the beginning of each data collection period
#
try:
limit_table = find_limits(begin, mkey, alimit)
[y_low, y_top, r_low, r_top] = limit_table
except:
limit_table = [-9999998.0, 9999998.0, -9999999.0, 9999999.0]
[y_low, y_top, r_low, r_top] = [-9999998.0, 9999998.0, -9999999.0, 9999999.0]
#
#--- select data between the period
#
ind = dtime >= begin
btemp = dtime[ind]
sdata = tdata[ind]
ind = btemp < end
sdata = sdata[ind]
dcnt = len(sdata)
if dcnt < 1:
continue
#
#--- get stats
#
dmin = min(sdata)
dmax = max(sdata)
avg = numpy.mean(sdata)
#
#--- if the value is too large something is wrong: so skip it
#
if abs(avg) > 100000000.0:
continue
med = numpy.median(sdata)
std = numpy.std(sdata)
#
#--- count number of violations
#
[y_lc, y_uc, r_lc, r_uc] = find_limit_violatons(sdata, limit_table)
#
#--- save the results
#
save[0].append(float(int(ctime)))
save[1].append(float("%3.2f" % avg))
save[2].append(float("%3.2f" % med))
save[3].append(float("%3.2f" % std))
save[4].append(float("%3.2f" % dmin))
save[5].append(float("%3.2f" % dmax))
save[6].append(float("%1.3f" % (y_lc /dcnt)))
save[7].append(float("%1.3f" % (y_uc /dcnt)))
save[8].append(float("%1.3f" % (r_lc /dcnt)))
save[9].append(float("%1.3f" % (r_uc /dcnt)))
save[10].append(dcnt)
save[11].append(float("%3.2f" % y_low))
save[12].append(float("%3.2f" % y_top))
save[13].append(float("%3.2f" % r_low))
save[14].append(float("%3.2f" % r_top))
save[15].append(mkey)
return save
#--------------------------------------------------------------------------------
#-- find_limit_violatons: count the number of yellow/red violations in the given data set
#--------------------------------------------------------------------------------
def find_limit_violatons(sdata, limit_table):
"""
    count the number of yellow/red violations in the given data set
input: sdata --- a list of data
limit_table --- a list of limit values
                        this could contain two sets of limit values
output: [y_lc, y_uc, r_lc, r_uc]
"""
#
#--- count number of violations: multi limit set case
#
if isinstance(limit_table[0], list):
y_lc = 0
y_uc = 0
r_lc = 0
r_uc = 0
        for val in sdata:
#
#--- no violation: skip the value if it falls inside the yellow range of any limit set
#--- (the original inner loop's continue had no effect, so every value was counted)
#
            in_range = False
            for ltable in limit_table:
                if (val > ltable[0]) and (val < ltable[1]):
                    in_range = True
                    break
            if in_range:
                continue
for ltable in limit_table:
#
#--- yellow violation
#
if (val >ltable[2]) and (val <= ltable[0]):
y_lc += 1
continue
if( val < ltable[3]) and (val >= ltable[1]):
y_uc += 1
continue
#
#--- red violation
#
if (val < ltable[2]):
r_lc += 1
continue
if (val > ltable[3]):
r_uc += 1
continue
#
#--- single set of limit case
#
else:
[y_low, y_top, r_low, r_top] = limit_table
ind = sdata < r_low
r_lc = len(sdata[ind]) #--- red lower violation
ind = sdata < y_low
y_lc = len(sdata[ind]) - r_lc #--- yellow lower violation
ind = sdata > r_top
r_uc = len(sdata[ind]) #--- red upper violation
ind = sdata > y_top
y_uc = len(sdata[ind]) - r_uc #--- yellow upper violation
return [y_lc, y_uc, r_lc, r_uc]
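#
#--- illustrative example of the single-limit branch above (hypothetical values,
#--- not taken from any real msid); sdata is assumed to be a numpy array:
#---     sdata       = numpy.array([1.0, 5.0, 9.0, 12.0])
#---     limit_table = [2.0, 8.0, 0.0, 10.0]       #--- [y_low, y_top, r_low, r_top]
#---     find_limit_violatons(sdata, limit_table)  #--- returns [1, 1, 0, 1]
#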
#--------------------------------------------------------------------------------
#-- find_limits: find the set of limits for the given time and condition msid state
#--------------------------------------------------------------------------------
def find_limits(stime, mkey, alimit):
"""
    find the set of limits for the given time and the state of the condition msid
    input:  stime   --- time in seconds from 1998.1.1
            mkey    --- condition given by the condition msid
alimit --- a full limit table
the structure of alimit is:
[
[<start>,<stop>,<switch value list>,
<limit dictionary with the switch as key>
]
]
output: [y_low, y_top, r_low, r_top]
"""
stime = int(stime)
mkey = mkey.strip()
ltable = []
for k in range(0, len(alimit)):
begin = alimit[k][0]
end = alimit[k][1]
if (stime >= begin) and (stime < end):
try:
ltable = alimit[k][3][mkey]
except:
ltable = alimit[k][3]['none']
break
if ltable == []:
ltable = [-9999998.0, 9999998.0, -9999999.0, 9999999.0]
return ltable
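#
#--- illustrative sketch of the alimit structure expected above (hypothetical
#--- times and limit values):
#---     alimit = [[48815999, 99999999, ['on', 'off'],
#---                {'on':   [10.0, 50.0,  5.0, 60.0],
#---                 'off':  [20.0, 40.0, 15.0, 45.0],
#---                 'none': [10.0, 50.0,  5.0, 60.0]}]]
#---     find_limits(50000000, 'on', alimit)       #--- returns [10.0, 50.0, 5.0, 60.0]
#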
#--------------------------------------------------------------------------------
#-- create_fits_file: create a fits file --
#--------------------------------------------------------------------------------
def create_fits_file(msid, data, dtype):
"""
create a fits file
input: msid --- msid
data --- a list of list of data
dtype --- data type (week, short, or others)
output: ./<msid>_<dtype>_data.fits
"""
cols = col_names
cols[1] = msid
c1 = Column(name=cols[0], format=col_format[0], array = data[0])
c2 = Column(name=cols[1], format=col_format[1], array = data[1])
c3 = Column(name=cols[2], format=col_format[2], array = data[2])
c4 = Column(name=cols[3], format=col_format[3], array = data[3])
c5 = Column(name=cols[4], format=col_format[4], array = data[4])
c6 = Column(name=cols[5], format=col_format[5], array = data[5])
c7 = Column(name=cols[6], format=col_format[6], array = data[6])
c8 = Column(name=cols[7], format=col_format[7], array = data[7])
c9 = Column(name=cols[8], format=col_format[8], array = data[8])
c10 = Column(name=cols[9], format=col_format[9], array = data[9])
c11 = Column(name=cols[10], format=col_format[10], array = data[10])
c12 = Column(name=cols[11], format=col_format[11], array = data[11])
c13 = Column(name=cols[12], format=col_format[12], array = data[12])
c14 = Column(name=cols[13], format=col_format[13], array = data[13])
c15 = Column(name=cols[14], format=col_format[14], array = data[14])
c16 = Column(name=cols[15], format=col_format[15], array = data[15])
coldefs = pyfits.ColDefs([c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16])
tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
if dtype == 'week':
ofits = msid + '_week_data.fits'
elif dtype == 'short':
ofits = msid + '_short_data.fits'
else:
ofits = msid + '_data.fits'
mcf.rm_files(ofits)
tbhdu.writeto(ofits)
#--------------------------------------------------------------------------------
#-- remove_old_data_from_fits: remove old part of the data from fits file --
#--------------------------------------------------------------------------------
def remove_old_data_from_fits(fits, cut):
"""
remove old part of the data from fits file
input: fits --- fits file name
cut --- cut date in seconds from 1998.1.1
output: fits --- updated fits file
"""
#
#--- open the fits file
#
hbdata = pyfits.open(fits)
data = hbdata[1].data
cols = hbdata[1].columns
col_list = cols.names
hbdata.close()
#
#--- create a mask
#
dtime = data['time']
index = dtime > cut
#
#--- using the mask get only data > cut
#
udata = []
for col in col_list:
out = data[col]
nout = out[index]
udata.append(list(nout))
#
#--- update the data and save then in the fits file
#
sfits = fits + '~'
cmd = 'mv ' + fits + ' ' + sfits
os.system(cmd)
try:
ecf.create_fits_file(fits, cols, udata)
mcf.rm_file(sfits)
except:
cmd = 'mv ' + sfits + ' ' + fits
os.system(cmd)
#--------------------------------------------------------------------------------
#-- create_category_dict: create msid <---> category dict --
#--------------------------------------------------------------------------------
def create_category_dict():
"""
create msid <---> category dict
input: none but read from <house_keeping>/msid_list
output: catg_dict
"""
ifile = limit_dir + 'house_keeping/msid_list'
data = mcf.read_data_file(ifile)
catg_dict = {}
for ent in data:
atemp = re.split('\s+', ent)
catg_dict[atemp[0]] = atemp[1]
return catg_dict
#--------------------------------------------------------------------------------
if __name__ == "__main__":
#
#--- Create a lock file and exit strategy in case of race conditions
#
name = os.path.basename(__file__).split(".")[0]
user = getpass.getuser()
if os.path.isfile(f"/tmp/{user}/{name}.lock"):
sys.exit(f"Lock file exists as /tmp/{user}/{name}.lock. Process already running/errored out. Check calling scripts/cronjob/cronlog.")
else:
os.system(f"mkdir -p /tmp/mta; touch /tmp/{user}/{name}.lock")
parser = argparse.ArgumentParser()
parser.add_argument('-p','--period',help='Process specific time length. Choices are last two weeks, 1.5 years, or since 1999:201 respectively', \
action="extend",nargs='*',type=str, choices=["week","short","long"])
parser.add_argument("-m","--msid_list",help="File name of msid list to use from housekeeping",type=str)
parser.add_argument("--msid", help="Process specific MSID",type=str)
parser.add_argument("--start", help="Start time in seconds from 1998.1.1",type=float)
parser.add_argument("--stop", help="Stop time in seconds from 1998.1.1",type=float)
args = parser.parse_args()
if args.msid is not None:
[lim_dict, cnd_dict] = rlt.get_limit_table()
alimit = lim_dict[args.msid]
cnd_msid = cnd_dict[args.msid]
if args.period is not None:
for dtype in args.period:
if args.msid is not None:
extract_data_from_ska(args.msid, args.start, args.stop, dtype, alimit, cnd_msid)
elif args.msid_list is not None:
run_for_msid_list(args.msid_list, dtype)
else:
run_glimmon_trend_data_update()
#
#--- Remove lock file once process is completed
#
os.system(f"rm /tmp/{user}/{name}.lock")
|
import cv2
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import math
import pywt
import pywt.data
denoised_level = 3
def sgn(num):
if(num > 0.0):
return 1.0
elif(num == 0.0):
return 0.0
else:
return -1.0
# Construct Gabor filter
def build_filters():
filters = []
ksize = [7,9,11,13,15,17] # 6 gabor scales
lamda = np.pi/2.0 # wave length
    for theta in np.arange(0, np.pi, np.pi / 4): # gabor directions: 0°, 45°, 90°, 135° (four in total)
for K in range(6):
kern = cv2.getGaborKernel((ksize[K], ksize[K]), 1.0, theta, lamda, 0.5, 0, ktype=cv2.CV_32F)
kern /= 1.5*kern.sum()
filters.append(kern)
plt.figure(1)
# plot gabor filters
for temp in range(len(filters)):
plt.subplot(4, 6, temp + 1)
plt.imshow(filters[temp])
plt.show()
return filters
# Gabor filtering process
def process(img, filters):
accum = np.zeros_like(img)
for kern in filters:
fimg = cv2.filter2D(img, cv2.CV_8UC1,kern)
np.maximum(accum, fimg, accum)
return accum
# Gabor features extraction
def getGabor(img,filters):
res = [] # filter result
for i in range(len(filters)):
res1 = process(img, filters[i])
res.append(np.asarray(res1))
# Demonstrate filter result
plt.figure(2)
for temp in range(len(res)):
plt.subplot(4,6,temp+1)
plt.imshow(res[temp], cmap='gray' )
plt.show()
return res
def denoise(original_img,lev):
# Wavelet transform of image
coeffs2 = pywt.wavedec2(original_img, 'bior3.5', level = denoised_level)
    ### 'a' parameter in the hybrid (hard/soft) thresholding function
a = 0.5
##################Denoise#########################
thcoeffs2 =[]
for t in range(1, len(coeffs2)):
tempcoeffs2 = []
for i in range(0,3):
tmp = coeffs2[t][i].copy()
Sum = 0.0
for j in coeffs2[t][i]:
for x in j:
Sum = Sum + abs(x)
N = coeffs2[t][i].size
Sum = (1.0 / float(N)) * Sum
sigma = (1.0 / 0.6745) * Sum
lamda = sigma * math.sqrt(2.0 * math.log(float(N), math.e))
            # apply the hybrid threshold element-wise; the original nested loop only
            # reassigned the loop variable and left tmp unchanged
            tmp = np.where(np.abs(tmp) >= lamda,
                           np.sign(tmp) * (np.abs(tmp) - a * lamda),
                           0.0)
tempcoeffs2.append(tmp)
thcoeffs2.append(tempcoeffs2)
usecoeffs2 = []
usecoeffs2.append(coeffs2[0])
usecoeffs2.extend(thcoeffs2)
    # denoised_img corresponds to the denoised image
denoised_img = pywt.waverec2(usecoeffs2, 'bior3.5')
##################Display#########################
titles = ['Initial Image', ' Denoised Image']
fig = plt.figure(figsize=(12, 3))
#Display the original image
ax = fig.add_subplot(1, 2, 1)
ax.imshow(original_img, interpolation="nearest", cmap=plt.cm.gray)
ax.set_title(titles[0], fontsize=10)
ax.set_xticks([])
ax.set_yticks([])
#Display the denoised image
ax = fig.add_subplot(1, 2, 2)
ax.imshow(denoised_img, interpolation="nearest", cmap=plt.cm.gray)
ax.set_title(titles[1], fontsize=10)
ax.set_xticks([])
ax.set_yticks([])
fig.tight_layout()
plt.show()
return denoised_img
def get_glgcm_features(mat):
    '''Calculate texture features based on the Gray Level-Gradient Co-occurrence Matrix: small gradients dominance, big gradients dominance, gray level asymmetry, gradients asymmetry, energy, gray level mean, gradients mean,
    gray level variance, gradients variance, correlation, gray level entropy, gradients entropy, mixed entropy, inertia and inverse difference moment'''
sum_mat = mat.sum()
small_grads_dominance = big_grads_dominance = gray_asymmetry = grads_asymmetry = energy = gray_mean = grads_mean = 0
gray_variance = grads_variance = corelation = gray_entropy = grads_entropy = entropy = inertia = differ_moment = 0
sum_of_squares = 0
for i in range(mat.shape[0]):
gray_variance_temp = 0
for j in range(mat.shape[1]):
small_grads_dominance += mat[i][j] / ((j + 1) ** 2)
big_grads_dominance += mat[i][j] * j ** 2
energy += mat[i][j] ** 2
if mat[i].sum() != 0:
gray_entropy -= mat[i][j] * np.log(mat[i].sum())
if mat[:, j].sum() != 0:
grads_entropy -= mat[i][j] * np.log(mat[:, j].sum())
if mat[i][j] != 0:
entropy -= mat[i][j] * np.log(mat[i][j])
inertia += (i - j) ** 2 * np.log(mat[i][j])
differ_moment += mat[i][j] / (1 + (i - j) ** 2)
gray_variance_temp += mat[i][j] ** 0.5
gray_asymmetry += mat[i].sum() ** 2
gray_mean += i * mat[i].sum() ** 2
gray_variance += (i - gray_mean) ** 2 * gray_variance_temp
for j in range(mat.shape[1]):
grads_variance_temp = 0
for i in range(mat.shape[0]):
grads_variance_temp += mat[i][j] ** 0.5
grads_asymmetry += mat[:, j].sum() ** 2
grads_mean += j * mat[:, j].sum() ** 2
grads_variance += (j - grads_mean) ** 2 * grads_variance_temp
small_grads_dominance /= sum_mat
big_grads_dominance /= sum_mat
gray_asymmetry /= sum_mat
grads_asymmetry /= sum_mat
gray_variance = gray_variance ** 0.5
grads_variance = grads_variance ** 0.5
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
corelation += (i - gray_mean) * (j - grads_mean) * mat[i][j]
glgcm_features = [small_grads_dominance, big_grads_dominance, gray_asymmetry, grads_asymmetry, energy, gray_mean, grads_mean,
gray_variance, grads_variance, corelation, gray_entropy, grads_entropy, entropy, inertia, differ_moment]
return np.round(glgcm_features, 4)
def glgcm(original_img, ngrad=16, ngray=16):
    '''Gray Level-Gradient Co-occurrence Matrix: after normalization, both the gray level values and the gradient values are quantized into 16 levels'''
img_gray = denoise(original_img,denoised_level)
    # use the Sobel operator to calculate the gradient in the x and y directions
gsx = cv2.Sobel(img_gray, cv2.CV_64F, 1, 0, ksize=3)
gsy = cv2.Sobel(img_gray, cv2.CV_64F, 0, 1, ksize=3)
height, width = img_gray.shape
grad = (gsx ** 2 + gsy ** 2) ** 0.5 # Calculate gradients
grad = np.asarray(1.0 * grad * (ngrad-1) / grad.max(), dtype=np.int16)
gray = np.asarray(1.0 * img_gray * (ngray-1) / img_gray.max(), dtype=np.int16) # range 0-255 transformed into 0-15
gray_grad = np.zeros([ngray, ngrad]) # Gray Level-Gradient Co-occurrence Matrix
for i in range(height):
for j in range(width):
gray_value = gray[i][j]
grad_value = grad[i][j]
gray_grad[gray_value][grad_value] += 1
gray_grad = 1.0 * gray_grad / (height * width) # Normalize gray level-gradient co-occurrence matrix to reduce the amount of calculation
glgcm_features = get_glgcm_features(gray_grad)
return list(glgcm_features)
def features(img):
features = glgcm(img, ngrad=16, ngray=16)
    coeffs2 = pywt.wavedec2(img, 'coif5') # use the 2D discrete wavelet transform to derive approximation and detail coefficients at every scale up to the maximum decomposition level
#coeffs2 = getGabor(img, build_filters())
energy_wav = []
tmp = coeffs2[0].copy()
Sum = 0
tmp = 1.0 * tmp / (tmp.size) # normalize coefficient array
for x in tmp:
for y in x:
Sum += pow(y,2) # calculate energy of each coefficient component
energy_wav.append(Sum)
for t in range(1, len(coeffs2)):
for i in range(0,3):
tmp = coeffs2[t][i].copy()
tmp = 1.0 * tmp / (tmp.size)
Sum = 0
for x in tmp:
for y in x:
Sum += pow(y,2) # calculate energy of each coefficient component
energy_wav.append(Sum)
    # select the GLGCM energy and inverse difference moment plus the wavelet sub-band energies to form the final feature vector
vector = [features[4],features[14]]
vector.extend(energy_wav)
return vector
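# A minimal usage sketch (the file name is hypothetical); the image is read as a
# single-channel grayscale array so the denoising, Sobel and GLGCM steps receive
# a 2-D input:
#
#     img = cv2.imread('sample.png', cv2.IMREAD_GRAYSCALE)
#     feature_vector = features(img)
#     print(feature_vector)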
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from radius_curve import measure_curvature_real
from curve_pixels import measure_curvature_pixels
from prev_poly import fit_poly, search_around_poly
from sliding_window import find_lane_pixels, fit_polynomial
from GradientHelpers import dir_threshold, mag_thresh
from process_image import process_image
from HelperFunctions import weighted_img, draw_lines, extrapolateLine
plt.ion()
image = mpimg.imread('../images/bridge_shadow.jpg')
# Edit this function to create your own pipeline.
def pipeline(img, s_thresh=(170, 255), sx_thresh=(15, 100)):
img = np.copy(img)
# Convert to HLS color space and separate the V channel
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
h_channel = hls[:,:,0]
h_thresh = (20,35)
sy_thresh = (0,100)
l_channel = hls[:,:,1]
s_channel = hls[:,:,2]
mag_binary = mag_thresh(image, sobel_kernel=9, mag_thresh=(10, 255))
# Sobel x
sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
scaled_sobel_x = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
# Sobel y
    sobely = cv2.Sobel(l_channel, cv2.CV_64F, 0, 1) # Take the derivative in y
    abs_sobely = np.absolute(sobely) # Absolute y derivative to accentuate lines away from vertical
scaled_sobel_y = np.uint8(255*abs_sobely/np.max(abs_sobely))
# Threshold x gradient
sxbinary = np.zeros_like(scaled_sobel_x)
sxbinary[(scaled_sobel_x >= sx_thresh[0]) & (scaled_sobel_x <= sx_thresh[1])] = 1
# Threshold y gradient
sybinary = np.zeros_like(scaled_sobel_y)
sybinary[(scaled_sobel_y >= sy_thresh[0]) & (scaled_sobel_y <= sy_thresh[1])] = 1
# Threshold s-color channel
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
# Threshold h-color channel
h_binary = np.zeros_like(h_channel)
h_binary[(h_channel >= h_thresh[0]) & (h_channel <= h_thresh[1])] = 1
# Stack each channel
color_binary = np.dstack(( np.zeros_like(sxbinary), sxbinary, s_binary)) * 255
dir_binary = dir_threshold(image, sobel_kernel=9, thresh=(0.7, 1.3))
# Combine the two binary thresholds
combined_binary = np.zeros_like(dir_binary)
# combined_binary[((s_binary == 1) & (h_binary == 1)) | (sxbinary == 1)] = 1
# combined_binary[(( (sxbinary == 1)) | (dir_binary==1)) | ((s_binary==1) )] = 1
# combined_binary[(h_binary==1)] = 1
combined_binary[(((s_binary == 1) & (dir_binary==1)) | ((sxbinary == 1))) | ((h_binary==1))] = 1
return combined_binary
combined_binary = pipeline(image)
combined_binary = np.uint8(255*combined_binary/np.max(combined_binary))
# This time we are defining a four sided polygon to mask
imshape = combined_binary.shape
imageHeight = imshape[0]
imageWidth = imshape[1]
img_size = (imageWidth, imageHeight)
upperLeftVertex = imageWidth/2 - imageWidth/34, imageHeight/1.8
upperRightVertex = imageWidth/2 + imageWidth/34, imageHeight/1.8
lowerRightVertex = imageWidth*0.925, imageHeight
lowerLeftVertex = imageWidth*0.075, imageHeight
# plt.imshow(combined_binary)
# plt.plot(720, 450, '.')
# plt.plot(1100, 700, '.')
# plt.plot(200, 700, '.')
# plt.plot(600, 450, '.')
src = np.float32([[720,450],
[1100,700],
[200,700],
[600,450]])
dst = np.float32([[1000,200],
[1000,700],
[200,700],
[200,200]])
# For source points I'm grabbing the outer four detected corners
# src = np.float32([upperRightVertex, lowerRightVertex, lowerLeftVertex, upperLeftVertex])
# # For destination points, I'm arbitrarily choosing some points to be
# # a nice fit for displaying our warped result
# # again, not exact, but close enough for our purposes
# dst = np.float32([[700,400],[700,700],[400,700],[500,400]])
# Given src and dst points, calculate the perspective transform matrix
M = cv2.getPerspectiveTransform(src, dst)
# Warp the image using OpenCV warpPerspective()
warped = cv2.warpPerspective(combined_binary, M, img_size)
def hist(img):
# Grab only the bottom half of the image
# Lane lines are likely to be mostly vertical nearest to the car
bottom_half = img[img.shape[0]//2:,:]
# Sum across image pixels vertically - make sure to set an `axis`
# i.e. the highest areas of vertical lines should be larger values
histogram = np.sum(bottom_half, axis=0)
return histogram
# Create histogram of image binary activations
# histogram = hist(warped)
# Visualize the resulting histogram
# plt.plot(histogram)
# Run image through the pipeline
# Note that in your project, you'll also want to feed in the previous fits
result = search_around_poly(warped)
# View your output
plt.imshow(result)
# Calculate the radius of curvature in meters for both lane lines
left_curverad, right_curverad = measure_curvature_real()
print(left_curverad, 'm', right_curverad, 'm')
# Should see values of 533.75 and 648.16 here, if using
# the default `generate_data` function with given seed number
# processed_image = process_image(warped)
# color=[int(255),int(0),int(0)]
# cv2.line(combined_binary, lowerRightVertex, upperRightVertex, color)
# cv2.line(combined_binary, lowerLeftVertex, upperLeftVertex, color)
# Plot the result
# f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
# f.tight_layout()
# ax1.imshow(image)
# ax1.set_title('Original Image', fontsize=40)
# ax2.imshow(warped)
# ax2.set_title('Combined', fontsize=40)
# plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
plt.savefig('../images/pipeline_output.jpg')
|
from selenium import webdriver
from selenium.webdriver import ActionChains
driver = webdriver.Ie()
driver.get("https://pan.baidu.com/")
driver.find_element_by_xpath("//*/div[@class='account-title']/a").click()
driver.find_element_by_xpath("//*/input[@id='TANGRAM__PSP_4__userName']").clear()
driver.find_element_by_xpath("//*/input[@id='TANGRAM__PSP_4__userName']").send_keys('13072723917')
driver.find_element_by_xpath("//*/input[@id='TANGRAM__PSP_4__password']").send_keys('xs3652302')
driver.find_element_by_xpath("//*/input[@id='TANGRAM__PSP_4__submit']").click()
ebook = driver.find_element_by_xpath('//*[@id="layoutMain"]/div/div[2]/div/div[3]/div/div/dd[3]/div[2]/div[1]/a')
#dd.g-clearfix:nth-child(3) > div:nth-child(3) > div:nth-child(1) > a:nth-child(1)
#dd.g-clearfix:nth-child(2) > div:nth-child(3) > div:nth-child(1) > a:nth-child(1)
#//*[@id="layoutMain"]/div/div/div[2]/div/div/div[3]/div/div/dd[3]/div[2]/div[1]/a
#/html/body/div[1]/div[2]/div[2]/div/div/div/div/div/div[2]/div/div/div[3]/div/div/dd[3]/div[2]/div[1]/a
#<a href="javascript:void(0);" class="avr1JMg" title="电子书">电子书</a>
ActionChains(driver).context_click(ebook).perform()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from paypal.pro.models import PayPalNVP
class PayPalNVPAdmin(admin.ModelAdmin):
list_display = ('user', 'ipaddress', 'method', 'flag', 'flag_code', 'created_at')
list_filter = ('flag', 'created_at')
search_fields = ('user__email', 'ipaddress', 'flag', 'firstname', 'lastname')
admin.site.register(PayPalNVP, PayPalNVPAdmin)
|
import sched, time
import requests
import json
import random
temp = 70
hum = 50
s = sched.scheduler(time.time, time.sleep)
print('Start')
def fakeData(data):
    # add random jitter and return the new reading (the original version returned None)
    return data + random.randrange(-3, 3)
def send_info(sc):
    global temp, hum
    temp = fakeData(temp)
    hum = fakeData(hum)
    payload = {'temp': temp, 'hum': hum}
    r = requests.post("http://localhost:8000/", data=json.dumps(payload))
    print(r.text)
    s.enter(1, 1, send_info, (sc,))
s.enter(1, 1, send_info , (s,))
s.run()
|
#region Import Modules
from fluid_properties import *
from auxiliary_functions import *
import numpy as np
import pandas as pd
import math
from scipy import interpolate
from pyXSteam.XSteam import XSteam
import matplotlib.pyplot as plt
import pprint
#endregion
case = {'material': ['air'], 'environment_conditions': ['case_1'], 'L': [0.4], 'W': [0.7], 'H': [2], 'epsilon': [0.94], 'H_rad': [0.6], 'L_rad':[0.3]}
df_case = pd.DataFrame(case)
case_nr=0
f_veq = 0.7
T_on = 0
T_off = 10
Q_resistance = 1000
environment_xls = pd.ExcelFile('environment_conditions.xlsx')
df_environment = environment_xls.parse(df_case['environment_conditions'].loc[case_nr])
material=df_case['material'].loc[case_nr]
H=df_case['H'].loc[case_nr]
L=df_case['L'].loc[case_nr]
W=df_case['W'].loc[case_nr]
L_rad=df_case['L_rad'].loc[case_nr]
H_rad=df_case['H_rad'].loc[case_nr]
epsilon=df_case['epsilon'].loc[case_nr]
V=L*W*H*f_veq
#row = [1.4, 1.01, 0.4,0.7,2] #adding new row to dataframe
#df_case.loc[len(df_case)] = row
#inputs:
t_step = 60 * 10   #[s]
t_total = 24       #[h]
t=np.arange(0, t_total*3600,t_step)
T=np.zeros((math.ceil(t_total*3600/t_step),1))
rho=np.zeros((math.ceil(t_total*3600/t_step),1))
cp=np.zeros((math.ceil(t_total*3600/t_step),1))
Q_rad=np.zeros((math.ceil(t_total*3600/t_step),1))
Q_rademit=np.zeros((math.ceil(t_total*3600/t_step),1))
Q_loss=np.zeros((math.ceil(t_total*3600/t_step),1))
Q_diss=np.zeros((math.ceil(t_total*3600/t_step),1))
Q_heating=np.zeros((math.ceil(t_total*3600/t_step),1))
T_amb=np.zeros((math.ceil(t_total*3600/t_step),1))
I_rad=np.zeros((math.ceil(t_total*3600/t_step),1))
v_wind=np.zeros((math.ceil(t_total*3600/t_step),1))
fi_tamb=interpolate.interp1d(df_environment['t']*3600, df_environment['T_amb'])
T_amb[:,0]=fi_tamb(t)
fi_irad=interpolate.interp1d(df_environment['t']*3600, df_environment['I_rad'])
I_rad[:,0]=fi_irad(t)
fi_vwind=interpolate.interp1d(df_environment['t']*3600, df_environment['v_wind'])
v_wind[:,0]=fi_vwind(t)
T[0]=T_amb[0] #Cabinet is in thermal equilibrium with environment
A_ht = 2 * ( L * W + L * H + W * H) #[m^2]
A_rad=H_rad*L_rad
L_characteristic=H
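# The loop below is a lumped-capacitance energy balance advanced with an explicit
# Euler step: the heat flows (solar gain, convective loss, radiative exchange,
# internal dissipation and resistance heating) are summed, multiplied by the time
# step and divided by the thermal mass rho*V*cp (with the extra factor of 1000
# applied to cp in the update) to obtain the temperature change per step.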
for i in range(1,len(t)):
cp[i-1]=thermal_properties('cp', material, T[i-1])
rho[i - 1] = thermal_properties('rho', material, T[i - 1])
cp[i-1]=1005
rho[i-1]=1.2
Q_heating[i]=heating_resistance(T[i-1],Q_resistance,T_on,T_off)
#Q_loss[i]=external_natural_convection(T[i-1], T_amb[i], material, L_characteristic, A_ht)
Q_loss[i]=external_forced_convection(T[i-1],T_amb[i],material,v_wind[i],L,W,H)
Q_rademit[i]=epsilon * (5.67 * 10 ** (-8)) * A_ht * ((T_amb[i]+273.15) ** 4 - (T[i - 1]+273.15) ** 4)
#Q_rad[i] = I_rad[i] * A_rad
T[i]=T[i-1]+((Q_rad[i]+Q_loss[i]+Q_rademit[i]+Q_diss[i]+Q_heating[i])*((t[i]-t[i-1])/(rho[i-1]*V*cp[i-1]*1000)))
if i % 360 == 0:
print('Iteration: ' + str(i) + '; T= ' + str(T[i]))
#region Plots
fig, (ax1, ax2) = plt.subplots(2, sharex=True)
fig.suptitle('1-D Transient Model')
ax1.plot(t, T,'b-')
ax1.plot(t, T_amb,'k-')
ax2.plot(t, Q_heating,'r')
#endregion
|
import torch
from torch.utils import benchmark
from .modules import MSA
batch, seqlen, dmodel, h = 2, 9, 512, 8
x = torch.randn(batch, seqlen, dmodel)
msa = MSA(dmodel, h)
t_cpu = benchmark.Timer(
stmt="with torch.no_grad(): msa.forward_einsum(x)", globals={"x": x, "msa": msa}
)
print(t_cpu.timeit(100))
msa = MSA(dmodel, h).cuda()
x = torch.randn(batch, seqlen, dmodel).cuda()
t_gpu = benchmark.Timer(
stmt="with torch.no_grad(): msa.forward_einsum(x)", globals={"x": x, "msa": msa}
)
print(t_gpu.timeit(100))
"""
<torch.utils.benchmark.utils.common.Measurement object at 0x000001C2BAA16730>
with torch.no_grad(): msa.forward_einsum(x)
585.25 us
1 measurement, 100 runs , 1 thread
<torch.utils.benchmark.utils.common.Measurement object at 0x000001C2BAA16760>
with torch.no_grad(): msa.forward_einsum(x)
308.52 us
1 measurement, 100 runs , 1 thread
"""
|
def re_ordering(text):
output = text.split()
for x in text.split():
if x[0].isupper():
output.remove(x)
output.insert(0, x)
return " ".join(output)
'''
There is a sentence which has a mistake in its ordering.
The part with a capital letter should be the first word.
Please build a function for re-ordering
Examples
>>> re_ordering('ming Yao')
'Yao ming'
>>> re_ordering('Mano donowana')
'Mano donowana'
>>> re_ordering('wario LoBan hello')
'LoBan wario hello'
>>> re_ordering('bull color pig Patrick')
'Patrick bull color pig'
'''
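# A minimal sketch of calling the function (hypothetical usage, not part of the
# original file):
#     print(re_ordering('wario LoBan hello'))   # -> 'LoBan wario hello'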
|
from django.views.generic import TemplateView, FormView
from django.core.urlresolvers import reverse
from django.conf import settings
from kazoo.client import KazooClient
from bees.forms import CreateNodeForm, EditNodeForm, DeleteNodeForm
ZK_CLIENT = KazooClient(hosts=settings.ZOOKEEPER_HOSTS)
ZK_CLIENT.start()
class ZookeeperClientMixin(object):
@property
def zk_client(self):
if hasattr(self, "_zk_client"):
return self._zk_client
self._zk_client = ZK_CLIENT
return self._zk_client
class PathMixin(object):
@property
def node_path(self):
return self.kwargs.get('path')
def get_initial(self):
result = super(PathMixin, self).get_initial()
result['path'] = self.node_path
return result
def get_context_data(self, **kwargs):
context_data = super(PathMixin, self).get_context_data(**kwargs)
context_data['node_path'] = self.node_path
return context_data
class NodeValueMixin(object):
"""Needs the ZookeeperClientMixin"""
_node_value = None
@property
def node_info(self):
return self.zk_client.get(self.node_path)
@property
def node_value(self):
if not self._node_value:
value, stats = self.zk_client.get(self.node_path)
self._node_value = value
return self._node_value
def get_initial(self):
result = super(NodeValueMixin, self).get_initial()
result['value'] = self.node_value
return result
def get_context_data(self, **kwargs):
context_data = super(NodeValueMixin, self).get_context_data(**kwargs)
value, stats = self.zk_client.get(self.node_path)
context_data['value'] = value
context_data['stats'] = stats
return context_data
class SetActiveViewMixin(object):
def get_context_data(self, **kwargs):
context = super(SetActiveViewMixin, self).get_context_data(**kwargs)
context['active_nav_menu'] = {
self.request.resolver_match.url_name: ' class="active"'
}
return context
class DirectoryListingMixin(object):
"""Needs the ZookeeperClientMixin"""
def get_context_data(self, **kwargs):
context_data = super(DirectoryListingMixin, self).get_context_data(**kwargs)
context_data['directories'] = self.zk_client.get_children(self.node_path)
return context_data
class BrowseNodeView(SetActiveViewMixin, DirectoryListingMixin, NodeValueMixin, PathMixin, ZookeeperClientMixin, TemplateView):
template_name = 'bees/browse_node.html'
class DeleteNodeView(SetActiveViewMixin, PathMixin, ZookeeperClientMixin, FormView):
template_name = 'bees/delete_node.html'
form_class = DeleteNodeForm
def form_valid(self, form):
result = super(DeleteNodeView, self).form_valid(form)
self.zk_client.delete(form.cleaned_data.get('path'))
return result
@property
def formatted_parent_path(self):
return '/' + '/'.join([x for x in self.node_path.split('/') if x][:-1]) + '/'
def get_success_url(self):
return reverse('bees:browse_node', kwargs={'path': self.formatted_parent_path})
class EditNodeView(SetActiveViewMixin, NodeValueMixin, PathMixin, ZookeeperClientMixin, FormView):
template_name = 'bees/edit_node.html'
form_class = EditNodeForm
def form_valid(self, form):
result = super(EditNodeView, self).form_valid(form)
zk_upload_value = str(form.cleaned_data.get('value'))
self.zk_client.set(form.cleaned_data.get('path'), zk_upload_value)
return result
def get_success_url(self):
return reverse('bees:browse_node', kwargs={'path': self.node_path})
class CreateNodeView(SetActiveViewMixin, PathMixin, ZookeeperClientMixin, FormView):
template_name = 'bees/create_node.html'
form_class = CreateNodeForm
def form_valid(self, form):
result = super(CreateNodeView, self).form_valid(form)
new_node_path = form.cleaned_data.get('path') + form.cleaned_data.get('node_name')
self.zk_client.ensure_path(new_node_path)
value = form.cleaned_data.get('value', '')
if value:
self.zk_client.set(new_node_path, value.encode())
return result
def get_success_url(self):
return reverse('bees:browse_node', kwargs={'path': self.node_path})
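# A hypothetical URLconf sketch for wiring up the views above; the reverse()
# calls assume this module is included under the 'bees' namespace. The regexes,
# module path and url() usage (pre-Django-2.0, matching the
# django.core.urlresolvers import above) are illustrative, not the original
# project's routing, and belong in a separate urls.py:
#
# from django.conf.urls import url
# from bees import views
#
# urlpatterns = [
#     url(r'^browse(?P<path>.*)$', views.BrowseNodeView.as_view(), name='browse_node'),
#     url(r'^create(?P<path>.*)$', views.CreateNodeView.as_view(), name='create_node'),
#     url(r'^edit(?P<path>.*)$', views.EditNodeView.as_view(), name='edit_node'),
#     url(r'^delete(?P<path>.*)$', views.DeleteNodeView.as_view(), name='delete_node'),
# ]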
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from string import Template
from textwrap import indent
DEFAULT_TEMPLATE = """
def make_exe():
dist = default_python_distribution()
policy = dist.make_python_packaging_policy()
policy.extension_module_filter = "no-copyleft"
# Note: Adding this for pydantic and libs that have the "unable to load from memory" error
# https://github.com/indygreg/PyOxidizer/issues/438
policy.resources_location_fallback = "filesystem-relative:lib"
python_config = dist.make_python_interpreter_config()
$RUN_MODULE
exe = dist.to_python_executable(
name="$NAME",
packaging_policy=policy,
config=python_config,
)
exe.add_python_resources(exe.pip_install($WHEELS))
$UNCLASSIFIED_RESOURCE_INSTALLATION
return exe
def make_embedded_resources(exe):
return exe.to_embedded_resources()
def make_install(exe):
# Create an object that represents our installed application file layout.
files = FileManifest()
# Add the generated executable to our install layout in the root directory.
files.add_python_resource(".", exe)
return files
register_target("exe", make_exe)
register_target("resources", make_embedded_resources, depends=["exe"], default_build_script=True)
register_target("install", make_install, depends=["exe"], default=True)
resolve_targets()
"""
UNCLASSIFIED_RESOURCES_TEMPLATE = """
for resource in exe.pip_install($UNCLASSIFIED_RESOURCES):
resource.add_location = "filesystem-relative:lib"
exe.add_python_resource(resource)
"""
@dataclass(frozen=True)
class PyOxidizerConfig:
executable_name: str
wheels: list[str]
entry_point: str | None = None
template: str | None = None
unclassified_resources: list[str] | None = None
@property
def run_module(self) -> str:
return (
f"python_config.run_module = '{self.entry_point}'"
if self.entry_point is not None
else ""
)
def render(self) -> str:
unclassified_resource_snippet = ""
if self.unclassified_resources is not None:
unclassified_resource_snippet = Template(
UNCLASSIFIED_RESOURCES_TEMPLATE
).safe_substitute(UNCLASSIFIED_RESOURCES=self.unclassified_resources)
unclassified_resource_snippet = indent(unclassified_resource_snippet, " ")
template = Template(self.template or DEFAULT_TEMPLATE)
return template.safe_substitute(
NAME=self.executable_name,
WHEELS=self.wheels,
RUN_MODULE=self.run_module,
UNCLASSIFIED_RESOURCE_INSTALLATION=unclassified_resource_snippet,
)
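# A short, hypothetical usage sketch for PyOxidizerConfig.render(); the
# executable name, wheel filename and entry point below are illustrative
# values, not taken from the Pants codebase.
if __name__ == "__main__":
    example = PyOxidizerConfig(
        executable_name="helloworld",
        wheels=["helloworld-0.1.0-py3-none-any.whl"],
        entry_point="helloworld.main",
    )
    print(example.render())  # prints the generated PyOxidizer .bzl configuration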
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
File Name: proxy.py
Description: Automatically fetches proxy IPs from the Daxiang proxy (大象代理) service
Author: Dexter Chen
Date:2017-09-16
-------------------------------------------------
"""
import os
import requests
import utilities as ut
import config
def retrieve_proxy(proxy_number):
api_url = "http://vtp.daxiangdaili.com/ip/?tid=559131754091145&num=" + \
str(proxy_number) + "&delay=1&sortby=time"
proxies = requests.get(api_url, timeout=10).text
proxy_pool = []
for proxy in proxies.split("\n"):
proxy_record = [ut.time_str("full"), proxy, 0, 0, 0]  # [fetch_time, ip:port, times_used, fail_count, consecutive_fail_count]; a list so the counters can be updated in place
proxy_pool.append(proxy_record)
return proxy_pool
def is_usable(proxy_record):  # check whether this proxy can still be used
if int(proxy_record[2]) < config.proxy_max_used and int(proxy_record[3]) < config.proxy_max_fail and proxy_record[4] < config.proxy_max_c_fail:  # must stay under the limits on uses, total failures and consecutive failures
return True
else:
return False
def update_pool(proxy_pool):
if len(proxy_pool):
proxy_pool = filter(is_usable, proxy_pool)
if len(proxy_pool) < config.proxy_pool_size:
proxy_pool += retrieve_proxy(config.proxy_pool_size - len(proxy_pool))  # top up however many proxies are missing
proxy_pool = sorted(proxy_pool, key=lambda x: x[2])  # sort by usage count so the least-used proxy is picked first
return proxy_pool
def is_online():
status = os.system("ping -c 1 www.163.com")
if status == 0:
return True
else:
return False
def get_proxy(proxy_pool):  # called from the rest of the program to obtain a proxy
proxy_pool = update_pool(proxy_pool)
proxy_pool[0][2] += 1  # record one more use
return proxy_pool[0][1]
if __name__ == '__main__':
print retrieve_proxy(1)
|
import re
import sys
import json
import codecs
from ..feature_extractors import normalizer
from ..feature_extractors import tokenizer
input_filename = 'comments.json'
output_filename = 'comment_text.txt'
with codecs.open(input_filename, 'r', encoding='utf-8') as input_file:
lines = input_file.readlines()
body_count = 0
with codecs.open(output_filename, 'w', encoding='utf-8') as output_file:
for line in lines:
parts = re.split('^data: ', line)
if len(parts) > 1:
text = parts[1]
try:
data = json.loads(text)  # text is already a decoded string
if 'body' in data:
body = data['body']
body = body.encode('ascii', 'ignore').decode('ascii')  # strip non-ASCII characters
body = re.sub('\n', ' ', body)
body = normalizer.fix_basic_punctuation(body)
tokens = []
sentences = tokenizer.split_sentences(body)
for s in sentences:
sent_tokens = tokenizer.make_ngrams(s, 1, reattach=True, split_off_quotes=True)
tokens = tokens + sent_tokens
output_file.write(' '.join(tokens) + "\n")
body_count += 1
except Exception:
#e = sys.exc_info()[0]
#print e
pass
print("body count =", body_count)
print("total count =", len(lines))
|
from fabric.api import run, task, sudo, settings, env
from fabric.tasks import execute
from fabric.contrib.console import confirm
from fabric.contrib.files import exists
from dateutil.parser import parse
from appconfig import APPS
from appconfig.config import App
from appconfig.tasks.deployment import pip_freeze
env.hosts = APPS.hostnames
ACC = [] # A global accumulator to store results across tasks
from appconfig.tasks import letsencrypt
@task
def ls():
"""list installed clld apps"""
sudo('supervisorctl avail')
sudo('psql -l', user='postgres')
@task
def renew_certs():
if confirm("Renew certificates: " + env.host_string + "?", default=False):
letsencrypt.require_certbot()
if exists('/etc/letsencrypt/live'):
certs = set(sudo('ls -1 /etc/letsencrypt/live').split())
else:
certs = set()
apps = set(a.domain for a in APPS.values() if a.production == env['host'])
apps.add(env['host'])
for cert in certs - apps:
# Obsolete certificate! The app is no longer deployed on this host.
letsencrypt.delete(cert)
for app in apps - certs:
letsencrypt.require_cert(app)
with settings(warn_only=True):
letsencrypt.renew()
@task
def last_deploy():
global ACC
with settings(warn_only=True):
for a in APPS.values():
if a.production == env.host and exists(str(a.config)):
res = parse(run('stat -c "%y" {0}'.format(a.config)))
ACC.append((a.name, res))
if env.host == env.hosts[-1]:
maxname = max(len(t[0]) for t in ACC)
for a, dt in sorted(ACC, key=lambda t: t[1], reverse=True):
print('{0}{1}'.format(
a.ljust(maxname + 1), dt.isoformat().replace('T', ' ').split('.')[0]))
@task
def pip_freeze_all():
"""
Attempts to write requirements.txt files for all apps in apps.ini into
their respective apps folder.
"""
def helper(app):
if type(app) is App:
execute(pip_freeze, app, host=app.production)
with settings(warn_only=True):
for a in APPS.values():
helper(a)
|
from rest_framework import serializers
from photos.models import Photo, Comment
from users.serializers import UserSerializer, PhotoShowSerializer
class PhotoSerializer(serializers.ModelSerializer):
user = PhotoShowSerializer(read_only=True)
class Meta:
model = Photo
fields = ['id', 'image', 'caption', 'posted_at', 'user']
class CommentSerializer(serializers.ModelSerializer):
user = PhotoShowSerializer(read_only=True)
photo = PhotoSerializer(read_only=True)
class Meta:
model = Comment
fields = ['id', 'photo', 'user', 'description', 'commented_at', ]
|
import numpy as np
import tensorflow as tf
from sklearn import model_selection
from tensorflow.keras.layers import Dense
from tensorflow.keras import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
import warnings
warnings.filterwarnings('ignore')
data = np.loadtxt("../../../data/sonar.csv", dtype=str, delimiter=",")
y_data = data[:, -1:]
y_data_list = list()
for n in y_data:
if n == 'R':
y_data_list.append(0)
else:
y_data_list.append(1)
# m = map(lambda n: 0 if n == "R" else 1, data[:, -1])
x_data = np.float32(data[:, :-1])
y_data = np.array(y_data_list, dtype=np.float32).reshape(len(y_data_list), 1)
# print(y_data)
x_train, x_test, y_train, y_test = model_selection.train_test_split(x_data, y_data, test_size=0.3)
# print(x_train)
# print(x_test)
print(y_train.shape)
print(y_test.shape)
IO = Dense(units=1, input_shape=[60], activation="sigmoid")
model = Sequential([IO])
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
earlyStop = EarlyStopping(monitor="loss", patience=20, min_delta=0.001)
history = model.fit(x_train, y_train, epochs=10000, callbacks=[earlyStop])
# predict_classes applies the sigmoid threshold and casts the result to class labels before returning
# (note: Sequential.predict_classes was removed in newer TensorFlow releases; (model.predict(x_test) > 0.5).astype("int32") is the usual replacement)
predict = model.predict_classes(x_test)
print(predict)
model.evaluate(x_test, y_test)
|
"""secret_messages Class for the Affine Cipher"""
import string
from ciphers import Cipher
class AffineCipher(Cipher):
"""Class to encrypt/decrypt text with the Affine Cipher
Keyword Arguments:
Cipher = Top-level class that raises NotImplementedError if run
Methods:
__init__
encrypt
decrypt
"""
def __init__(self, a, b):
"""Method to create instance of the Affine cipher.
Description:
Maps each of the first 95 characters of string.printable to a number 0-94
Keyword Arguments:
a = multiplicative key; must be coprime with 95 so the cipher can be reversed
b = additive key (the shift applied after multiplication)
"""
self.a = a
self.b = b
self.ALPHA = string.printable[:95]
self.mapping = list(zip(self.ALPHA, range(95)))
def encrypt(self, text):
"""Method to encrypt text with the Affine cipher instance
Description:
Plug a and b into the ciphernum formula, add the result to the output
list and return
Return:
String version of the output list joined by ''.
Keyword Arguments:
text = text entered by user, to be encrypted
"""
output = []
# text = text.upper()
x_list = []
mods = []
for char in text:
for pair in self.mapping:
if char in pair:
x_list.append(pair[1])
for x in x_list:
ciphernum = (self.a * x + self.b) % 95
mods.append(ciphernum)
for num in mods:
for pair in self.mapping:
if num in pair:
output.append(pair[0])
return ''.join(output)
def decrypt(self, text):
"""Method to decrypt text with the Affine cipher instance
Description:
Reverse of the (a*x + b) % 95 formula
Return:
String version of the output list joined by ''.
Keyword Arguments:
text = ciphertext entered by user, to be decrypted
"""
output = []
# text = text.upper()
a1_list = []
x_list = []
mods = []
for num in list(range(200)):
if (self.a * num) % 95 == 1:
a1_list.append(num)
for char in text:
for pair in self.mapping:
if char in pair:
x_list.append(pair[1])
for x in x_list:
deciphernum = a1_list[0]*(x - self.b) % 95
mods.append(deciphernum)
for num in mods:
for pair in self.mapping:
if num in pair:
output.append(pair[0])
return ''.join(output)
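# A brief usage sketch (not part of the original module): the key pair a=7, b=8
# is an illustrative choice; a must be coprime with 95 so that the modular
# inverse found in decrypt() exists.
if __name__ == '__main__':
    cipher = AffineCipher(7, 8)
    secret = cipher.encrypt('Meet at noon')
    print(secret)
    print(cipher.decrypt(secret))  # round-trips back to 'Meet at noon'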
|
n = int(input())
string = input()
sub_len = 0
length = []
for i in range(n):
if string[i] == 'x':
sub_len += 1
if string[i] != 'x' or i == n - 1:
length.append(sub_len)
sub_len = 0
# print(string[i], sub_len)
print(sum([i-2 for i in length if i > 2]))
|
from users import Users, Logs, DB_PATH
from users import Interface
if __name__ == "__main__":
users = Users(db_path=DB_PATH)
logs = Logs(db_path=DB_PATH)
menu = Interface(users=users, logs=logs)
print("\nAll Users:")
print("pkey, user_email, pw, register_date, locked_until")
for i in users.read_all():
print(i)
print("\nAll logs:")
print("pkey, access_attempt_time, success, user_id")
for i in logs.read_all():
print(i)
|
import os
import cfg
def calc_dice(test_names, results_dir):
for p_name in test_names:
# TODO: feature: the original data still needs to be preprocessed
truth = str(cfg.seg_dir.joinpath(f'{p_name}_seg.mha'))
predict = str(results_dir.joinpath(f'{p_name}_prd_bin.png'))
output_xml = str(results_dir.joinpath(f'{p_name}.xml'))
cli = f'{cfg.evaluator} {truth} {predict} -use all -xml {output_xml}'
print(f'execute command: {cli}')
os.system(cli)
|
import logging
class Response(object):
"""The object sent back to the callback
Contains methods for calling senders and responders on Espresso
"""
def __init__(self, robot, msg, match):
self.robot = robot
self.msg = msg
self.match = match
def send(self, message, channel=None):
"""Sends a message to the messaging system."""
channel = channel or self.msg.channel.name  # prefer an explicitly passed channel, fall back to the message's channel
self.robot.send(message, channel)
def reply(self, user, message, channel=None):
"""Sends a message to the messaging system which is a reply to a user."""
channel = channel or self.msg.channel.name
logging.debug("message %s on channel #%s to user @%s", message, channel, user.name)
self.robot.send("@{}: {}".format(user.name, message), channel)
|
#coding: utf-8
print 'Welcome to the AWS snapshot rotation system'
print ''
print 'Option 1 - Register a volume to be deleted'
print 'Option 2 - List currently registered volumes'
option = str(raw_input("Enter the desired option (1 or 2): "))
if option == '1':
volumes = open("volumes.txt", "a")
vol = str(raw_input("Enter the volume name: "))
desc = str(raw_input("Enter the volume description: "))
ret = int(raw_input("Enter the number of retention days: "))
volumes.write("%s:%s:%s:\n" %(vol, desc, ret))
volumes.close()
if option == '2':
volumes = open("volumes.txt", "r")
line = volumes.read()
print line
volumes.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-04-25 12:05:34
# @Author : Fallen (xdd043@qq.com)
# @Link : https://github.com/fallencrasher/python-learning
# @Version : $Id$
'''
The file is stored in the following format:
id,name,age,phone,job
1,Alex,22,13651054608,IT
2,Egon,23,13304320533,Tearcher
3,nezha,25,1333235322,IT
We now need to support create, read, update and delete operations on this staff information file.
Required (basic):
a. Queries are supported, with this syntax:
select column1,column2,... where column_condition
Conditions must support greater than / less than / equals, plus fuzzy matching (like).
Examples:
select name, age where age>22
select * where job=IT
select * where phone like 133
# select queries this file
# select name,age where age>20
# select age,name,job where age > 20
# select age,name,job where age < 22
# select age,name,job where age = 22
# select age,name,job where name = 'alex'
# select age,name,job where job = 'IT'
# select age,name,job where phone like '133'
# select * where phone like '133'
Optional (advanced):
b. New staff records can be created; ids must increase sequentially.
c. A specified staff record can be deleted by entering the staff id.
d. Staff information can be modified.
Syntax: set column="new value" where condition
# First use where to find the matching person's record, then use set to change that column to the "new value".
Note: you must log in before operating on the staff table; the login check must be implemented with a decorator.
Implement the remaining requirements with functions wherever possible.
'''
import sys
import re
def auth(f):
def inner(*args,**kwargs):
ret = f(*args,**kwargs)
return ret
return inner
l = []
with open('staff_info.txt',encoding='utf-8') as f:
for i in f:
temp = i.strip().split(',',i.strip().count(','))
if temp[0].isdigit():
l.append({'id':temp[0],'name':temp[1],'age':temp[2],'phone':temp[3],'job':temp[4]})
# print(l)
def search(select,column,where,condition):
if select=='select':
tp = column.strip().split(',',column.strip().count(','))
#l2 = [{age:22,name:alex,job:it},{}]
l2 = []
for i in l:
for key, value in i.items():
pass  # TODO: filter rows by the where-condition and collect the selected columns
def run():
pass
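# A minimal sketch (not the original exercise's solution) of how the
# where-condition from the spec above could be evaluated against the row dicts
# in l. It handles the >, < and = comparisons plus "like" fuzzy matching.
def match_condition(row, condition):
    m = re.search(r"(\w+)\s*(>|<|=|like)\s*'?\"?(\w+)", condition)
    if not m:
        return False
    col, op, val = m.group(1), m.group(2), m.group(3)
    cell = row.get(col, '')
    if op == '>':
        return int(cell) > int(val)
    if op == '<':
        return int(cell) < int(val)
    if op == '=':
        return cell.lower() == val.lower()
    return val in cell  # op == 'like'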
if __name__ == '__main__':
run()
|
from flask_marshmallow import Marshmallow
from .model import Book
ma = Marshmallow()
def configure(app):
ma.init_app(app)
class BookSchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = Book
include_relationships = True
load_instance = True
|
import requests
import json
import datetime
class Guild:
def __init__(self, playerName: str = None, playeruuid: str = None):
self.playerName = playerName
self.playeruuid = playeruuid
if self.playerName is None and self.playeruuid is None:
raise AttributeError('You need to fill in either playerName or playeruuid')
if self.playerName is not None and self.playeruuid is not None:
raise AttributeError("You can't fill in both playerName and playeruuid")
self.nameValue = self.playerName if not playeruuid else playeruuid
self.get_stats_link = requests.get(f'https://api.slothpixel.me/api/guilds/{self.nameValue}')
self.stats_data = json.loads(self.get_stats_link.text)
def get_stats(self, item):
"""
To get the guild stats of a user's guild. The user does not have to be on Hypixel for this to work.
However, they must belong in a guild.
https://docs.slothpixel.me/#operation/getGuildFromPlayer
Items:
- :name:`string`
- :id:`string`
- :created:`string`
- :tag:`string`
- :tag color:`string`
- :tag formatted:`string`
- :exp:`integer`
- :level:`integer`
- :exp by game:`integer`
- :description:`object`
- :preferred games:`list`
- :legacy_ranking:`integer` (LEGACY)
Params:
:item:`string`
`versionadded`: 1.0
"""
self.item = item
self.item = str(self.item).replace(" ", "_")
if self.item == 'created':
return datetime.datetime.fromtimestamp(round(self.stats_data[self.item]/1000))
elif self.item == 'preferred_games':
return list(self.stats_data[self.item])
return self.stats_data[self.item]
def get_exp_history(self, date: str):
"""
To get the guild exp history of a user. The user does not have to be on Hypixel for this to work.
https://docs.slothpixel.me/#operation/getGuildFromPlayer
Params:
- :date:`string` (e.g YYYY-MM-DD, 2020-01-26)
`versionadded`: 1.0
"""
self.date = date
return self.stats_data['exp_history'][self.date]
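# A hypothetical usage sketch; 'SomePlayer' is an illustrative name, and
# constructing Guild performs a live request to the Slothpixel API.
if __name__ == '__main__':
    guild = Guild(playerName='SomePlayer')
    print(guild.get_stats('name'))
    print(guild.get_stats('level'))
    print(guild.get_stats('created'))            # converted to a datetime
    print(guild.get_exp_history('2020-01-26'))   # date format YYYY-MM-DD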
|
listt=[]
def countd(a):
for m in range(a,0,-1):
listt.append(m)
return listt
b=countd(5)
print(b)
#2 print and return
def printre(lis):
for i in range(0,2):
print(lis[0])
return lis[1]
print(printre([2,5]))
#3 First plus length
sum=0
def first_len(lis):
sum =lis[0]+len(lis)
return sum
print(first_len([4,5,6,8,1,2,3]))
#Values Greater than Second
lisNew=[]
def greater_than(lis):
for i in range(len(lis)):
if lis[i]>lis[1]:
lisNew.append(lis[i])
print(len(lisNew))
return lisNew
print(greater_than([3,2,5,6,7,9,1,0]))
#This Length, That Value
lis=[]
def this_that(a,b):
for i in range(a):
lis.append(b)
return lis
print(this_that(4, 5))
|
def esrever(s):
return s[:-1][::-1] + s[-1] if s else ''
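# The slice keeps the final character in place: everything before it is
# reversed and the last character is re-appended, e.g.
# esrever('hello') -> 'lleho', esrever('a') -> 'a', esrever('') -> ''.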
|
#import mysql
import sqlite3
"""This program uses the Table and Query classes to generate the SQL
to create the groceries database described in the Facade chapter."""
class Database():
def __init__(self, *args):
#self._db = MySQLdb.connect(args[0], args[1], args[2])
self.host=args[0]
self.userid=args[1]
self.pwd = args[2]
self._cursor = self._db.cursor()
def commit(self):
self._db.commit()
def create(self, dbname):
self.cursor.execute("drop database if exists "+dbname)
self._cursor.execute("Create database "+ dbname)
self._dbname = dbname
#self._db=MySQLdb.connect(self.host, self.userid, self.pwd, dbname)
self.cursor.execute("use "+dbname)
self._cursor= self._db.cursor()
def getName(self):
return self._dbname
@property
def cursor(self):
return self._cursor
def getTables(self):
self._cursor.execute("show tables")
# create array of table objects
self.tables = []
rows = self._cursor.fetchall()
for r in rows:
self.tables.append(Table(self._cursor, r))
return self.tables
class ColumnNames():
def __init__(self, query):
self.query = query
def getColumnNames(self):
# make list of tokens
qlist = self.query.lower().split(' ')
# remove trailing commas and stop at first SQL keyword
newq = []
i = 0
quit = False
while i < len(qlist) and not quit:
ql = qlist[i].strip().removesuffix(',') #remove trailing commas
if ql in {'from', 'join', 'where', 'inner'}: #stop on SQL keyword
quit = True
else:
if ql not in { 'distinct', 'select'}:
newq.append(ql) #insert name in column list
i += 1
# now remove leading table names
# and split where there was a comma but no space
newq2 = []
for ql in newq:
if '.' in ql:
qa = ql.split('.') # remove table name
ql = qa[1]
if ',' in ql:
qa = ql.split(',') # split at comma
newq2.append(qa[0]) # when there is no space
newq2.append(qa[1]) # between column names
else:
newq2.append(ql)
return newq2 # return the column name array
# Query object makes queries and returns Results
class Query():
def __init__(self, cursor, *qstring):
self.qstring = qstring[0]
self.multiple=False
if len(qstring) >1:
self.vals = qstring[1]
self.multiple = True
self.cursor = cursor
def setMultiple(self, mult):
self.multiple = mult
# executes the query and returns all the results
def execute(self):
#print (self.qstring)
self.getCols = ColumnNames(self.qstring)
self.colNames = self.getCols.getColumnNames()
if not self.multiple:
self.cursor.execute(self.qstring)
rows = self.cursor.fetchall()
return Results(rows, self.colNames)
else:
self.cursor.executemany(self.qstring, self.vals)
def executeMultiple(self, vals):
#print (self.qstring, vals)
self.cursor.executemany(self.qstring, vals)
# Mediator used by columns and Table class to keep
# the primary key string used in creating the SQL
class Mediator() :
def __init__(self, db):
self.db = db
self.filename = ""
def setPrimaryString(self, prims):
self.pstring = prims
def getPrimaryString(self):
return self.pstring
# base class Column
class Column():
def __init__(self, name):
self._name=name
self._primary = False
def isPrimary(self):
return self._primary
@property
def name(self):
return self._name
# Integer column- may be a primary key
class Intcol(Column) :
def __init__(self, name, med:Mediator):
super().__init__(name)
self.med = med
def getName(self):
idname = self.name+" INT NOT NULL "
return idname
class PrimaryCol(Intcol):
def __init__(self, name, autoInc, med: Mediator):
super().__init__(name, med)
self.med = med
self.autoInc = autoInc
def getName(self):
idname = self.name + " INT NOT NULL "
if self.autoInc:
idname += "AUTO_INCREMENT "
self.med.setPrimaryString("PRIMARY KEY (" + self.name + ")")
return idname
# Float col
class Floatcol(Column):
def __init__(self, name):
super().__init__(name)
def getName(self):
idname = self.name + " FLOAT NOT NULL "
return idname
# character column - length is the 2nd argument
class Charcol(Column):
def __init__(self, name, width:int):
super().__init__(name)
self.width=width
def getName(self):
idname = self.name + " VARCHAR("+str(self.width)+") NULL "
return idname
# Table class used to create all the table
class Table():
def __init__(self, db, name, med:Mediator):
self.cursor = db.cursor
self.db = db
self.tname = name # first of tuple
self.colList=[] # list of column names generated
self._primarystring = ""
self.med = med
@property
def name(self): # gets table name
return self.tname
# add a column
def addColumn(self, column):
self.colList.append(column)
# creates the sql to make the columns
def addRow(self, varnames):
qry = "insert into " + self.tname + "("
i = 0
for i in range(1, len(self.colList) - 1):
c = self.colList[i]
# if type(c)==PrimaryCol:
qry += c.name + ","
qry += self.colList[-1].name + ") VALUES "
#for i in range(1, len(self.colList) - 1):
# qry += "\'%s\',"
# qry += "\'%s\') "
#qry += " , "
qry += varnames
query = Query(self.cursor, qry, "")
query.setMultiple(False)
query.execute()
self.db.commit()
# creates the sql to make the columns
def addRows(self, varnames):
qry = "insert into "+self.tname +"("
i = 0
for i in range(1, len(self.colList)-1):
c = self.colList[i]
#if type(c)==PrimaryCol:
qry += c.name + ","
qry += self.colList[-1].name+") VALUES ("
for i in range(1, len(self.colList) - 1):
qry += "\'%s\',"
qry +="\'%s\') "
query = Query(self.cursor, qry, varnames)
query.execute()
self.db.commit()
#deletes a row
def deleteRow(self, colname, key):
querytxt= "delete from "+self.tname+" where "+colname+ "="+key
#print(querytxt)
query = Query(self.cursor, querytxt)
# query.execute()
# self.db.commit()
# creates the table and columns
def create(self):
sql = "create table "+self.db.getName()+"."+ self.name+" ("
for col in self.colList:
sql += col.getName()+","
sql += self.med.getPrimaryString()
sql +=")"
#print (sql)
self.cursor.execute(sql)
# returns a list of columns
def getColumns(self):
self.cursor.execute("show columns from " + self.tname)
self.columns = self.cursor.fetchall()
return self.columns
# contains the result of a query
class Results():
def __init__(self, rows, colNames):
self.rows = rows
self.cnames = colNames
self.makeDict()
def makeDict(self):
self.dictRows = []
#print(self.rows, self.cnames)
for r in self.rows:
self.makeDictRow(r)
#print(self.dictRows)
def makeDictRow(self, row):
niter = iter(self.cnames)
dict = {}
for r in row:
dict[next(niter)] = r
self.dictRows.append(dict)
def getRows(self):
return self.rows
def getDictRows(self):
return self.dictRows
# holds primary key string as table is created
class Primary() :
primaryString = ""
# Table class used to create all the table
class SqltTable(Table):
def __init__(self, db, name):
self.cursor = db.cursor()
self.db = db
self.tname = name # first of tuple
self.colList=[] # list of column names generated
self._primarystring = ""
# creates the sql to make the columns--Sqlite differs slightly
def addRows(self, varnames):
qry = "insert into "+self.tname +"("
i = 0
for i in range(0, len(self.colList)-1):
c = self.colList[i]
qry += c.name + ","
qry += self.colList[-1].name+") values ("
for i in range(0, len(self.colList) - 1):
qry += "?,"
qry +="?);"
query = Query(self.cursor, qry, varnames)
#print(qry+"\n", varnames)
query.execute()
self.db.commit()
# creates the table and columns
def create(self):
sql = "create table " + self.name + " ("
for col in self.colList:
sql += col.getName()+","
sql += Primary.primaryString
sql +=");"
#print (sql)
self.cursor.execute(sql)
def getColumns(self):
tn = self.tname[0]
#print(self.tname)
sql="select name from pragma_table_info('"+tn+"')"
#print(sql)
self.cursor.execute(sql)
self.columns = self.cursor.fetchall()
return self.columns
class SqltDatabase(Database):
def __init__(self, *args):
self._db = sqlite3.connect(args[0])
self._dbname = args[0]
self._cursor = self._db.cursor()
def create(self, dbname):
pass
def getTables(self):
self._cursor.execute("select name from sqlite_master where type='table'")
# create array of table objects
self.tables = []
rows = self._cursor.fetchall()
for r in rows:
self.tables.append(SqltTable(self._db, r))
return self.tables
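# A hypothetical end-to-end sketch using the SQLite classes above. Note that
# SqltTable expects the raw sqlite3 connection (as getTables passes self._db),
# Primary.primaryString must be set before create(), and ColumnNames relies on
# str.removesuffix, so Python 3.9+ is assumed. Names and values are illustrative.
if __name__ == '__main__':
    conn = sqlite3.connect(':memory:')
    med = Mediator(conn)
    foods = SqltTable(conn, 'foods')
    foods.addColumn(Intcol('foodkey', med))
    foods.addColumn(Charcol('foodname', 20))
    foods.addColumn(Floatcol('price'))
    Primary.primaryString = 'PRIMARY KEY (foodkey)'
    foods.create()
    foods.addRows([(1, 'apple', 0.50), (2, 'orange', 0.75)])
    query = Query(conn.cursor(), 'select foodname, price from foods')
    for row in query.execute().getDictRows():
        print(row)   # e.g. {'foodname': 'apple', 'price': 0.5}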
|
#Sidharth Peri
#10/22/20
#Honor Code: I pledge on my honor that I have abided by the Stevens Honor System
#A program that opens a text file, formats the strings, and then writes
#the reformatted strings to a new text file
def main():
print("This program takes a text file with lower case names and writes them into a new")
print("text file with all uppercase letters")
#get file names
beforeFile = input("What is the name of the file? ")
afterFile = input("Place names in this file: ")
#open files
before = open(beforeFile, 'r')
after = open(afterFile, 'w')
#for loop to split first and last name into separate strings and use upper function to
#create a new string with uppercase names
for i in before:
first, last = i.split()
new_name= first.upper() + " " + last.upper()
print(new_name,file=after)
#close files
before.close()
after.close()
print("Names have been printed to", afterFile)
main()
|
from math import sqrt
n = int(input())
f = (((1 + sqrt(5)) / 2) ** n - ((1 - sqrt(5)) / 2) ** n) / sqrt(5)
print('{:.1f}'.format(f))
|
class MetaOne(type):
def __new__(meta, classname, supers, classdict): # Redefine type method
print('In MetaOne.new:', classname)
return type.__new__(meta, classname, supers, classdict)
def __init__(cls, classname, supers, classdict):
print('In MetaOne.init:', cls, classname)
def toast(self):
print('toast')
class Super(metaclass=MetaOne): # Metaclass inherited by subs too
def spam(self): # MetaOne run twice for two classes
print('spam')
class Sub(Super): # Superclass: inheritance versus instance
def eggs(self): # Classes inherit from superclasses
print('eggs')# But not from metaclasses
print(dir(Super))
print(dir(Sub))
X = Sub()
X.eggs()
X.spam()
Sub.eggs(X)
Sub.spam(X)
Sub.toast()
MetaOne.toast(Sub)
MetaOne.toast(Super)
|
"""
PRACTICE Test 3, problem 4.
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and Muqing Zheng. October 2015.
""" # TODO: 1. PUT YOUR NAME IN THE ABOVE LINE.
def main():
""" Calls the TEST functions in this module. """
test_doubler()
def test_doubler():
""" Tests the doubler function. """
# ------------------------------------------------------------------
# TODO: 2. Implement this TEST function.
# It TESTS the doubler function defined below.
# Include at least ** 2 ** tests (we wrote 1 for you).
# ------------------------------------------------------------------
print()
print('--------------------------------------------------')
print('Testing the doubler function:')
print('--------------------------------------------------')
# Test 1:
arg1 = [10, -3, 20, 4]
arg2 = [5, 0, 8]
correct_arg1_after = [20, -6, 40, 8]
correct_arg2_after = [5, 0, 8]
expected = [10, 0, 16]
print()
print('BEFORE the function call:')
print(' Argument 1 is:', arg1)
print(' Argument 2 is:', arg2)
answer = doubler(arg1, arg2)
print('AFTER the function call:')
print(' Argument 1 is: ', arg1)
print(' Argument 1 should be:', correct_arg1_after)
print(' Argument 2 is: ', arg2)
print(' Argument 2 should be:', correct_arg2_after)
print('The returned value is: ', answer)
print('The returned value should be:', expected)
# ------------------------------------------------------------------
# TODO 2 (continued): Add your ADDITIONAL test(s) here:
# ------------------------------------------------------------------
arg1 = [1, 0, 22, 4]
arg2 = [25, 10, 14]
correct_arg1_after = [2, 0, 44, 8]
correct_arg2_after = [25, 10, 14]
expected = [50, 20, 28]
print()
print('BEFORE the function call:')
print(' Argument 1 is:', arg1)
print(' Argument 2 is:', arg2)
answer = doubler(arg1, arg2)
print('AFTER the function call:')
print(' Argument 1 is: ', arg1)
print(' Argument 1 should be:', correct_arg1_after)
print(' Argument 2 is: ', arg2)
print(' Argument 2 should be:', correct_arg2_after)
print('The returned value is: ', answer)
print('The returned value should be:', expected)
def doubler(list1, list2):
"""
Both arguments are lists of integers. This function:
-- MUTATEs the first list by doubling each number in the list
and
-- RETURNs a new list that is the same as list2 but with each
number in the list doubled.
For example, if the two arguments are:
[10, -3, 20, 4] and [5, 0, 8]
then this method MUTATEs the first argument to [20, -6, 40, 8]
and RETURNs the list [10, 0, 16]
Preconditions:
:type list1: list of integers
:type list2: list of integers
"""
# ------------------------------------------------------------------
# TODO: 3. Implement and test this function.
# Note that you should write its TEST function first (above).
# ------------------------------------------------------------------
doubled = []
for k in range(len(list1)):
list1[k] = list1[k] * 2
for k in list2:
doubled += [k * 2]
return doubled
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
|
import unittest
from katas.kyu_7.numbers_with_this_digit_inside import \
numbers_with_digit_inside
class DigitInsideTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(numbers_with_digit_inside(5, 6), [0, 0, 0])
def test_equal_2(self):
self.assertEqual(numbers_with_digit_inside(7, 6), [1, 6, 6])
def test_equal_3(self):
self.assertEqual(numbers_with_digit_inside(11, 1), [3, 22, 110])
def test_equal_4(self):
self.assertEqual(numbers_with_digit_inside(20, 0), [2, 30, 200])
def test_equal_5(self):
self.assertEqual(numbers_with_digit_inside(44, 4),
[9, 286, 5955146588160])
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Planar(nn.Module):
def __init__(self):
super(Planar, self).__init__()
self.h = nn.Tanh()
def forward(self, z, u, w, b):
"""
Computes the following transformation:
z' = z + u h( w^T z + b)
Input shapes:
shape u = (batch_size, z_size, 1)
shape w = (batch_size, 1, z_size)
shape b = (batch_size, 1, 1)
shape z = (batch_size, z_size).
"""
# Equation (10)
z = z.unsqueeze(2)
prod = torch.bmm(w, z) + b
f_z = z + u * self.h(prod) # this is a 3d vector
f_z = f_z.squeeze(2) # this is a 2d vector
# compute logdetJ
# Equation (11)
psi = w * (1 - self.h(prod) ** 2) # w * h'(prod)
# Equation (12)
log_det_jacobian = torch.log(torch.abs(1 + torch.bmm(psi, u)))
log_det_jacobian = log_det_jacobian.squeeze(2).squeeze(1)
return f_z, log_det_jacobian
class Coupling(nn.Module):
def __init__(self, in_out_dim, mid_dim, hidden):
"""Initialize a coupling layer.
Args:
in_out_dim: input/output dimensions.
mid_dim: number of units in a hidden layer.
hidden: number of hidden layers.
"""
super(Coupling, self).__init__()
self.in_block = nn.Sequential(
nn.Linear(in_out_dim//2, mid_dim),
nn.ReLU())
self.mid_block = nn.ModuleList([
nn.Sequential(
nn.Linear(mid_dim, mid_dim),
nn.ReLU()) for _ in range(hidden - 1)])
self.out_block = nn.Linear(mid_dim, in_out_dim//2)
perm = torch.randperm(in_out_dim)
eye = torch.eye(in_out_dim)
self.P = eye[perm, :].cuda()
self.PT = self.P.t()
def forward(self, x):
"""Forward pass.
Args:
x: input tensor.
Returns:
transformed tensor.
"""
[B, W] = list(x.size())
# Random permutation
x = x @ self.P
x = x.reshape((B, W//2, 2))
on, off = x[:, :, 0], x[:, :, 1]
off_ = self.in_block(off)
for i in range(len(self.mid_block)):
off_ = self.mid_block[i](off_)
shift = self.out_block(off_)
on = on + shift
x = torch.stack((on, off), dim=2)
x = x.reshape((B, W))
x = x @ self.PT
return x
class Scaling(nn.Module):
"""
Log-scaling layer.
"""
def __init__(self, dim):
"""Initialize a (log-)scaling layer.
Args:
dim: input/output dimensions.
"""
super(Scaling, self).__init__()
self.scale = nn.Parameter(
torch.zeros((1, dim)), requires_grad=True)
def forward(self, x):
"""Forward pass.
Args:
x: input tensor.
Returns:
transformed tensor and log-determinant of Jacobian.
"""
log_det_J = torch.sum(self.scale, dim=1)
x = x * torch.exp(self.scale)
return x, log_det_J
class Coupling_amor(nn.Module):
def __init__(self, input_dim):
"""Initialize a coupling layer.
Args:
Coupling with only 1 hidden layer
input_dim: input dimensions
"""
super(Coupling_amor, self).__init__()
self.h = nn.Tanh()
perm = torch.randperm(input_dim)
eye = torch.eye(input_dim)
self.P = eye[perm, :].cuda()
self.PT = self.P.t()
def forward(self, x, u, w, b):
"""Forward pass.
Args:
x: input tensor.
Returns:
transformed tensor.
"""
[B, W] = list(x.size())
x = x @ self.P
x = x.reshape((B, W//2, 2))
on, off = x[:, :, 0], x[:, :, 1]
off_ = off.unsqueeze(2)
prod = torch.bmm(w, off_) + b
shift = u * self.h(prod)
shift = shift.squeeze(2)
on = on + shift # Additive coupling layer
x = torch.stack((on, off), dim=2)
x = x.reshape((B, W))
x = x @ self.PT
return x
class Scaling_amor(nn.Module):
"""
Log-scaling layer.
"""
def __init__(self):
"""Initialize a (log-)scaling layer.
"""
super(Scaling_amor, self).__init__()
# self.scale = nn.Parameter(
# torch.zeros((1, dim)), requires_grad=True)
def forward(self, x, scale):
"""Forward pass.
Args:
x: input tensor.
scale: scaling tensor
Returns:
transformed tensor and log-determinant of Jacobian.
"""
log_det_J = torch.sum(scale, dim=1)
x = x * torch.exp(scale)
return x, log_det_J
class Sylvester(nn.Module):
"""
Sylvester normalizing flow.
"""
def __init__(self, M):
super(Sylvester, self).__init__()
self.h = nn.Tanh()
def forward(self, z, Q, R, R_tilde, b):
"""
Computes the transformation of Equation (13):
z' = z + QR h(R_tilde Q^T z + b)
Input shapes:
shape z = (batch_size, z_size)
shape R = (batch_size, M, M)
shape R_tilde = (batch_size, M, M)
shape Q = (batch_size, z_size , M)
shape b = (batch_size, M)
"""
##Computations for Equation (13)
z = z.unsqueeze(2)
b = b.unsqueeze(2)
RQ = torch.bmm(R_tilde, Q.transpose(2, 1))
prod = torch.bmm(RQ, z) + b
QR = torch.bmm(Q, R)
#Equation (13)
f_z = z + torch.bmm(QR, self.h(prod))
f_z = f_z.squeeze(2)
##Computations for Equation (14)
R_diag = torch.diagonal(R, dim1=1, dim2=2)
R_tidle_diag = torch.diagonal(R_tilde, dim1=1, dim2=2)
RR_diag = R_diag * R_tidle_diag #RR_diag.shape = [batch_size, M]
h_der = (1 - self.h(prod) ** 2).squeeze(2) #h'(R_tidle Q^T z + b)
#diagonal of the argument of det in Equation (14)
det_J_diag = 1 + h_der * RR_diag
log_det_J_diag = det_J_diag.abs().log()
log_det_jacobian = log_det_J_diag.sum(-1) #det of diagonal matrix
return f_z, log_det_jacobian
# Random Permutation
# class AffineCoupling(torch.nn.Module):
# """
# Input:
# - input_output_dim, mid_dim, hidden_dim=1
# Output:
# - Transformed x->z
# - Log-determinant of Jacobian
# """
# def __init__(self, input_output_dim, mid_dim, hidden_dim=1):
# super(AffineCoupling, self).__init__()
# self.input_output_dim = input_output_dim
# self.mid_dim = mid_dim
# self.hidden_dim = hidden_dim
# self.s = nn.Sequential(nn.Linear(input_output_dim//2, mid_dim), nn.Tanh(), nn.Linear(mid_dim, mid_dim), nn.Tanh(), nn.Linear(mid_dim, input_output_dim//2))
# self.t = nn.Sequential(nn.Linear(input_output_dim//2, mid_dim), nn.Tanh(), nn.Linear(mid_dim, mid_dim), nn.Tanh(), nn.Linear(mid_dim, input_output_dim//2))
# perm = torch.randperm(self.input_output_dim)
# eye = torch.eye(self.input_output_dim)
# self.P = eye[perm, :]
# self.PT = self.P.t()
# def forward(self, x):
# d = self.input_output_dim//2
# x = x @ self.P
# x1, x2 = x[:, :d], x[:, d:]
# scale = self.s(x1)
# translate = self.t(x1)
# z1 = x1
# z2 = x2 * torch.exp(scale)
# z3 = translate
# z4 = z2 + z3
# z = torch.cat((z1, z4), dim=1)
# z = z @ self.PT
# log_det_j = scale.sum(-1)
# return z, log_det_j
# alternate couplings with mask
class AffineCoupling(torch.nn.Module):
"""
Input:
- input_output_dim, mid_dim, hidden_dim=1
Output:
- Transformed x->z
- Log-determinant of Jacobian
"""
def __init__(self, input_output_dim, mid_dim, hidden_dim, mask):
super(AffineCoupling, self).__init__()
self.input_output_dim = input_output_dim
self.mid_dim = mid_dim
self.hidden_dim = hidden_dim
self.mask = mask
self.s = nn.Sequential(nn.Linear(input_output_dim//2, mid_dim), nn.Tanh(), nn.Linear(mid_dim, mid_dim), nn.Tanh(), nn.Linear(mid_dim, input_output_dim//2))
self.t = nn.Sequential(nn.Linear(input_output_dim//2, mid_dim), nn.Tanh(), nn.Linear(mid_dim, mid_dim), nn.Tanh(), nn.Linear(mid_dim, input_output_dim//2))
def forward(self, x):
d = self.input_output_dim//2
x1, x2 = x[:, ::2], x[:, 1::2]
# stride-2 slicing: x1 takes indices 0,2,4,... and x2 takes indices 1,3,5,... (checkerboard-style masking)
if self.mask:
x1, x2 = x2, x1
scale = self.s(x1)
translate = self.t(x1)
z1 = x1
z2 = x2 * torch.exp(scale)
z3 = translate
z4 = z2 + z3
if self.mask:
z1, z4 = z4, z1
z = torch.cat((z1, z4), dim=1)
log_det_j = scale.sum(-1)
return z, log_det_j
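# A minimal usage sketch for the Planar flow above; shapes follow its docstring
# (batch_size=4, z_size=2) and the parameter tensors are random placeholders
# rather than outputs of an amortizing network.
if __name__ == '__main__':
    flow = Planar()
    z = torch.randn(4, 2)
    u = torch.randn(4, 2, 1)
    w = torch.randn(4, 1, 2)
    b = torch.randn(4, 1, 1)
    z_new, log_det = flow(z, u, w, b)
    print(z_new.shape, log_det.shape)   # torch.Size([4, 2]) torch.Size([4])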
|
from django.contrib import admin
from todo.models import StaffProfile
admin.site.register(StaffProfile)
|