# text stringlengths 8 6.05M |
# |---|
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# % Extreme EEG Signal Analyzer with P300 Detector %
# % Algorithms %
# % %
# % Copyright (C) 2019 Cagatay Demirel. All rights reserved. %
# % demirelc16@itu.edu.tr %
# % %
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hilbert, savgol_filter, butter, hamming, lfilter, filtfilt, resample
from scipy.signal import lfilter, hamming, savgol_filter, hilbert, fftconvolve, butter, iirnotch, freqz, firwin, iirfilter
from struct import unpack
import os
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn import linear_model, svm, neighbors
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import NearestNeighbors
from sklearn.neural_network import MLPClassifier
import random
import itertools
import pickle
import librosa
from scipy.fftpack import fft
def envelopeCreator(timeSignal, degree, fs):
    """Return a smoothed amplitude envelope of ``timeSignal``.

    The envelope is the magnitude of the analytic (Hilbert) signal,
    smoothed with a Savitzky-Golay filter of polynomial order ``degree``
    and window length ``int(fs / 40 + 1)`` samples.

    NOTE: this function is shadowed by a later redefinition of
    ``envelopeCreator`` in this module, which uses a wider fs/10 window.
    """
    rawEnvelope = np.abs(hilbert(timeSignal))
    smoothingWindow = int(fs / 40 + 1)
    return savgol_filter(rawEnvelope, smoothingWindow, degree)
def butter_bandpass(lowcut, highcut, fs, order=3):
    """Design a Butterworth band-pass filter; return (b, a) coefficients.

    Cutoffs are given in Hz and normalized by the Nyquist frequency.
    Original author's note: above order 3, lfilter starts producing NaN
    values with these coefficients.
    """
    nyquist = 0.5 * fs
    normalizedBand = [lowcut / nyquist, highcut / nyquist]
    # Alternative designs (e.g. Chebyshev via iirfilter) were tried and
    # abandoned in the original.
    return butter(order, normalizedBand, btype='band', analog=False)
def butter_lowpass(cutoff, fs, order=3):
    """Design a Butterworth low-pass filter; return (b, a) coefficients.

    ``cutoff`` is in Hz and normalized by the Nyquist frequency fs/2.
    """
    nyquist = 0.5 * fs
    return butter(order, cutoff / nyquist, btype='low', analog=False)
# band-pass filter between two frequency
def butter_bandpass_filter(data, lowcut, highcut, fs, order=3):
    """Band-pass ``data`` between ``lowcut`` and ``highcut`` Hz (zero phase).

    Uses filtfilt (forward-backward filtering) rather than lfilter so the
    output has no phase distortion.
    """
    coeffB, coeffA = butter_bandpass(lowcut, highcut, fs, order=order)
    return filtfilt(coeffB, coeffA, data)
def butter_lowpass_filter(data, cutoff, fs, order=3):
    """Low-pass ``data`` below ``cutoff`` Hz with zero-phase filtering."""
    coeffB, coeffA = butter_lowpass(cutoff, fs, order=order)
    return filtfilt(coeffB, coeffA, data)
def FFT(signal, Fs):
    """One-sided, Hamming-windowed magnitude spectrum of ``signal``.

    Returns (X, fIndexes): the first len(signal)//2 magnitude values,
    normalized by the number of retained bins, and the matching frequency
    axis in Hz.
    """
    half = int(len(signal) / 2)
    windowed = signal * np.hamming(len(signal))
    X = abs(fft(windowed))[0:half]
    X = X / len(X)
    fIndexes = (Fs / (2 * half)) * np.arange(half)
    return X, fIndexes
def hpssFilter(data):
    """Harmonic/percussive source separation of ``data`` via librosa.

    Returns the (harmonic, percussive) pair produced by
    librosa.effects.hpss with margin=(1.0, 5.0); input is cast to float64
    first.
    """
    return librosa.effects.hpss(data.astype("float64"), margin=(1.0, 5.0))
def binPower(signal, Band, Fs):
    """Spectral power in each frequency bin bounded by consecutive entries
    of ``Band`` (in Hz).

    Returns (power, power_ratio): absolute power per bin and each bin's
    share of the total. The spectrum is Hamming-windowed and normalized
    the same way as FFT() above.
    """
    half = int(len(signal) / 2)
    spectrum = abs(fft(signal * np.hamming(len(signal))))[0:half]
    spectrum = spectrum / len(spectrum)
    power = np.zeros(len(Band) - 1)
    for binIndex, (lowEdge, highEdge) in enumerate(zip(Band[:-1], Band[1:])):
        # Map band edges in Hz onto FFT bin indices.
        start = int(np.floor(lowEdge * len(signal) / Fs))
        stop = int(np.floor(highEdge * len(signal) / Fs))
        power[binIndex] = sum(spectrum[start:stop])
    power_ratio = power / sum(power)
    return power, power_ratio
def pfd(X, D=None):
    """Compute the Petrosian Fractal Dimension of time series ``X``.

    PFD = log10(n) / (log10(n) + log10(n / (n + 0.4 * N_delta)))
    where n = len(X) and N_delta is the number of sign changes in the
    first derivative of the signal.

    ``D`` may be supplied as a precomputed first-difference list; otherwise
    it is derived from X.
    """
    if D is None:
        D = np.diff(X)
        D = D.tolist()
    N_delta = 0  # number of sign changes in derivative of the signal
    for i in range(1, len(D)):
        if D[i] * D[i - 1] < 0:
            N_delta += 1
    n = len(X)
    # Bug fix: the original computed log10(n / n + 0.4 * N_delta), i.e.
    # log10(1 + 0.4*N_delta).  Petrosian's formula requires the whole
    # denominator n + 0.4*N_delta inside the division.
    return np.log10(n) / (np.log10(n) + np.log10(n / (n + 0.4 * N_delta)))
def hfd(X, Kmax):
    """Compute the Higuchi/Hjorth Fractal Dimension of time series ``X``.

    ``Kmax`` is the maximum time lag; the dimension is the slope of a
    least-squares fit of log(mean curve length) against log(1/k) for
    k = 1 .. Kmax-1.
    """
    N = len(X)
    logLengths = []
    regressors = []
    for k in range(1, Kmax):
        curveLengths = []
        for m in range(0, k):
            # Curve length of the sub-series starting at offset m with lag k.
            steps = int(np.floor((N - m) / k))
            total = 0
            for i in range(1, steps):
                total += abs(X[m + i * k] - X[m + i * k - k])
            curveLengths.append(total * (N - 1) / np.floor((N - m) / float(k)) / k)
        logLengths.append(np.log(np.mean(curveLengths)))
        regressors.append([np.log(float(1) / k), 1])
    (p, r1, r2, s) = np.linalg.lstsq(regressors, logLengths)
    return p[0]
def hjorth(X, D=None):
    """Compute Hjorth mobility and complexity of a time series.

    Returns (mobility, complexity).  ``D`` may be a precomputed first
    difference; note that the supplied list is mutated (padded in place
    with X[0]), matching the original behaviour.
    """
    if D is None:
        D = np.diff(X)
        D = D.tolist()
    D.insert(0, X[0])  # pad so the derivative has the same length as X
    diffs = np.array(D)
    n = len(X)
    M2 = float(sum(diffs ** 2)) / n
    totalPower = sum(np.array(X) ** 2)
    M4 = 0
    for j in range(1, len(diffs)):
        M4 += (diffs[j] - diffs[j - 1]) ** 2
    M4 = M4 / n
    mobility = np.sqrt(M2 / totalPower)
    complexity = np.sqrt(float(M4) * totalPower / M2 / M2)
    return mobility, complexity
def hurst(X):
    """Estimate the Hurst exponent of ``X`` via rescaled-range (R/S)
    analysis: the slope of log(R/S) against log(T).

    NOTE: S_T[0] is the std of a single sample (0), so the i=0 ratio is a
    0/0 (numpy warning) that is discarded by the [1:] slices below.
    """
    series = np.array(X)
    N = series.size
    T = np.arange(1, N + 1)
    runningSum = np.cumsum(series)
    runningMean = runningSum / T
    S_T = np.zeros(N)
    R_T = np.zeros(N)
    for i in range(N):
        S_T[i] = np.std(series[:i + 1])
        deviations = runningSum - T * runningMean[i]
        R_T[i] = np.ptp(deviations[:i + 1])
    logRS = np.log(R_T / S_T)[1:]
    logT = np.log(T)[1:]
    design = np.column_stack((logT, np.ones(logT.size)))
    [slope, intercept] = np.linalg.lstsq(design, logRS)[0]
    return slope
def dfa(X, Ave=None, L=None):
    """Compute Detrended Fluctuation Analysis.

    Returns the scaling exponent Alpha: the slope of log(F(n)) vs log(n),
    where F(n) is the root-mean-square residual of linear fits over boxes
    of length n.

    Ave -- mean to subtract from the cumulative sum (defaults to mean(X)).
    L   -- iterable of box lengths; defaults to dyadic fractions of len(X).
    """
    X = np.array(X)
    if Ave is None:
        Ave = np.mean(X)
    # Integrated (cumulative-sum) profile, centred on Ave.
    Y = np.cumsum(X)
    Y -= Ave
    if L is None:
        # Default box lengths: len(X) / 2**k for a range of k values.
        L = np.floor(len(X) * 1 / (2 ** np.array(list(range(4, int(np.log2(len(X))) - 4)))))
    F = np.zeros(len(L))  # F(n) of different given box length n
    for i in range(0, len(L)):
        n = int(L[i])  # for each box length L[i]
        if n == 0:
            print("time series is too short while the box length is too big")
            print("abort")
            exit()
        for j in range(0, len(X), n):  # for each box
            if j + n < len(X):
                c = list(range(j, j + n))
                # coordinates of time in the box
                c = np.vstack([c, np.ones(n)]).T
                # the value of data in the box
                y = Y[j:j + n]
                # add residue in this box: lstsq(...)[1] is the sum of
                # squared residuals of the linear fit.
                F[i] += np.linalg.lstsq(c, y)[1]
        # Normalize the accumulated residue by the total sample count.
        F[i] /= ((len(X) / n) * n)
    F = np.sqrt(F)
    # Slope of log-log fit = DFA scaling exponent.
    Alpha = np.linalg.lstsq(np.vstack([np.log(L), np.ones(len(L))]).T, np.log(F))[0][0]
    return Alpha
#================================================================================================================================
def RecvData(socket, requestedSize):
    """Read exactly ``requestedSize`` bytes from ``socket``.

    Loops on recv() until the requested amount has arrived and returns it
    as bytes.  Raises RuntimeError if the peer closes the connection
    before enough data is received.
    """
    returnStream = b''
    while len(returnStream) < requestedSize:
        databytes = socket.recv(requestedSize - len(returnStream))
        # recv() returns an empty bytes object when the peer has closed.
        if not databytes:
            # Bug fix: the original `raise (RuntimeError, "...")` raises a
            # tuple, which is itself a TypeError on Python 3; also the
            # accumulator was a str, which cannot hold recv()'s bytes.
            raise RuntimeError("connection broken")
        returnStream += databytes
    return returnStream
def SplitString(raw):
    """Split a NUL-delimited string into its pieces.

    Characters after the final NUL (if any) are discarded, matching the
    original behaviour; an input with no NUL yields an empty list.
    """
    pieces = []
    current = ""
    for ch in raw:
        if ch == '\x00':
            pieces.append(current)
            current = ""
        else:
            current = current + ch
    return pieces
# read from tcpip socket
def GetProperties(rawdata):
    """Parse an amplifier "properties" message read from the TCP/IP socket.

    Layout: <channelCount:uint32><samplingInterval:double>, then one
    little-endian double resolution per channel, then the NUL-separated
    channel name list.
    Returns (channelCount, samplingInterval, resolutions, channelNames).
    """
    (channelCount, samplingInterval) = unpack('<Ld', rawdata[:12])
    resolutions = []
    for channel in range(channelCount):
        offset = 12 + channel * 8
        resolutions.append(unpack('<d', rawdata[offset:offset + 8])[0])
    # Everything after the resolution block is channel names.
    channelNames = SplitString(rawdata[12 + 8 * channelCount:])
    return (channelCount, samplingInterval, resolutions, channelNames)
def GetData(rawdata, channelCount):
    """Parse one EEG data message: header, interleaved samples, markers.

    Layout: <block:uint32><points:uint32><markerCount:uint32>, then
    points*channelCount little-endian floats, then markerCount
    variable-size marker records.
    Returns (block, points, markerCount, data, markers).

    NOTE(review): relies on a ``Marker`` class that is not defined in this
    module — presumably defined/imported elsewhere; confirm before use.
    """
    # Extract numerical data
    (block, points, markerCount) = unpack('<LLL', rawdata[:12])
    # Extract eeg data as array of floats (channel-interleaved samples).
    data = []
    for i in range(points * channelCount):
        index = 12 + 4 * i
        value = unpack('<f', rawdata[index:index+4])
        data.append(value[0])
    # Extract markers; each record starts with its own byte size.
    markers = []
    index = 12 + 4 * points * channelCount
    for m in range(markerCount):
        markersize = unpack('<L', rawdata[index:index+4])
        ma = Marker()
        (ma.position, ma.points, ma.channel) = unpack('<LLl', rawdata[index+4:index+16])
        # The remainder of the record is "<type>\x00<description>\x00".
        typedesc = SplitString(rawdata[index+16:index+markersize[0]])
        ma.type = typedesc[0]
        ma.description = typedesc[1]
        markers.append(ma)
        index = index + markersize[0]
    return (block, points, markerCount, data, markers)
def distinguishData(oneBigSignal, channelCount, resolutions):
    """De-interleave a flat, channel-multiplexed sample stream into a
    (channelCount, samples) array.

    Every raw value is scaled by resolutions[0] (a single resolution is
    assumed to apply to all channels).
    """
    samplesPerChannel = int(len(oneBigSignal) / channelCount)
    dataSeparated = np.zeros((channelCount, samplesPerChannel))
    position = 0
    sampleIndex = 0
    while position < len(oneBigSignal):
        for channel in range(channelCount):
            dataSeparated[channel, sampleIndex] = oneBigSignal[position] * resolutions[0]
            position += 1
        sampleIndex += 1
    return dataSeparated
def envelopeCreator(timeSignal, degree, Fs):
    """Smoothed amplitude envelope (Hilbert magnitude + Savitzky-Golay).

    NOTE: redefines the earlier ``envelopeCreator`` in this module; this
    version uses a wider smoothing window, int(Fs / 10 + 1) samples
    instead of Fs/40.
    """
    rectified = np.abs(hilbert(timeSignal))
    windowLength = int(Fs / 10 + 1)
    return savgol_filter(rectified, windowLength, degree)
def notchFilter(data, Fs, f0, Q):
    """Zero-phase IIR notch filter at ``f0`` Hz (e.g. 50 Hz mains hum).

    ``Q`` is the notch quality factor; the cutoff is normalized by the
    Nyquist frequency.  (A band-stop Butterworth alternative was tried
    and abandoned in the original.)
    """
    normalizedFreq = f0 / (Fs / 2)
    coeffB, coeffA = iirnotch(normalizedFreq, Q)
    return filtfilt(coeffB, coeffA, data)
def eegFilteringOfflineEyeClosed(data, channelCount, stimulusLog, sampFreq, lowPass, highPass, deletionWindowAmount, order=3):
    """Offline (eye-closed) pipeline: notch + band-pass each channel, drop
    the first 2500 samples, then segment 300-sample windows per stimulus.

    Windows advance by 200 samples per stimulus entry; stimulusLog holds
    1-based stimulus codes.  Returns eegSignals[channel][stimulus] ->
    list of 300-sample windows.

    NOTE(review): eegSignals is a 4-tuple yet 5 stimulus bins are created
    per channel, so channelCount > 4 would raise IndexError — confirm the
    intended channel limit.  Window sizes suggest 2 ms per sample
    (500 Hz) — TODO confirm.
    """
    dataSeparatedFilt = np.zeros((channelCount, len(data[0,:])))
    for i in range(0,channelCount):
        # 50 Hz mains notch, then band-pass between lowPass..highPass Hz.
        tempData = notchFilter(data[i], sampFreq, 50, 30)
        dataSeparatedFilt[i] = butter_bandpass_filter(tempData, lowPass, highPass, sampFreq, order)
    # Discard the initial transient (first 2500 samples).
    dataSeparatedFilt2 = dataSeparatedFilt[:,2500:]
    eegSignals = ([],[],[],[])
    for i in range(0, channelCount):
        # Five stimulus bins per channel.
        eegSignals[i].append([])
        eegSignals[i].append([])
        eegSignals[i].append([])
        eegSignals[i].append([])
        eegSignals[i].append([])
    count = 0
    for i in range(0, len(stimulusLog)):
        for j in range(0, channelCount):
            # Stop once a full 300-sample window no longer fits.
            if(count+300 > len(dataSeparatedFilt2[0,:])):
                break
            eegSignals[j][stimulusLog[i]-1].append(dataSeparatedFilt2[j,count:count+300])
        count += 200
    # eegSignals = eegSignalDeletion(eegSignals, channelCount, deletionWindowAmount)
    return eegSignals
def eegFilteringOnlineEyeClose(data, channelCount, sampFreq, lowPass, highPass, correctionMs, order=3):
    """Online (eye-closed) pipeline: 50 Hz notch + band-pass each channel,
    then subtract the mean of the first correctionMs worth of samples.

    correctionMs is converted to samples assuming 2 ms per sample.
    Returns a (channelCount, samples) array.
    """
    correctionWindowAmount = int(correctionMs / 2)
    dataSeparatedFilt = np.zeros((channelCount, len(data[0, :])))
    for i in range(0, channelCount):
        tempData = notchFilter(data[i], sampFreq, 50, 30)
        # Bug fix: the original band-passed the raw data[i], silently
        # discarding the notch-filtered result — unlike the offline and
        # eye-open variants, which chain notch -> band-pass.
        tempData = butter_bandpass_filter(tempData, lowPass, highPass, sampFreq, order)
        dataSeparatedFilt[i] = tempData - np.mean(tempData[0:correctionWindowAmount])
    return dataSeparatedFilt
def eegFilteringOnlineEyeOpen(data, channelCount, sampFreq, lowPass, highPass, correctionMs, order=3):
    """Online (eye-open) pipeline: band-pass each channel, then subtract
    the mean of the first correctionMs worth of samples.

    The 50 Hz notch stage is deliberately disabled in this variant (it was
    commented out in the original).  correctionMs is converted to samples
    assuming 2 ms per sample.
    """
    baselineSamples = int(correctionMs / 2)
    filtered = np.zeros((channelCount, len(data[0, :])))
    for ch in range(channelCount):
        bandPassed = butter_bandpass_filter(data[ch], lowPass, highPass, sampFreq, order)
        filtered[ch] = bandPassed - np.mean(bandPassed[0:baselineSamples])
    return filtered
def eegFilteringOfflineEyeOpen(data, channelCount, sampFreq, lowPass, highPass, peakAmp, stimulusLog, deletionWindowAmount,
                               order=3):
    """Offline (eye-open) pipeline: notch + band-pass, drop the first 2500
    samples, segment 300-sample windows, and reject windows whose Fp1
    amplitude exceeds 40000 (treated as eye blinks).

    Returns (eegSignals, dataSeparatedFilt2, detectedBlinks).

    NOTE(review): the blink threshold is hard-coded at 40000 and the
    ``peakAmp`` parameter is unused — presumably it was meant to replace
    the constant; confirm.  As in the eye-closed variant, eegSignals is a
    4-tuple with 5 stimulus bins per channel.
    """
    dataSeparatedFilt = np.zeros((channelCount, len(data[0,:])))
    for i in range(0,channelCount):
        # 50 Hz mains notch, then band-pass between lowPass..highPass Hz.
        tempData = notchFilter(data[i], sampFreq, 50, 30)
        dataSeparatedFilt[i] = butter_bandpass_filter(tempData, lowPass, highPass, sampFreq, order)
    # Discard the initial transient (first 2500 samples).
    dataSeparatedFilt2 = dataSeparatedFilt[:,2500:]
    eegSignals = ([],[],[],[])
    for i in range(0, channelCount):
        # Five stimulus bins per channel.
        eegSignals[i].append([])
        eegSignals[i].append([])
        eegSignals[i].append([])
        eegSignals[i].append([])
        eegSignals[i].append([])
    count = 0
    detectedBlinks = 0
    for i in range(0, len(stimulusLog)):
        if(count+300 > len(dataSeparatedFilt2[0,:])):
            break
        # Channel 0 (Fp1) is used for blink detection.
        tempFp1Signal = dataSeparatedFilt2[0,count:count+300]
        if(np.max(tempFp1Signal) < 40000):
            for j in range(0, channelCount):
                eegSignals[j][stimulusLog[i]-1].append(dataSeparatedFilt2[j,count:count+300])
        else:
            detectedBlinks += 1
        count += 200
    # eegSignals = eegSignalDeletion(eegSignals, channelCount, deletionWindowAmount)
    return eegSignals, dataSeparatedFilt2, detectedBlinks
#def eegSegmentedDataOfflineEyeClose(eegSignals)
def eegSignalDeletion(eegSignals, channelCount, deletionWindowAmount):
    """Drop the last ``deletionWindowAmount`` windows from each of the 5
    stimulus bins of every channel.

    Mutates ``eegSignals`` in place and returns it.  Raises IndexError if
    a bin holds fewer windows than requested (same as the original).
    """
    for channel in range(channelCount):
        for stimulus in range(5):
            for _ in range(deletionWindowAmount):
                eegSignals[channel][stimulus].pop()
    return eegSignals
def baselineCorrection(eegSignals, channelCount, correctionMs):
    """Subtract each window's initial-baseline mean from that window.

    The baseline is the first correctionMs/2 samples (assumes 2 ms per
    sample).  Only the first three stimulus bins are corrected, matching
    the original.  Mutates ``eegSignals`` in place and returns it.
    """
    baselineSamples = int(correctionMs / 2)
    for ch in range(channelCount):
        for stim in range(3):
            windows = eegSignals[ch][stim]
            for w in range(len(windows)):
                windows[w] = windows[w] - np.mean(windows[w][0:baselineSamples])
    return eegSignals
def p300Creation(eegSignals, channelCount, windowMilliSecond):
    """Average the collected stimulus windows into P300 candidate signals.

    Returns (p300Signals, stdWindows): the per-(channel, stimulus) mean
    and standard deviation across windows, each with shape
    (5 * channelCount, windowMilliSecond / 2) — assumes 2 ms per sample.
    """
    sampleCount = int(windowMilliSecond / 2)
    p300Signals = np.zeros((5 * channelCount, sampleCount))
    stdWindows = np.zeros((channelCount * 5, sampleCount))
    for ch in range(channelCount):
        for stim in range(5):
            row = 5 * ch + stim
            p300Signals[row] = np.mean(eegSignals[ch][stim], axis=0)
            stdWindows[row] = np.std(eegSignals[ch][stim], axis=0)
    return p300Signals, stdWindows
def dataSeparationFromRAW(data, channelCount, resolutions):
    """De-interleave raw channel-multiplexed samples into per-channel rows.

    Same algorithm as distinguishData(): each raw value is scaled by
    resolutions[0]; returns a (channelCount, samples) array.
    """
    perChannel = int(len(data) / channelCount)
    separated = np.zeros((channelCount, perChannel))
    src = 0
    dst = 0
    while src < len(data):
        for ch in range(channelCount):
            separated[ch, dst] = data[src] * resolutions[0]
            src += 1
        dst += 1
    return separated
def segmentedEEGSignalsP300(eegSignals, channelCount, setDirectory, expNo):
    """Per channel: average the first three stimulus bins into 600 ms P300
    candidates, pick the winning stimulus with the STD criterion, and save
    one plot per channel into ``setDirectory``.

    Returns foundStimulus: array of per-channel winning stimulus indices.
    """
    os.chdir(setDirectory)
    windowMilliSecond = 600
    # Bug fix: with `from __future__ import division`, 600 / 2 is a float
    # (300.0) and np.zeros((3, 300.0)) requires an integer dimension; the
    # sibling p300Creation already casts with int().
    windowSize = int(windowMilliSecond / 2)
    stimulusAmount = 3
    foundStimulus = np.zeros((channelCount))
    strings = ['Fp1', 'Fz', 'Cz', 'Pz']  # assumes channelCount <= 4 — TODO confirm
    for i in range(channelCount):
        p300Signals = np.zeros((3, windowSize))
        for j in range(stimulusAmount):
            p300Signals[j] = np.mean(eegSignals[i][j], axis=0)
        foundStimulus[i] = P300FinderAlgorithmSTD(p300Signals)
        # 300 points at 2 ms spacing -> 0..598 ms time axis.
        xAxis = np.arange(0, 599, 2)
        plt.figure()
        for j in range(stimulusAmount):
            plt.plot(xAxis, p300Signals[j], label=[j])
        plt.ylabel('Amplitude [uV]', fontsize=20)
        plt.xlabel('Time [Ms]', fontsize=20)
        plt.legend(loc='upper right', fontsize=10)
        plt.title(strings[i] + ' Location, Found Stimulus :' + str(foundStimulus[i]))
        plt.show()
        plt.savefig(strings[i] + '_experiment_eegSegmentedWindowsFiltered' + str(expNo), bbox_inches='tight',
                    pad_inches=0, dpi=200)
        plt.close()
    return foundStimulus
def plotP300(p300Signals, channelCount, setDirectory, expNo):
    """Plot and save the five averaged stimulus waveforms for each channel.

    p300Signals is laid out as produced by p300Creation: row 5*i+j is
    channel i, stimulus j.  One PNG per channel is written into
    ``setDirectory`` (cwd is changed as a side effect).
    """
    os.chdir(setDirectory)
    strings = ['Fp1', 'Fz', 'Cz', 'Pz']  # assumes channelCount <= 4
    # 300 points at 2 ms spacing -> 0..598 ms time axis.
    xAxis = np.arange(0, 599, 2)
    for i in range(channelCount):
        plt.figure()
        for j in range(5):
            plt.plot(xAxis, p300Signals[5*i+j], label=[j])
        plt.ylabel('Amplitude [uV]', fontsize=20)
        plt.xlabel('Time [Ms]', fontsize=20)
        plt.legend(loc='upper right', fontsize=10)
        plt.title(strings[i] + ' Location')
        plt.show()
        plt.savefig(strings[i] + '_deney_JustStimuluses' + str(expNo), bbox_inches='tight',
                    pad_inches=0, dpi=200)
        plt.close()
def plotP300TargetNonTargetFrequent(p300Signals, channelCount, setDirectory, expNo, stimulusNo):
    """Plot target vs. averaged non-target vs. averaged frequent waveforms
    for each channel and save one PNG per channel.

    stimulusNo (0-2) selects the target; the other two rare stimuli are
    averaged as "Nontarget" and bins 3-4 as "Frequent".

    NOTE(review): the target row is indexed with the 5*i channel offset,
    but the nontarget/frequent rows are not (p300Signals[nontarget[0]]
    instead of p300Signals[5*i+nontarget[0]]), so for i > 0 they re-use
    channel 0's rows — looks like a bug; confirm intended indexing.
    """
    os.chdir(setDirectory)
    strings = ['Fp1', 'Fz', 'Cz', 'Pz']  # assumes channelCount <= 4
    xAxis = np.arange(0, 599, 2)
    # The two rare stimuli that are not the target form the nontarget set.
    if(stimulusNo==0):
        nontarget = np.array([1,2])
    elif(stimulusNo==1):
        nontarget = np.array([0,2])
    else:
        nontarget = np.array([0,1])
    frequent = np.array([3,4])
    targetNontargetFreq = np.zeros((3,300))
    label = ['Target','Nontarget','Frequent']
    for i in range(channelCount):
        plt.figure()
        targetNontargetFreq[0] = p300Signals[5*i+stimulusNo]
        targetNontargetFreq[1] = np.mean([p300Signals[nontarget[0]], p300Signals[nontarget[1]]], axis=0)
        targetNontargetFreq[2] = np.mean([p300Signals[frequent[0]], p300Signals[frequent[1]]], axis=0)
        for j in range(3):
            plt.plot(xAxis, targetNontargetFreq[j], label=label[j])
        plt.ylabel('Amplitude [uV]', fontsize=20)
        plt.xlabel('Time [Ms]', fontsize=20)
        plt.legend(loc='upper right', fontsize=10)
        plt.title(strings[i] + ' Location')
        plt.show()
        plt.savefig(strings[i] + '_tarNontarFrequent_deney' + str(expNo), bbox_inches='tight',
                    pad_inches=0, dpi=200)
        plt.close()
def plotP300WithStds(p300Signals, stdWindows, channelCount, setDirectory, targetStimulus):
    """Plot, per channel, the target-stimulus P300 with +/- one standard
    deviation bands, and save one PNG per channel.

    Rows are indexed as 3*i+targetStimulus, i.e. this expects a layout of
    three stimulus rows per channel (unlike plotP300's five).
    """
    os.chdir(setDirectory)
    strings = ['Fp1', 'Fp2', 'Fz', 'Cz', 'Pz', 'P4', 'P3']  # up to 7 channels
    xAxis = np.arange(0, 599, 2)
    linestyle = '--'
    for i in range(channelCount):
        plt.figure()
        plt.plot(xAxis, p300Signals[3*i+targetStimulus], linewidth = 4, label=['Target Stimulus'])
        # Dashed black envelope: mean +/- one standard deviation.
        plt.plot(xAxis, p300Signals[3*i+targetStimulus] + stdWindows[3*i+targetStimulus], color = 'black', linestyle = linestyle,
                 label=['P300+Std'], linewidth = 0.7)
        plt.plot(xAxis, p300Signals[3*i+targetStimulus] - stdWindows[3*i+targetStimulus], color = 'black', linestyle = linestyle,
                 label=['P300-Std'], linewidth = 0.7)
        plt.ylabel('Amplitude [uV]', fontsize=20)
        plt.xlabel('Time [Ms]', fontsize=20)
        plt.legend(loc='upper left', fontsize=5)
        plt.title(strings[i] + ' Location with Standart Deviations')
        plt.show()
        plt.savefig(strings[i] + '_experiment3_p300_WithStds', bbox_inches='tight',
                    pad_inches=0, dpi=200)
        plt.close()
def plotP300Stds(stdWindows, setDirectory, targetStimulus, channelCount=7):
    """Plot and save the per-channel standard deviation of the target
    stimulus windows (rows indexed as 3*i+targetStimulus).

    Bug fix: ``channelCount`` was referenced but never defined in this
    function (NameError unless a global happened to exist); it is now a
    parameter defaulting to the 7 channels listed in ``strings``, which
    keeps the old call signature working.
    """
    os.chdir(setDirectory)
    xAxis = np.arange(0, 599, 2)
    strings = ['Fp1', 'Fp2', 'Fz', 'Cz', 'Pz', 'P4', 'P3']
    for i in range(channelCount):
        plt.plot(xAxis, stdWindows[3*i+targetStimulus])
        plt.ylabel('Amplitude [uV]', fontsize=20)
        plt.xlabel('Time [Sample]', fontsize=20)
        plt.title("Standart Deviation of " + strings[i] + " Windows")
        plt.show()
        plt.savefig(strings[i] + '_experiment3_stdofWindows', bbox_inches='tight',
                    pad_inches=0, dpi=200)
        plt.close()
def P300FinderAlgorithmPeak(p300Signals):
    """Pick the candidate stimulus with the largest absolute peak amplitude.

    Each row of p300Signals is one averaged stimulus waveform; returns the
    row index of the winner.
    """
    absolutePeaks = np.abs(p300Signals).max(axis=1)
    return np.argmax(absolutePeaks)
def P300FinderAlgorithmSTD(p300Signals):
    """Pick the candidate stimulus whose waveform has the largest standard
    deviation (i.e. the most variability across the window)."""
    return np.argmax(np.std(p300Signals, axis=1))
def P300FinderAlgorithmTotEnergy(p300Signals):
    """Pick the candidate stimulus whose waveform has the largest total
    absolute amplitude (sum of |samples|)."""
    return np.argmax(np.abs(p300Signals).sum(axis=1))
def P300TravellerFinder(p300Signals, intervalLength):
    """Re-run the STD criterion on an interval centred on the global peak.

    First locates the winning stimulus/peak via the peak criterion, then
    clips +/- intervalLength samples around that peak (truncated at the
    0..300 window edges) from all three candidates and applies the STD
    criterion to the clipped signals.
    """
    peakStimulus = P300FinderAlgorithmPeak(p300Signals)
    peakIndex = np.argmax(p300Signals[peakStimulus])
    forward = intervalLength if peakIndex + intervalLength <= 300 else 300 - peakIndex
    backward = intervalLength if peakIndex - intervalLength >= 0 else peakIndex
    clipped = np.zeros((3, forward + backward))
    for row in range(len(clipped)):
        clipped[row] = p300Signals[row, peakIndex - backward: peakIndex + forward]
    return P300FinderAlgorithmSTD(clipped)
#=========================================== Trains ========================================================================
#===========================================================================================================================
def allTypeofTrainsetCreator_forAllBrainChannels(eegSignals, stimulusLogs, downSamplingSize, lastNStimulus, label):
    """Build the four training-set variants separately for each of the
    four EEG channels.

    eegSignals is a list of sessions, each indexable by channel.  Returns
    a 4-tuple (channels 0..3), each entry being the trainsXY tuple
    produced by allTypeofTrainsetCreator for that channel.
    """
    perChannel = [[session[ch] for session in eegSignals] for ch in range(4)]
    results = tuple(
        allTypeofTrainsetCreator(channelSignals, stimulusLogs, downSamplingSize, lastNStimulus, label)
        for channelSignals in perChannel
    )
    return results[0], results[1], results[2], results[3]
def allTypeofTrainsetCreator(eegSignals, stimulusLogs, downSamplingSize, lastNStimulus, label):
    """Build four training-set variants from a list of recording sessions.

    Variants (see the Sub-Train Methods section):
      0 -- downsampled single windows
      1 -- downsampled oddball (last-N averaged) windows
      2 -- raw single windows
      3 -- oddball (last-N averaged) raw windows
    label[i] gives the target-stimulus index for session i.  Returns a
    4-tuple of (X, y) pairs, sessions concatenated row-wise.
    """
    trainX0All, trainX1All, trainX2All, trainX3All, trainY0All, trainY1All, trainY2All, trainY3All = [],[],[],[],[],[],[],[]
    # Fixed seed so the random nontarget subsampling below is reproducible.
    random.seed(312)
    for i in range(len(eegSignals)):
        temp_eegSignals = eegSignals[i]
        stimulusLog = stimulusLogs[i]
        trainX0, trainY0 = P300SKLDADownSampledTrainsetCreator(temp_eegSignals, downSamplingSize, label[i])
        trainX1, trainY1 = P300SKLDAOddballParadigmDownsampledTrainsetCreator(temp_eegSignals, lastNStimulus, stimulusLog, downSamplingSize, label[i])
        trainX2, trainY2 = P300SKLDATrainsetCreator(temp_eegSignals, label[i])
        trainX3, trainY3 = P300SKLDAOddballParadigmTrain(temp_eegSignals, stimulusLog, lastNStimulus, label[i])
        if(i==0):
            # First session initializes the accumulators.
            trainX0All, trainX1All, trainX2All, trainX3All = trainX0, trainX1, trainX2, trainX3
            trainY0All, trainY1All, trainY2All, trainY3All = trainY0, trainY1, trainY2, trainY3
        else:
            # Later sessions are appended row-wise.
            trainX0All = np.append(trainX0All, trainX0, axis=0)
            trainX1All = np.append(trainX1All, trainX1, axis=0)
            trainX2All = np.append(trainX2All, trainX2, axis=0)
            trainX3All = np.append(trainX3All, trainX3, axis=0)
            trainY0All = np.append(trainY0All, trainY0, axis=0)
            trainY1All = np.append(trainY1All, trainY1, axis=0)
            trainY2All = np.append(trainY2All, trainY2, axis=0)
            trainY3All = np.append(trainY3All, trainY3, axis=0)
    trainsXY = ((trainX0All,trainY0All),(trainX1All,trainY1All),(trainX2All,trainY2All),(trainX3All,trainY3All))
    return trainsXY
def allTypeofModelCreator_andCrossValidationAccuracyFinder(trainsXY, typeOfClf, randFrstEstimators, ann_layer1, ann_layer2, brainChannel, plotConf, directorySaveModel, modelFilename):
    """Train one classifier on training-set variant 3, report its 10-fold
    cross-validation accuracy, pickle the model, and optionally plot a
    confusion matrix.

    typeOfClf selects the classifier: 0 LDA, 1 random forest, 2 SGD,
    3 SVM, 4 k-NN, 5 MLP.  Returns (models, scores, confMats).

    NOTE(review): the loop runs only once with j=3 (variant selection of
    the other algorithms is commented out), yet scores has length 2 and
    the pickle file is rewritten per iteration — leftovers from a
    two-variant version.  SGDClassifier's ``n_iter`` argument was removed
    in newer scikit-learn (renamed ``max_iter``); confirm the installed
    version.
    """
    scores = np.zeros((2))
    models = list()
    confMats = list()
    j=0
    class_names = ['Non-target Stimulus', 'Target Stimulus']
    for i in range(1): #only take 1. and 3. algorithm
        # if(i==0):
        #     j=1
        # else:
        j=3
        trainsXYTemp = trainsXY[j]
        #========= Classifiers ==================
        clf = LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage='auto',
                                         solver='lsqr', store_covariance=False, tol=0.0001)
        rndfrst = RandomForestClassifier(n_estimators=randFrstEstimators, criterion='gini', max_depth=None, min_samples_split=2,
                                         min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='auto',
                                         max_leaf_nodes=None, bootstrap=True, oob_score=False, n_jobs=1, random_state=None,
                                         verbose=0, warm_start=False, class_weight=None)
        lineardisc = linear_model.SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
                                                eta0=0.0, fit_intercept=True, l1_ratio=0.15,
                                                learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
                                                penalty='l2', power_t=0.5, random_state=None, shuffle=True,
                                                verbose=0, warm_start=False)
        svmModel = svm.SVC()
        nbrs = neighbors.KNeighborsClassifier(10, weights='distance')
        mlp = MLPClassifier(activation='relu', alpha=1e-05, batch_size='auto',
                            beta_1=0.9, beta_2=0.999, early_stopping=False,
                            epsilon=1e-08, hidden_layer_sizes=(ann_layer1,ann_layer2), learning_rate='constant',
                            learning_rate_init=0.001, max_iter=200, momentum=0.9,
                            nesterovs_momentum=True, power_t=0.5, random_state=1, shuffle=True,
                            solver='adam', tol=0.0001, validation_fraction=0.1, verbose=False,
                            warm_start=False)
        #==========Train==========================
        random.seed(312)
        # For the selected classifier: mean 10-fold CV accuracy, CV
        # predictions (for the confusion matrix), then a final fit on the
        # whole set.
        if(typeOfClf == 0):
            scores[i] = np.mean(cross_val_score(clf, trainsXYTemp[0], trainsXYTemp[1].astype("int"), cv=10))
            y_pred = cross_val_predict(clf,trainsXYTemp[0] ,trainsXYTemp[1].astype("int"), cv=10)
            model = clf.fit(trainsXYTemp[0],trainsXYTemp[1])
        elif(typeOfClf == 1):
            scores[i] = np.mean(cross_val_score(rndfrst, trainsXYTemp[0], trainsXYTemp[1].astype("int"), cv=10))
            print(i)
            y_pred = cross_val_predict(rndfrst,trainsXYTemp[0] ,trainsXYTemp[1].astype("int"), cv=10)
            model = rndfrst.fit(trainsXYTemp[0],trainsXYTemp[1])
        elif(typeOfClf == 2):
            scores[i] = np.mean(cross_val_score(lineardisc, trainsXYTemp[0], trainsXYTemp[1].astype("int"), cv=10))
            y_pred = cross_val_predict(lineardisc,trainsXYTemp[0] ,trainsXYTemp[1].astype("int"), cv=10)
            model = lineardisc.fit(trainsXYTemp[0],trainsXYTemp[1])
        elif(typeOfClf == 3):
            scores[i] = np.mean(cross_val_score(svmModel, trainsXYTemp[0], trainsXYTemp[1].astype("int"), cv=10))
            y_pred = cross_val_predict(svmModel,trainsXYTemp[0] ,trainsXYTemp[1].astype("int"), cv=10)
            model = svmModel.fit(trainsXYTemp[0],trainsXYTemp[1])
        elif(typeOfClf == 4):
            scores[i] = np.mean(cross_val_score(nbrs, trainsXYTemp[0], trainsXYTemp[1].astype("int"), cv=10))
            y_pred = cross_val_predict(nbrs,trainsXYTemp[0] ,trainsXYTemp[1].astype("int"), cv=10)
            model = nbrs.fit(trainsXYTemp[0],trainsXYTemp[1])
        elif(typeOfClf == 5):
            scores[i] = np.mean(cross_val_score(mlp, trainsXYTemp[0], trainsXYTemp[1].astype("int"), cv=10))
            y_pred = cross_val_predict(mlp,trainsXYTemp[0] ,trainsXYTemp[1].astype("int"), cv=10)
            model = mlp.fit(trainsXYTemp[0],trainsXYTemp[1])
        models.append(model)
        #========== Save Model =====================
        os.chdir(directorySaveModel)
        # save the model to disk
        pickle.dump(model, open(modelFilename, 'wb'))
        #======== Confusion Matrix==============
        confMat = confusion_matrix(trainsXYTemp[1].astype("int"),y_pred)
        if(plotConf == 1):
            confMats.append(confMat)
            plt.figure()
            plot_confusion_matrix(confMat, classes=class_names, title= (brainChannel + ' Confusion matrix'))
    return models, scores, confMats
#============================================= Sub-Train Methods ===================================================================
def P300SKLDADownSampledTrainsetCreator(eegSignals, downSamplingSize, targetStimulus):
    """Build a training set of downsampled single windows.

    Windows of the target stimulus are labeled 1; windows of the other two
    rare stimuli are labeled 0 and kept with ~50% probability (random
    subsampling to balance classes — depends on the caller's random seed).
    downSamplingSize is interpreted relative to 500 Hz / 300-sample windows.
    Returns (X, y) with y flattened to an int vector.
    """
    ratio = downSamplingSize / 500
    size = int(300 * ratio)
    p300Candidates = np.empty(shape=[0,size])
    trainY = np.empty(shape=[0,1])
    for i in range(3):
        for j in range(len(eegSignals[i])):
            if(i==targetStimulus):
                p300Candidates = np.row_stack((p300Candidates, resample(eegSignals[i][j], size)))
                trainY = np.row_stack((trainY,1))
            else:
                # Keep roughly half of the nontarget windows.
                if(random.random() > 0.5):
                    p300Candidates = np.row_stack((p300Candidates, resample(eegSignals[i][j], size)))
                    trainY = np.row_stack((trainY,0))
    return p300Candidates, trainY.astype("int").flatten()
def P300SKLDAOddballParadigmDownsampledTrainsetCreator(eegSignals, lastNStimulus, stimulusLog, downSamplingSize, targetStimulus):
    """Build a training set of downsampled, last-N-averaged (oddball)
    windows.

    Each sample is the mean of lastNStimulus consecutive windows of one
    stimulus, resampled to downSamplingSize (relative to 500 Hz).  Target
    samples are labeled 1; nontarget samples are labeled 0 and kept with
    ~50% probability.  Returns (X, y) with y flattened to an int vector.
    """
    stimulusAmount = 3 # 3 amount of stimulus
    windowMilliSecond = 600
    windowSize = int(windowMilliSecond / 2)
    ratio = downSamplingSize / 500
    size = int(windowSize * ratio)
    # Count how many windows of each stimulus occurred before every
    # stimulus has at least lastNStimulus windows available.
    stimulusAmounts = np.zeros((stimulusAmount)).astype("int")
    for i in range(len(stimulusLog)):
        stimulusTemp = stimulusLog[i] - 1
        if(stimulusAmounts[0] >= lastNStimulus and stimulusAmounts[1] >= lastNStimulus and stimulusAmounts[2] >= lastNStimulus):
            break
        else:
            # Frequent stimuli (codes >= 4) are ignored here.
            if(stimulusTemp < 3):
                stimulusAmounts[stimulusTemp] += 1
    p300Candidates = np.empty(shape=[0,size])
    trainY = np.empty(shape=[0,1])
    for i in range(stimulusAmount):
        for j in range(stimulusAmounts[i]-lastNStimulus, len(eegSignals[i])-lastNStimulus):
            # Sliding average of lastNStimulus windows -> one oddball sample.
            tempP300 = np.mean(eegSignals[i][j:j+lastNStimulus], axis=0)
            tempP300 = resample(tempP300, size)
            if(i==targetStimulus):
                p300Candidates = np.row_stack((p300Candidates, tempP300))
                trainY = np.row_stack((trainY,1))
            else:
                # Keep roughly half of the nontarget samples.
                if(random.random() > 0.5):
                    p300Candidates = np.row_stack((p300Candidates, tempP300))
                    trainY = np.row_stack((trainY,0))
    return p300Candidates, trainY.astype("int").flatten()
def P300SKLDATrainsetCreator(eegSignals, targetStimulus):
    """Build a training set of raw (non-downsampled) 300-sample windows.

    Target-stimulus windows are labeled 1; other rare-stimulus windows are
    labeled 0 and kept with ~50% probability (depends on the caller's
    random seed).  Returns (X, y) with y flattened to an int vector.
    """
    p300Candidates = np.empty(shape=[0,300])
    trainY = np.empty(shape=[0,1])
    for i in range(3):
        for j in range(len(eegSignals[i])):
            if(i==targetStimulus):
                p300Candidates = np.row_stack((p300Candidates, eegSignals[i][j]))
                trainY = np.row_stack((trainY,1))
            else:
                # Keep roughly half of the nontarget windows.
                if(random.random() > 0.5):
                    p300Candidates = np.row_stack((p300Candidates, eegSignals[i][j]))
                    trainY = np.row_stack((trainY,0))
    return p300Candidates, trainY.astype("int").flatten()
def P300SKLDAOddballParadigmTrain(eegSignals, stimulusLog, lastNStimulus, targetStimulus):
    """Build a training set of raw last-N-averaged (oddball) windows.

    Like P300SKLDAOddballParadigmDownsampledTrainsetCreator but without
    resampling: each sample is the mean of lastNStimulus consecutive
    300-sample windows of one stimulus.  Returns (X, y) with y flattened
    to an int vector.
    """
    stimulusAmount = 3 # 5 amount of stimulus
    # Count windows per stimulus until each has lastNStimulus available.
    stimulusAmounts = np.zeros((stimulusAmount)).astype("int")
    for i in range(len(stimulusLog)):
        stimulusTemp = stimulusLog[i] - 1
        if(stimulusAmounts[0] >= lastNStimulus and stimulusAmounts[1] >= lastNStimulus and stimulusAmounts[2] >= lastNStimulus):
            break
        else:
            # Frequent stimuli (codes >= 4) are ignored here.
            if(stimulusTemp < 3):
                stimulusAmounts[stimulusTemp] += 1
    p300Candidates = np.empty(shape=[0,300])
    trainY = np.empty(shape=[0,1])
    for i in range(stimulusAmount):
        for j in range(stimulusAmounts[i]-lastNStimulus, len(eegSignals[i])-lastNStimulus):
            # Sliding average of lastNStimulus windows -> one oddball sample.
            tempP300 = np.mean(eegSignals[i][j:j+lastNStimulus], axis=0)
            if(i==targetStimulus):
                p300Candidates = np.row_stack((p300Candidates, tempP300))
                trainY = np.row_stack((trainY,1))
            else:
                # Keep roughly half of the nontarget samples.
                if(random.random() > 0.5):
                    trainY = np.row_stack((trainY,0))
                    p300Candidates = np.row_stack((p300Candidates, tempP300))
    return p300Candidates, trainY.astype("int").flatten()
#==================================================== Tests =======================================================================
def P300SKLDADownSampledTest(model, instantEEGSignal, downSamplingSize):
ratio = downSamplingSize / 500
size = int(300 * ratio)
foundStimuluses = np.zeros((3))
for i in range(3):
tempX = resample(instantEEGSignal[i], size).reshape(1,-1)
foundStimuluses[i] = model.predict(tempX)
foundStimulus = np.argmax(foundStimuluses)
return foundStimulus
def P300SLDAOddballParadigmTest(model, lastNEEGSignals, ifANN):
foundStimuluses = np.zeros((3))
targetProbs = np.zeros((3))
if(ifANN == 1):
for i in range(3):
p300Signals = np.mean(lastNEEGSignals[i], axis=0).reshape(1,-1)
tempProbs = model.predict_proba(p300Signals)
targetProbs[i] = tempProbs[0,1]
foundStimulus = np.argmax(targetProbs)
else:
for i in range(3):
p300Signals = np.mean(lastNEEGSignals[i], axis=0).reshape(1,-1)
foundStimuluses[i] = model.predict(p300Signals)
foundStimulus = np.argmax(foundStimuluses)
return foundStimulus
def P300SKLDAOddballParadigmDownsampledTest(model, lastNEEGSignals, downSamplingSize, ifANN):
ratio = downSamplingSize / 500
size = int(300 * ratio)
foundStimuluses = np.zeros((3,2))
targetProbs = np.zeros((3))
if(ifANN == 1):
for i in range(3):
p300Signals = np.mean(lastNEEGSignals[i], axis=0)
p300SignalDownSampled = resample(p300Signals, size).reshape(1,-1)
tempProbs = model.predict_proba(p300SignalDownSampled)
targetProbs[i] = tempProbs[0,1]
foundStimulus = np.argmax(targetProbs)
else:
for i in range(3):
p300Signals = np.mean(lastNEEGSignals[i], axis=0)
p300SignalDownSampled = resample(p300Signals, size).reshape(1,-1)
foundStimuluses[i] = model.predict(p300SignalDownSampled)
foundStimulus = np.argmax(foundStimuluses)
return foundStimulus
def P300SKLDATest(model, instantEEGSignal):
foundStimuluses = np.zeros((3))
for i in range(3):
foundStimuluses[i] = model.predict(instantEEGSignal[i].reshape(1,-1))
foundStimulus = np.argmax(foundStimuluses)
return foundStimulus
#===================================================================================================================================
def P300RealTimeAnalyzer(eegSignals, channelCount, string):
    """Average each stimulus bin into a 600 ms candidate waveform and run
    all four P300 detection criteria on the result.

    Returns the stimulus chosen by the STD, peak, total-energy and
    "traveller" criteria, in that order.  ``channelCount`` and ``string``
    are kept for interface compatibility; the original's inline real-time
    plotting code has been removed (it was already commented out).
    """
    windowMilliSecond = 600
    stimulusAmount = len(eegSignals)
    windowSize = int(windowMilliSecond / 2)
    candidates = np.zeros((stimulusAmount, windowSize))
    for stim in range(stimulusAmount):
        candidates[stim] = np.mean(eegSignals[stim], axis=0)
    byStd = P300FinderAlgorithmSTD(candidates)
    byPeak = P300FinderAlgorithmPeak(candidates)
    byEnergy = P300FinderAlgorithmTotEnergy(candidates)
    byTraveller = P300TravellerFinder(candidates, 50)
    return byStd, byPeak, byEnergy, byTraveller
def onflineP300Finder(eegSignals, stimulusAmount, stimulusLog, travallerIntervalLength):
    """Replay a recorded session stimulus-by-stimulus, running all four
    P300 criteria after every new window, and tally their decisions.

    Returns the four per-step decision logs plus totStimAmounts, a
    (4, stimulusAmount) count of how often each criterion chose each
    stimulus.

    NOTE(review): p300Signals[stim] is the cumulative sum of that
    stimulus's windows divided by (i+1) — the overall step count — rather
    than by stCounts[stim], the number of windows actually accumulated for
    that stimulus; looks like an averaging bug, confirm intended scaling.
    """
    windowAmount = 0
    for i in range(stimulusAmount):
        windowAmount += len(eegSignals[i])
    windowMilliSecond = 600
    windowSize = int(windowMilliSecond / 2)
    p300Signals = np.zeros((3, windowSize))
    foundStimulusLogStd = []
    foundStimulusLogPeak = []
    foundStimulusLogTotEn = []
    foundStimulusLogTraveller = []
    # Running per-stimulus sums of the 300-sample windows.
    tempEEGWindows = np.zeros((stimulusAmount,300))
    stCounts = np.zeros((3)).astype("int")
    for i in range(len(stimulusLog)):
        print(i)  # progress trace left in from the original
        tempEEGWindows[stimulusLog[i]-1] += eegSignals[stimulusLog[i] - 1][stCounts[stimulusLog[i] - 1]]
        stCounts[stimulusLog[i] - 1] += 1
        p300Signals[stimulusLog[i]-1] = tempEEGWindows[stimulusLog[i]-1] / (i+1)
        # Run every detector on the current running averages.
        stimulusStd = P300FinderAlgorithmSTD(p300Signals)
        stimulusPeak = P300FinderAlgorithmPeak(p300Signals)
        stimulusTotEn = P300FinderAlgorithmTotEnergy(p300Signals)
        stimulusTraveller = P300TravellerFinder(p300Signals, travallerIntervalLength)
        foundStimulusLogStd.append(stimulusStd)
        foundStimulusLogPeak.append(stimulusPeak)
        foundStimulusLogTotEn.append(stimulusTotEn)
        foundStimulusLogTraveller.append(stimulusTraveller)
    # Tally how often each criterion picked each stimulus.
    totStimAmounts = np.zeros((4,stimulusAmount))
    for j in range(stimulusAmount):
        totStimAmounts[0][j] = foundStimulusLogStd.count(j)
    for j in range(stimulusAmount):
        totStimAmounts[1][j] = foundStimulusLogPeak.count(j)
    for j in range(stimulusAmount):
        totStimAmounts[2][j] = foundStimulusLogTotEn.count(j)
    for j in range(stimulusAmount):
        totStimAmounts[3][j] = foundStimulusLogTraveller.count(j)
    return foundStimulusLogStd, foundStimulusLogPeak, foundStimulusLogTotEn, foundStimulusLogTraveller, totStimAmounts
def brainwaveFinder(eegSignal, Fs):
    """Return the signal energy in the five classic EEG bands.

    The signal is notch-filtered at 50 Hz, band-passed into the delta, theta,
    alpha, beta and gamma bands, and each band's total energy (sum of squared
    samples) is returned as a 5-element array in that order.
    """
    eegSignal = notchFilter(eegSignal, Fs, 50, 30)
    # BUGFIX: the delta band previously hard-coded fs=500 instead of Fs.
    deltaSignal = butter_bandpass_filter(eegSignal, 0.5, 3, Fs, order=3)
    thetaSignal = butter_bandpass_filter(eegSignal, 3, 8, Fs, order=3)
    alphaSignal = butter_bandpass_filter(eegSignal, 8, 12, Fs, order=3)
    betaSignal = butter_bandpass_filter(eegSignal, 12, 38, Fs, order=3)
    gammaSignal = butter_bandpass_filter(eegSignal, 38, 48, Fs, order=3)
    allEnergies = np.array([np.sum(deltaSignal ** 2),
                            np.sum(thetaSignal ** 2),
                            np.sum(alphaSignal ** 2),
                            np.sum(betaSignal ** 2),
                            np.sum(gammaSignal ** 2)])
    return allEnergies
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    Render the confusion matrix *cm* with matplotlib and echo it to stdout.
    Set ``normalize=True`` to display per-row fractions instead of raw counts.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)
    cell_format = '.2f' if normalize else 'd'
    cutoff = cm.max() / 2.
    # Annotate every cell, switching to white text on dark cells.
    for (row, col), value in np.ndenumerate(cm):
        plt.text(col, row, format(value, cell_format),
                 horizontalalignment="center",
                 color="white" if value > cutoff else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
#==================================== Area 51 ======================================================
# ratio = 20 / 500
# size = int(300 * ratio)
# targetProbs = np.zeros((3))
# for i in range(3):
# p300Signals = np.mean(tempBigWindow[i], axis=0)
# p300SignalDownSampled = resample(p300Signals, size).reshape(1,-1)
# tempProbs = models1[0].predict_proba(p300SignalDownSampled)
# foundStimuluses[i,0] = np.max(tempProbs)
# foundStimuluses[i,1] = np.argmax(tempProbs)
#
# foundStimulus = foundStimuluses[np.argmax(foundStimuluses[:,0]),1]
# for i in range(3):
# p300Signals = np.mean(tempBigWindow[i], axis=0)
# p300SignalDownSampled = resample(p300Signals, size).reshape(1,-1)
# foundStimuluses[i] = models2[0].predict(p300SignalDownSampled)
# foundStimulus = np.argmax(foundStimuluses)
# for i in range(3):
# p300Signals = np.mean(tempBigWindow[i], axis=0).reshape(1,-1)
# tempProbs = loaded_model.predict_proba(p300Signals)
# targetProbs[i] = tempProbs[0,1]
# foundStimulus = np.argmax(targetProbs)
# foundStimuluses = np.zeros((3))
# for i in range(3):
# p300Signals = np.mean(tempBigWindow[i], axis=0).reshape(1,-1)
# foundStimuluses[i] = models3[1].predict(p300Signals)
# foundStimulus = np.argmax(foundStimuluses)
|
import unittest
import os
import platform
from conans import tools
from conans.test.utils.tools import TestClient
from conans.test.utils.test_files import temp_folder
from conans.paths import CONANFILE, long_paths_support
from conans.model.ref import ConanFileReference, PackageReference
import re
conanfile_py = """
from conans import ConanFile
class AConan(ConanFile):
name = "MyPackage"
version = "0.1.0"
short_paths=False
"""
with_deps_path_file = """
from conans import ConanFile
class BConan(ConanFile):
name = "MyPackage2"
version = "0.2.0"
requires = "MyPackage/0.1.0@myUser/testing"
"""
deps_txt_file = """
[requires]
MyPackage2/0.2.0@myUser/testing
"""
class InfoFoldersTest(unittest.TestCase):
    """Integration tests for the folder paths printed by ``conan info --paths``."""

    def setUp(self):
        # Reference strings shared by every test.
        self.user_channel = "myUser/testing"
        self.conan_ref = "MyPackage/0.1.0@%s" % self.user_channel
        self.conan_ref2 = "MyPackage2/0.2.0@%s" % self.user_channel

    def _prepare_deps(self, client):
        """Export MyPackage and MyPackage2, then leave a conanfile.txt consumer."""
        client.save({CONANFILE: conanfile_py})
        client.run("export %s" % self.user_channel)
        client.save({CONANFILE: with_deps_path_file}, clean_first=True)
        client.run("export %s" % self.user_channel)
        client.save({'conanfile.txt': deps_txt_file}, clean_first=True)

    def test_basic(self):
        """--paths lists export/source/build/package dirs for a single package."""
        client = TestClient()
        client.save({CONANFILE: conanfile_py})
        client.run("export %s" % self.user_channel)
        client.run("info --paths %s" % (self.conan_ref))
        base_path = os.path.join("MyPackage", "0.1.0", "myUser", "testing")
        output = client.user_io.out
        self.assertIn(os.path.join(base_path, "export"), output)
        self.assertIn(os.path.join(base_path, "source"), output)
        # Package id of the default (settings-less) binary configuration.
        id_ = "5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9"
        self.assertIn(os.path.join(base_path, "build", id_), output)
        self.assertIn(os.path.join(base_path, "package", id_), output)

    def test_deps_basic(self):
        """--paths shows folders for the whole dependency graph."""
        client = TestClient()
        self._prepare_deps(client)
        # Querying by reference and by local conanfile.txt must both work.
        for ref in [self.conan_ref2, ""]:
            client.run("info --paths %s" % (ref))
            output = client.user_io.out
            base_path = os.path.join("MyPackage", "0.1.0", "myUser", "testing")
            self.assertIn(os.path.join(base_path, "export"), output)
            self.assertIn(os.path.join(base_path, "source"), output)
            base_path = os.path.join("MyPackage2", "0.2.0", "myUser", "testing")
            self.assertIn(os.path.join(base_path, "export"), output)
            self.assertIn(os.path.join(base_path, "source"), output)

    def test_deps_specific_information(self):
        """--only/--package_filter restrict the printed folders."""
        client = TestClient()
        self._prepare_deps(client)
        # Exact-package filter: only MyPackage's package folder appears.
        client.run("info --paths --only package_folder --package_filter MyPackage/*")
        output = client.user_io.out
        base_path = os.path.join("MyPackage", "0.1.0", "myUser", "testing")
        self.assertIn(os.path.join(base_path, "package"), output)
        self.assertNotIn("build", output)
        self.assertNotIn("MyPackage2", output)
        # Wildcard filter: both MyPackage and MyPackage2 match.
        client.run("info --paths --only package_folder --package_filter MyPackage*")
        output = client.user_io.out
        base_path = os.path.join("MyPackage", "0.1.0", "myUser", "testing")
        self.assertIn(os.path.join(base_path, "package"), output)
        self.assertNotIn("build", output)
        base_path = os.path.join("MyPackage2", "0.2.0", "myUser", "testing")
        self.assertIn(os.path.join(base_path, "package"), output)

    def test_single_field(self):
        """--only=build_folder prints the build folder and nothing else."""
        client = TestClient()
        client.save({CONANFILE: conanfile_py})
        client.run("export %s" % self.user_channel)
        client.run("info --paths --only=build_folder %s" % (self.conan_ref))
        base_path = os.path.join("MyPackage", "0.1.0", "myUser", "testing")
        output = client.user_io.out
        self.assertNotIn("export", output)
        self.assertNotIn("source", output)
        self.assertIn(os.path.join(base_path, "build"), output)
        self.assertNotIn("package", output)

    def test_short_paths(self):
        """With short_paths=True, folders map into CONAN_USER_HOME_SHORT
        on platforms without long-path support."""
        folder = temp_folder(False)
        short_folder = os.path.join(folder, ".cn")
        with tools.environment_append({"CONAN_USER_HOME_SHORT": short_folder}):
            client = TestClient(base_folder=folder)
            client.save({CONANFILE: conanfile_py.replace("False", "True")})
            client.run("export %s" % self.user_channel)
            client.run("info --paths %s" % (self.conan_ref))
            base_path = os.path.join("MyPackage", "0.1.0", "myUser", "testing")
            output = client.user_io.out
            self.assertIn(os.path.join(base_path, "export"), output)
            if long_paths_support:
                self.assertIn(os.path.join(base_path, "source"), output)
                self.assertIn(os.path.join(base_path, "build"), output)
                self.assertIn(os.path.join(base_path, "package"), output)
                self.assertNotIn(short_folder, output)
            else:
                self.assertNotIn(os.path.join(base_path, "source"), output)
                self.assertNotIn(os.path.join(base_path, "build"), output)
                self.assertNotIn(os.path.join(base_path, "package"), output)
                self.assertIn("source_folder: %s" % short_folder, output)
                self.assertIn("build_folder: %s" % short_folder, output)
                self.assertIn("package_folder: %s" % short_folder, output)
            # Ensure that the inner folders are not created (that could affect
            # pkg creation flow
            ref = ConanFileReference.loads(self.conan_ref)
            id_ = re.search('ID:\s*([a-z0-9]*)', str(client.user_io.out)).group(1)
            pkg_ref = PackageReference(ref, id_)
            for path in (client.client_cache.source(ref, True),
                         client.client_cache.build(pkg_ref, True),
                         client.client_cache.package(pkg_ref, True)):
                self.assertFalse(os.path.exists(path))
                if not long_paths_support:  # The parent has been created for .conan_link
                    self.assertTrue(os.path.exists(os.path.dirname(path)))

    def test_direct_conanfile(self):
        """A bare local conanfile prints no cache folder lines at all."""
        client = TestClient()
        client.save({CONANFILE: conanfile_py})
        client.run("info")
        output = client.user_io.out
        self.assertNotIn("export_folder", output)
        self.assertNotIn("source_folder", output)
        self.assertNotIn("build_folder", output)
        self.assertNotIn("package_folder", output)
|
class Time:
    """Plain record representing a time of day.

    Attributes (assigned externally by the helper functions below):
    hour, minute, second.
    """
def print_time(t):
    """Print a Time object in zero-padded HH:MM:SS form."""
    hours, minutes, seconds = t.hour, t.minute, t.second
    print("%.2d:%.2d:%.2d" % (hours, minutes, seconds))
def time2int(t):
    """Convert a Time object to the number of seconds since midnight."""
    return (t.hour * 60 + t.minute) * 60 + t.second
def int2time(seconds):
    """Build a new Time object from a count of seconds since midnight."""
    t = Time()
    remaining_minutes, t.second = divmod(seconds, 60)
    t.hour, t.minute = divmod(remaining_minutes, 60)
    return t
def is_after(t1, t2):
    """Return True if t1 is chronologically later than t2."""
    return time2int(t1) > time2int(t2)
def increment(t, seconds):
    """Advance Time object *t* in place by the given number of seconds.

    This is the modifier version; see p_increment for the pure one.
    """
    total = time2int(t) + seconds
    remaining_minutes, t.second = divmod(total, 60)
    t.hour, t.minute = divmod(remaining_minutes, 60)
def p_increment(t, seconds):
    """Return a new Time advanced by *seconds*, leaving *t* untouched.

    Pure-function counterpart of increment.
    """
    return int2time(time2int(t) + seconds)
def main():
    """Demo: increment a Time object both purely and in place, printing each."""
    morning = Time()
    morning.hour, morning.minute, morning.second = 11, 59, 30
    early = Time()
    early.hour, early.minute, early.second = 2, 30, 0
    print_time(p_increment(early, 3661))
    increment(early, 3661)
    print_time(early)

if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Imports
from skimage import io
import pandas as pd
import fastparquet as fp
import numpy as np
import os
import tempfile
def readTiff(filename):
    """Read a 3-D tiff stack and return a tidy Pandas dataframe.

    Each voxel becomes one row: centred pixel coordinates ``x``/``y``,
    the slice index ``angle`` and the int32 intensity ``value``.
    """
    im = io.imread(filename)
    print(im.shape[2], im.shape[1], im.shape[0])
    dataSize = im.shape[0] * im.shape[1] * im.shape[2]
    imgdata1d = im.reshape(dataSize)
    sliceSize = im.shape[2] * im.shape[1]
    # Use floor division (//) so the row and slice indices stay integral on
    # both Python 2 and Python 3 — plain / becomes float division on Python 3
    # and would corrupt the 'y' and 'angle' columns.
    data = {
        'x': [(float(i % im.shape[2]) - (0.5 * float(im.shape[2]))) for i in range(0, dataSize)],
        'y': [(float(i // im.shape[2] % im.shape[1]) - (0.5 * float(im.shape[1]))) for i in range(0, dataSize)],
        'angle': [i // sliceSize for i in range(0, dataSize)],
        'value': imgdata1d.astype(np.int32),
    }
    df = pd.DataFrame(data)
    return df
def writeParquet(inputFilename, df):
    """Export dataframe *df* as a gzip-compressed Parquet file.

    The file is written to the system temp directory, named after the input
    file's basename with a ``.parq`` extension; the output path is returned.
    """
    filenamePrefix = os.path.splitext(os.path.basename(inputFilename))[0]
    outFilepath = os.path.join(tempfile.gettempdir(), filenamePrefix + '.parq')
    fp.write(outFilepath, df, compression='GZIP')
    # print as a function call so the script also runs on Python 3
    # (the original bare ``print outFilepath`` is Python-2-only syntax).
    print(outFilepath)
    return outFilepath
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Script to convert data in tiff format to Parquet format')
parser.add_argument('--tiff', dest='filename', help='input tiff file')
parser.add_argument('--hdfs', dest='hdfsroot', help='HDFS root for data upload')
args = parser.parse_args()
# Read TIFF file and convert it into a Pandas dataframe
df = readTiff(args.filename)
# Export dataframe as parquet
outFilepath = writeParquet(args.filename, df)
|
import xml.etree.ElementTree as ElementTree
import json
import datetime
# add <!doctype HTML> yourself
# Load today's programme data and the HTML skeleton the guide is built into.
# Use a context manager so the file handle is closed even if json.load fails.
with open("today.json") as f:
    jsondata = json.load(f)
tree = ElementTree.parse("base.html")
html = tree.getroot()
guide = html.find("./body/div[@id='guide']")
TFACTOR = 3  # 1 min = TFACTOR pixels
x = 150      # running left offset (px) within the current channel row
now = datetime.datetime.now()
def tvgidsdatum_to_datetime(datumstring):
    """Parse a tvgids timestamp such as "2014-01-28 07:00:00" into a datetime."""
    return datetime.datetime.strptime(datumstring, "%Y-%m-%d %H:%M:%S")
def date_to_pixels(datum):
    # Stub: presumably intended to map a datetime to a horizontal pixel
    # offset; never implemented and currently unused.
    pass
def add_program_to(xml_el, d):
    """Append one programme <div> to *xml_el* and advance the global x offset."""
    global x
    titel = d[u'titel']
    startd = tvgidsdatum_to_datetime(d[u'datum_start'])
    endd = tvgidsdatum_to_datetime(d[u'datum_end'])
    # Width in pixels: duration in minutes scaled by TFACTOR.
    runtime = TFACTOR * (endd - startd).total_seconds() / 60.0
    css = "left:" + str(x) + "px;width:" + str(runtime) + "px"
    tooltip = (titel + " | " + startd.strftime("%H:%M") + " - "
               + endd.strftime("%H:%M"))
    el = ElementTree.SubElement(xml_el, "div",
                                {"class": "prog", "style": css, "title": tooltip})
    el.text = titel
    x += runtime + 5
#jsondata = {"channelname":[{program}, {program}]}
for channelname in jsondata['order']:
programlist = jsondata[channelname]
chan_el = ElementTree.SubElement(guide, "div", {"class":"channel"})
channame_el = ElementTree.SubElement(chan_el, "div",
{"class":"channelname"})
channame_el.text = channelname
x = 150
for progdict in programlist:
try:
add_program_to(chan_el, progdict)
except Exception as e:
el = ElementTree.SubElement(chan_el, "div")
el.text = e.__str__();
break
#break
print('<!doctype HTML>')
print(ElementTree.tostring(html))
|
import pymorphy2
import string
# Normalise a text into a list of lemmatised tokens.
def tokenize_text(file_text):
    """Split *file_text* on spaces and return lowercase, punctuation-free,
    lemmatised (normal-form) tokens."""
    morph = pymorphy2.MorphAnalyzer()
    words = file_text.split(" ")
    # Drop bare punctuation tokens and lowercase the rest.
    words = [w.lower() for w in words if w not in string.punctuation]
    # Strip guillemet quotation marks.
    words = [w.replace("«", "").replace("»", "") for w in words]
    # Replace every token with its dictionary (normal) form.
    return [morph.parse(w)[0].normal_form for w in words]
|
from tensorflow.keras.preprocessing.image import ImageDataGenerator
def process_data(train_path, test_path, img_dims, batch_size):
    """
    Build the training generator (with augmentation) and the test generator.

    Parameters
    ----------
    train_path : directory of training images, one sub-folder per class
    test_path : directory of test images, one sub-folder per class
    img_dims : target size; images are resized to (img_dims, img_dims)
    batch_size : number of images yielded per iterator step

    Returns
    -------
    (train_gen, test_gen) : DirectoryIterator pair
    """
    # Augment only the training set; both sets are rescaled to [0, 1].
    augmenting_datagen = ImageDataGenerator(rescale=1. / 255,
                                            zoom_range=0.3,
                                            horizontal_flip=True)
    plain_datagen = ImageDataGenerator(rescale=1. / 255)

    train_gen = augmenting_datagen.flow_from_directory(directory=train_path,
                                                       target_size=(img_dims, img_dims),
                                                       batch_size=batch_size,
                                                       class_mode='categorical',
                                                       shuffle=True)
    test_gen = plain_datagen.flow_from_directory(directory=test_path,
                                                 target_size=(img_dims, img_dims),
                                                 batch_size=batch_size,
                                                 class_mode='categorical',
                                                 shuffle=False)
    return train_gen, test_gen
#!/usr/bin/env python
#coding:utf-8
from flask import Flask,render_template,request
from flask_jsonrpc import JSONRPC
import json
app = Flask(__name__)
# Expose JSON-RPC methods under the /api endpoint.
jsonrpc = JSONRPC(app, '/api')
@jsonrpc.method('user.list')
def index(username, passwd):
    """Toy JSON-RPC login check returning a greeting string.

    NOTE(review): credentials are hard-coded in plain text — acceptable for
    a demo only, never for production.
    """
    authenticated = username == 'chen' and passwd == '123'
    return ('Hello,Flask JSON-RPC Blueprint...Correct!' if authenticated
            else 'Hello,Flask JSON-RPC Blueprint...Fail!')
if __name__ == "__main__":
app.run(host='0.0.0.0',port=8000,debug=True)
|
class Solution:
    def arrayPairSum(self, nums: "List[int]") -> int:
        """Return the maximum possible sum of min(a, b) over pairs of *nums*.

        Sorting and taking every other element pairs each value with its
        nearest larger neighbour, which is optimal.
        (The annotation is a string: the original bare ``List`` was never
        imported from typing and raised NameError at class-definition time.)
        """
        if not nums:
            return 0
        return sum(sorted(nums)[::2])
|
import codecademylib3_seaborn
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.neighbors import KNeighborsClassifier
from matplotlib import pyplot as plt
breast_cancer_data = load_breast_cancer()
# NOTE(review): despite the names, ``validation_test`` holds the validation
# *labels* and ``validation_data`` the validation features — verify before
# renaming anything.
X_train, validation_data, y_train, validation_test = train_test_split(breast_cancer_data.data, breast_cancer_data.target, test_size = 0.2, random_state = 1)
k_list = []
accuracies = []
# Sweep k from 1 to 99 and record the validation accuracy for each value.
for k in range(1, 100):
    classifier = KNeighborsClassifier(n_neighbors=k)
    classifier.fit(X_train, y_train)
    k_list.append(k)
    accuracies.append(classifier.score(validation_data, validation_test))
# Plot accuracy vs k to pick the best neighbour count visually.
plt.plot(k_list, accuracies)
plt.xlabel('K VALUES')
plt.ylabel('Accuracy Scores')
plt.title('Breast Cancer Classifier Accuracy')
plt.show()
# print(X_train.shape)
# print(validation_data.shape)
# print(y_train.shape)
# print(validation_test.shape)
# print(breast_cancer_data.data[0])
# print(breast_cancer_data.feature_names)
# print(breast_cancer_data.target)
# print(breast_cancer_data.target_names) |
#https://leetcode-cn.com/contest/weekly-contest-218/problems/goal-parser-interpretation/
class Solution:
    def interpret(self, command: str) -> str:
        """Goal-parser interpreter: "G" -> "G", "()" -> "o", "(al)" -> "al".

        Unrecognised characters are skipped one at a time.
        """
        pieces = []
        pos = 0
        size = len(command)
        while pos < size:
            ch = command[pos]
            if ch == '(':
                nxt = command[pos + 1]
                if nxt == ')':
                    pieces.append("o")
                    pos += 2
                    continue
                if nxt == 'a':
                    pieces.append("al")
                    pos += 4
                    continue
            elif ch == 'G':
                pieces.append('G')
                pos += 1
                continue
            pos += 1
        return "".join(pieces)
# Quick manual check; expected output: alGalooG
solution = Solution()
command = "(al)G(al)()()G"
print(solution.interpret(command))
import random
import time
# Append one "<index> <random value>" sample every 100 ms. The file is
# reopened per iteration (as before) so anything tailing data.txt sees each
# line as soon as it is written; `with` guarantees the handle is closed even
# if write() fails.
for x in range(360):
    with open("data.txt", "a") as data:
        line1 = str(x)
        line2 = str(random.uniform(1, 10))
        data.write(line1 + ' ' + line2 + '\n')
    time.sleep(0.1)
n = input("Enter n: ")
n = int(n)
count = 1
divisors = []
sum_of_divisors = 0
is_perfect = False
while count <= n - 1:
if n % count == 0:
divisors += [count]
sum_of_divisors += count
count += 1
if sum_of_divisors == n:
is_perfect = True
print("Number {} with divisors {} is perfect? {}".format(n, divisors, is_perfect)) |
import datetime
import secrets
from blog import app, mongo
from blog.decorators import token_required
from blog.posts.models import Post
from flask_pymongo import ObjectId
from flask import request, jsonify
@app.route('/post/create', methods=['POST'])
@token_required
def create_post(current_user):
    """Create a new post from the JSON body, stamped with the caller's username."""
    data = request.json
    data['username'] = current_user['username']
    post = Post(data)
    # Post.create() is assumed to build the HTTP response itself — confirm.
    return post.create()
@app.route('/post/<_id>', methods=['GET'])
@token_required
def retrieve_post(_, _id):
    """Return a single post by id, with the internal `_id` field stripped.

    NOTE(review): if no post matches, this returns JSON ``null`` with 200
    rather than a 404 — confirm that is the intended contract.
    """
    post = mongo.db.post.find_one(
        {'_id': ObjectId(_id)},
        {'_id': 0}
    )
    return jsonify(post), 200
@app.route('/post/update/<_id>', methods=['PUT'])
@token_required
def update_post(_, _id):
    """Apply the JSON body as a $set update, then return the updated post.

    NOTE(review): the body is applied unvalidated — any field, including
    'username', can be overwritten; confirm that is acceptable.
    """
    data = request.json
    mongo.db.post.update_one(
        {'_id': ObjectId(_id)},
        {'$set': data}
    )
    post = mongo.db.post.find_one(
        {'_id': ObjectId(_id)},
        {'_id': 0}
    )
    return jsonify(post), 200
@app.route('/post/delete/<_id>', methods=['DELETE'])
@token_required
def delete_post(current_user, _id):
    """Delete a post and detach its id from the current user's posts array."""
    # Deleting post itself
    mongo.db.post.delete_one(
        {'_id': ObjectId(_id)}
    )
    # Deleting post's public id from user posts array
    mongo.db.user.update_one(
        {'public_id': current_user['public_id']},
        {'$pull': {'posts': ObjectId(_id)}}
    )
    return jsonify({'detail': 'Post has been deleted!'}), 200
# Print the first 11 terms of an arithmetic progression.
primeiroTermo = float(input('Digite o primeiro termo: '))
diferenca = float(input('Diferenca: '))
print(primeiroTermo, end=" >> ")
for _ in range(10):
    primeiroTermo += diferenca
    print(primeiroTermo, end=" >> ")
print("FIM")
"""
2017 Steamworks vision code for Team 2811 (StormBots)
"""
import cv2
import numpy as np
from grip import GripPipeline
import os
from networktables import NetworkTables
import logging
import time
import sys
import math
import datetime
import traceback
# tested using pip package Adafruit-GPIO
# sudo pip install Adafruit-GPIO
import RPi.GPIO as GPIO
from concurrent.futures import ThreadPoolExecutor
##
## Meaty vision functions!
##
def get_diff(mirror=False):
    """Capture two consecutive frames with the LED toggled off in between and
    return ``(valid, diff_image)``; the diff isolates LED-lit features.

    NOTE(review): the initial GPIO.output(True) is commented out, so the
    first frame is only "lights on" if the LED was left on by the previous
    call/main loop — confirm against the main loop's LED handling.
    """
    #GPIO.output(led_pin, True)
    #time.sleep(.05)
    lightson_ret, lightson_img = cam.read()
    GPIO.output(led_pin, False)
    #time.sleep(.05)
    lightsoff_ret, lightsoff_img = cam.read()
    GPIO.output(led_pin, True)
    if not lightson_ret or not lightsoff_ret:
        print("Invalid image!")
        # Either grab failed: return an all-black 640x480 frame flagged as
        # invalid so the caller can process it harmlessly.
        height=480
        width=640
        return (False, (np.zeros((height,width,3), np.uint8)))
    # Pixel-wise subtraction: only things illuminated by the LED ring remain.
    diff_img = cv2.subtract(lightson_img, lightsoff_img)
    if mirror:
        diff_img = cv2.flip(diff_img, 1)
    return (True, diff_img)
def get_target_xy(img):
    """Locate the boiler target in *img*.

    Runs the GRIP pipeline, pairs up candidate contours, scores each pair on
    left-edge alignment and width/height/area ratios (lower score = better),
    and returns ``(found, cx, cy)`` for the centroid of the top tape of the
    best pair, or ``(False, 0, 0)`` when no pair exists.
    """
    grip.process(img)
    contours = grip.filter_contours_output
    img2 = cv2.drawContours(img, contours, -1, (0, 255, 0), 3)
    if drawmode == DRAW_CONTOURS:
        cv2.imshow('diff w/ contours', img2)
        if cv2.waitKey(1) == 27:  # Esc
            cv2.destroyAllWindows()
    # Collect [contour index, centroid x, centroid y, area] per contour.
    dArr = []
    for i in range(len(contours)):
        moments1 = cv2.moments(contours[i])
        if moments1['m00'] == 0:
            # BUGFIX: a degenerate (zero-area) contour previously caused a
            # ZeroDivisionError when computing the centroid.
            continue
        cx = int(moments1['m10'] / moments1['m00'])
        cy = int(moments1['m01'] / moments1['m00'])
        area = cv2.contourArea(contours[i])
        dArr.append([i, cx, cy, area])
    # Sort biggest-first so index i of a pair is the (taller) top tape.
    dArr.sort(key=lambda x: x[3], reverse=True)
    ## Traits: Group Height, dTop, LEdge, Width ratio, Height ratio
    traitAnalysisArr = []
    for i in range(len(dArr) - 1):
        for j in range(i + 1, len(dArr)):
            # i = larger (top) tape, j = smaller (bottom) tape of the pair.
            x, y, w, h = cv2.boundingRect(contours[dArr[i][0]])
            x2, y2, w2, h2 = cv2.boundingRect(contours[dArr[j][0]])
            # How well the left edges of the two tapes line up.
            lEdgeTestValue = abs((((x - x2) / w) + 1))
            widthCompareTestValue = abs((w / w2))         # widths ~ equal
            heightCompareTestValue = abs((h / (2 * h2)))  # top ~ 2x bottom height
            areaCompareValue = abs(((w * h) / (w2 * h2)) - 2)
            totalTestScore = (lEdgeTestValue + widthCompareTestValue
                              + heightCompareTestValue + areaCompareValue)
            traitAnalysisArr.append([i, j, totalTestScore])
    ## end traits loop
    bestIdx = 0
    bestScore = sys.maxsize  # Scores are like golf; lower is better.
    if (len(traitAnalysisArr) > 0):  # Do we have any contour pairs?
        for i in range(len(traitAnalysisArr)):
            if (traitAnalysisArr[i][2] < bestScore):
                bestIdx = i
                bestScore = traitAnalysisArr[i][2]
        # BUGFIX: report the best-scoring pair instead of whatever value the
        # loop variable ``i`` happened to end on (the old code's own TODO
        # comment flagged exactly this).
        print('Best score: pair', traitAnalysisArr[bestIdx], 'with a score of', traitAnalysisArr[bestIdx][2], '')
        print(' ^ Contour 1: ', dArr[traitAnalysisArr[bestIdx][0]], ' || Contour 2: ', dArr[traitAnalysisArr[bestIdx][1]])
        cdata = dArr[traitAnalysisArr[bestIdx][0]]
        dist_cy = cdata[2]
        dist_cx = cdata[1]
        return True, dist_cx, dist_cy
    else:
        return False, 0, 0
def get_distance_to_boiler(target_cy):
    """Estimate the horizontal distance (feet) to the boiler target.

    Converts the target centroid's vertical pixel row into an elevation angle
    from the camera tilt and vertical FOV, then solves
    ``tan(angle) = height / distance`` for the known target height.
    (The earlier "fov_angle_top" computation was dead code — its result was
    overwritten before use — and has been removed.)
    """
    camera_angle = 38       # camera tilt back from vertical, degrees
    vertical_fov = 52.0175  # full vertical field of view in degrees (480 px)
    # angle_from_vertical = cam tilt + (pixels from top) * (degrees per pixel)
    angle_from_vertical = camera_angle + target_cy / 480 * vertical_fov
    total_angle = 90 - angle_from_vertical  # referenced to the horizon
    print("CY ", target_cy)
    print("Angle ", total_angle)
    # Target height above the camera: 8 ft 2 in minus the 44 in camera height.
    target_height = 8 + (2 / 12.0) - (44 / 12.0)
    total_angle_rad = total_angle * (math.pi / 180.0)
    # tan(total_angle) = target_height / distance  =>  solve for distance.
    distance = target_height / math.tan(total_angle_rad)
    print("Distance ", distance)
    return distance
def get_horizontal_angle_offset(target_cx):
    """Return the horizontal angle (degrees) from image centre (x=320) to the target."""
    pixels_from_center = target_cx - 320
    return (1 / 12.30355) * pixels_from_center  # 12.30355 px per degree
##
## GENERAL SETUP SUB ROUTINES
##
def set_os_camera_parameters():
    """Disable driver-side auto settings that interfere with the diff pipeline."""
    # Set up the camera properties to avoid stupid problems
    os.system("v4l2-ctl -c backlight_compensation=0")
    # Your tweaks here
    # os.system("v4l2-ctl -c ")
def LED_initialize(led):
    """Configure *led* (board-numbered GPIO pin) as an output and switch it on.

    (The unused local ``led_high`` from the original has been removed.)
    """
    GPIO.setmode(GPIO.BOARD)  # Board-style counting: the pin of the IDC cable
    #GPIO.setmode(GPIO.BCM)   # (alternative: internal IO numbering)
    GPIO.setup(led, GPIO.OUT)  # Configure the GPIO as an output
    GPIO.output(led, True)
##
## NETWORK TABLES HELPER FUNCTIONS WITH WRAPPED TRY/EXCEPTS
##
def NT_initialize(address):
    """Try to connect NetworkTables to *address*; return True on success.

    (The original's ``else: return False`` after the try/except was
    unreachable — the try body always returns — and has been removed.)
    """
    try:
        NetworkTables.initialize(server=address)
        print("Connected to ", address)
        NT_getTable()
        return True
    except Exception:
        # Connection failed; the caller will try the next candidate address.
        return False
def NT_getTable(tableName="vision"):
if NetworkTables.isConnected():
table = NetworkTables.getTable(tableName)
print("Network table located!")
return table
else:
return None
def NT_putBoolean(key, value):
    """Best-effort putBoolean; returns None when the table is unavailable.

    BUGFIX: the original ``except: return default`` referenced an undefined
    name and raised NameError instead of failing quietly.
    """
    try:
        return table.putBoolean(key, value)
    except Exception:
        return None
def NT_putNumber(key, value):
    """Best-effort putNumber; swallows errors when the table is unavailable."""
    try:
        return table.putNumber(key, value)
    except Exception:
        pass
def NT_putNumberArray(key, value):
    """Best-effort putNumberArray; swallows errors when the table is unavailable."""
    try:
        table.putNumberArray(key, value)
    except Exception:
        pass
def NT_putString(key, value):
    """Best-effort putString; swallows errors when the table is unavailable."""
    try:
        table.putString(key, value)
    except Exception:
        pass
def NT_getBoolean(key, default):
    """Read a boolean from the table, falling back to *default* on any failure."""
    try:
        return table.getBoolean(key, default)
    except Exception:
        return default
# TODO: Maybe make this invalidate our table/networkTable connection?
def NT_getNumber(key, default):
    """Read a number from the table, falling back to *default* on any failure."""
    try:
        return table.getNumber(key, default)
    except Exception:
        return default
def NT_delete(key):
    """Best-effort delete; returns None when the table is unavailable.

    BUGFIX: the original ``except: return default`` referenced an undefined
    name and raised NameError instead of failing quietly.
    """
    try:
        return table.delete(key)
    except Exception:
        return None
# GPIO pin (board numbering) driving the vision LED ring.
led_pin = 40
networkTableConnected = False
table = None       # NetworkTables table handle; stays None until connected
cam = None         # cv2.VideoCapture handle; None until a camera is opened
frame_count = 0
DEBUG = False
# Debug-drawing mode for the live preview windows (one of the three below).
drawmode = 0
DRAW_DISABLED = 0
DRAW_DIFF = 1
DRAW_CONTOURS = 2
# GRIP-generated image-processing pipeline instance.
grip = GripPipeline()
def noop():
    """Do-nothing task used to prime the worker Future before the main loop."""
    pass
def image_process_pipeline(img, frameid):
    """Worker-thread task: persist the diff image for frame *frameid*.

    Returns a status string for the main loop to print.
    (The original had unreachable code after the return — including a call to
    an undefined ``sleep`` — which has been removed.)
    """
    fname = "diff_%03s.jpg" % frameid
    cv2.imwrite(fname, img)
    return "Saved frameid " + fname
# Single-worker pool: at most one frame is processed off the main thread.
process_pool = ThreadPoolExecutor(1)
# Primed with a no-op so .running()/.done() checks work on the first loop.
process_thread = process_pool.submit(noop)
if __name__ == '__main__':
    LED_initialize(led_pin)
    networkTableConnected = False
    # Check for debug flag
    if "--debug" in sys.argv or "-d" in sys.argv:
        print("Debug mode activated!")
        DEBUG = True
        drawmode = DRAW_CONTOURS
    # Attempt to connect to a camera, retrying until one appears.
    while cam == None:
        try:
            cam = cv2.VideoCapture(0)
            NT_delete("camera_error")
            print("Camera operational!")
        except:
            NT_putString("camera_error", "Unable to connect the camera!")
            print("Camera not available!")
            time.sleep(1)
    # Main Loop
    while(1):
        # Attempt connection to NetworkTables
        if False == networkTableConnected:
            # Failure mode: Continue anyway (probably dev mode)
            networkTableConnected = NT_initialize("roboRIO-2811-FRC.frc-robot.local") or NT_initialize("roboRIO-2811-frc.local") or NT_initialize("10.28.11.2") or NT_initialize("10.28.12.2") or NT_initialize("10.28.11.30")
        if None == table:
            # BUGFIX: the return value was previously discarded here, so
            # ``table`` stayed None forever and every NT_* helper silently
            # fell into its exception path.
            table = NT_getTable()
        # TODO periodically check and invalidate NetworkTables + Table if
        # the connection gets dropped; this should also set the enabled
        # flag to false. One option is (frame_count % 20 == 0); another
        # reasonable spot is the except block of getBoolean, which should
        # only happen if we lose our table.
        # Check if robot is enabled
        if not NT_getBoolean("enabled", False) and not DEBUG:
            print("Robot not enabled!")
            time.sleep(0.05)
            continue
        # update our frame count
        frame_count += 1
        # Shove all image processing subroutine calls here
        try:
            # Get diff image
            valid, img = get_diff()
            # Draw the diff if that's what you're into
            if valid and drawmode == DRAW_DIFF:
                try:
                    cv2.imshow('diff', img)
                    if cv2.waitKey(1) == 27:
                        cv2.destroyAllWindows()
                        break
                except:
                    pass
            # If our worker thread is idle, break off and start processing a
            # new image.
            # TODO: check nice values; see how many parallel calculations the
            # Pi's cores can stream; investigate NetworkTables callbacks.
            if process_thread.running():
                pass
            elif process_thread.done():
                # Do something with the current results
                print(process_thread.result())
                # restart the process with the newest image
                process_thread = process_pool.submit(image_process_pipeline, img, frame_count)
            else:
                process_thread = process_pool.submit(image_process_pipeline, img, frame_count)
        except Exception as error:
            print("Ran into an error!")
            print(error)
            print(traceback.format_exc())
            #raise(error) #comment this line to continue running despite errors
            continue
        # Sleep a bit. This should help with some of the overheating issues,
        # as well as power draw.
        # TODO: investigate if this is required and/or causes problems
        #time.sleep(0.1)
    # Post cleanup!
    cv2.destroyAllWindows()
    GPIO.output(led_pin, False)
|
#-*-coding: utf-8 -*-#
class HousePark():
    """Minimal example class: every instance shares the family name "박"."""
    __lastname__ = "박"  # class-level constant ("private-ish" family name)

    def __init__(self, name):
        # BUGFIX: the constructor was misspelled ``__int__`` (so it never
        # ran and HousePark(name) raised TypeError) and it read the
        # non-existent attribute ``__last_name__``.
        self.full_name = self.__lastname__ + name

    def trabel(self, where):
        # Prints "<full name>, <where> 여행을 가다" ("goes on a trip to ...").
        print("%s, %s 여행을 가다" % (self.full_name, where))
pey = HousePark("응용")
print(pey.__lastname__)
|
from authentication.models import Person
from rest_framework import serializers
from django.contrib.auth.hashers import make_password
class PersonSerializer(serializers.ModelSerializer):
    """Serialises Person instances; the password is write-only and hashed on create."""
    class Meta:
        model = Person
        exclude = ['created_at', 'updated_at']
        # Never echo the password back; require it with at least 8 characters.
        extra_kwargs = {
            'password': {
                'write_only': True,
                'required': True,
                'min_length': 8
            }
        }

    def create(self, validated_data):
        """Create the person, then replace the raw password with its hash."""
        person = super().create(validated_data)
        person.set_password(person.password)
        person.save()
        # Blank the in-memory password so it cannot leak via the response.
        person.password = None
        return person
class ChangePasswordSerializer(serializers.Serializer):
    """Validates a password-change payload of two matching password fields."""
    new_password = serializers.CharField(min_length=8, max_length=50)
    confirm_new_password = serializers.CharField(min_length=8, max_length=50)

    def validate(self, data):
        """
        Check that both submitted passwords are identical.
        """
        if data['new_password'] != data['confirm_new_password']:
            # Fixed typo in the user-facing message ("doest" -> "does").
            raise serializers.ValidationError('Password does not match!')
        return data
|
class Solution(object):
    def generateTrees(self, n):
        """Return all structurally unique BSTs storing the values 1..n."""
        if n == 0:
            return []
        return self.dfs(1, n)

    def dfs(self, start, end):
        """Recursively build every BST over the value range [start, end]."""
        if start > end:
            return [None]
        res = []
        for idx in range(start, end + 1):  # idx becomes the root value
            leftnodes = self.dfs(start, idx - 1)
            rightnodes = self.dfs(idx + 1, end)
            for left in leftnodes:
                for right in rightnodes:
                    # BUGFIX: the root must carry the current value ``idx``;
                    # the original ``TreeNode(val)`` used an undefined name
                    # and raised NameError at runtime.
                    root = TreeNode(idx)
                    root.left = left
                    root.right = right
                    res.append(root)
        return res
|
# app/models.py
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from app import db, login_manager
class Employee(UserMixin, db.Model):
    """
    Create an Employee table
    """

    # Ensures table will be named in plural and not in singular
    # as is the name of the model
    __tablename__ = 'employees'

    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(60), index=True, unique=True)
    username = db.Column(db.String(60), index=True, unique=True)
    name = db.Column(db.String(60), index=True)
    # Werkzeug-generated hash; the raw password is never stored.
    password_hash = db.Column(db.String(128))
    role = db.Column(db.String(60))

    @property
    def is_admin(self):
        # Only these two roles are treated as administrators.
        return self.role in ['CEO', 'Manager']

    @property
    def password(self):
        """
        Prevent password from being accessed
        """
        raise AttributeError('password is not a readable attribute.')

    @password.setter
    def password(self, password):
        """
        Set password to a hashed password
        """
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """
        Check if hashed password matches actual password
        """
        return check_password_hash(self.password_hash, password)

    def __repr__(self):
        return '<Employee: {}>'.format(self.username)
# Set up user_loader
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: map a session's user id to an Employee row."""
    return Employee.query.get(int(user_id))
class Product(db.Model):
    """
    Create a Product table.

    One product belongs to a supplier and has many shipments/transactions
    (exposed on those models via the 'product' backref).
    """
    __tablename__ = 'products'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(60))
    mfg_date = db.Column(db.Date)
    exp_date = db.Column(db.Date)
    rcv_date = db.Column(db.Date)
    location = db.Column(db.String(100))
    stock = db.Column(db.Float)
    supplier_id = db.Column(db.Integer, db.ForeignKey('suppliers.id'))
    shipments = db.relationship('Shipment', backref='product', lazy='dynamic')
    transactions = db.relationship('Transaction', backref='product', lazy='dynamic')
    def __repr__(self):
        return '<Product: {}>'.format(self.name)
class Supplier(db.Model):
    """
    Create a Supplier table.

    A supplier has many products (exposed on Product via the 'supplier'
    backref).
    """
    __tablename__ = 'suppliers'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(60))
    email = db.Column(db.String(60), index=True, unique=True)
    contact = db.Column(db.String(50))
    address = db.Column(db.String(100))
    products = db.relationship('Product', backref='supplier', lazy='dynamic')
    def __repr__(self):
        return '<Supplier: {}>'.format(self.name)
class Shipment(db.Model):
    """
    Create a Shipment table.

    Records a quantity of a product sent to a named recipient/department
    on a given date.
    """
    __tablename__ = 'shipments'
    id = db.Column(db.Integer, primary_key=True)
    department = db.Column(db.String(60))
    name = db.Column(db.String(50))
    quantity = db.Column(db.Float)
    shipment_date = db.Column(db.Date)
    product_id = db.Column(db.Integer, db.ForeignKey('products.id'))
    def __repr__(self):
        return '<Shipment: {} units of {} sent to {} from {} on {}>'.format(self.quantity,
                                                                            self.product_id,
                                                                            self.name,
                                                                            self.department,
                                                                            self.shipment_date)
class Transaction(db.Model):
    """
    Create a Transaction table.

    Tracks a quantity of a product moved on a date.
    """
    __tablename__ = 'transactions'
    id = db.Column(db.Integer, primary_key=True)
    quantity = db.Column(db.Float)
    date = db.Column(db.Date)
    product_id = db.Column(db.Integer, db.ForeignKey('products.id'))
    def to_dict(self):
        # JSON-friendly view; 'self.product' is the backref defined on Product.
        return {
            'id': self.id,
            'date': self.date,
            'product': self.product.name,
            'quantity': self.quantity
        }
    def __repr__(self):
        return '<Transaction: {} units of {} sent/received>'.format(self.quantity,
                                                                    self.product_id)
|
from django.shortcuts import render, redirect
from django.contrib.auth import logout
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_http_methods
from django.views.decorators.csrf import csrf_exempt
from django.http.response import HttpResponse
from social_django.models import UserSocialAuth
from github import Github
from main import sensitive_data
from json import loads
def get_github(user):
    """Return a Github API client authenticated with the user's OAuth token.

    NOTE(review): UserSocialAuth.objects.get raises DoesNotExist when the
    user has no linked social account — callers should handle that (the
    original TODO noted the same).
    """
    social = UserSocialAuth.objects.get(user=user)
    token = social.extra_data['access_token']
    return Github(social.user.username, token)
|
from django.contrib import admin
from app1.models import Kitap,Yazar
# Register your models here.
# Expose the Kitap (book) and Yazar (author) models in the Django admin.
admin.site.register(Kitap)
admin.site.register(Yazar)
|
"""abcd URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from tem import views
from tem.views import GeneratePdf
app_name = "xc"
# URL routes for the app.
# Fix: duplicate entries for 'result' and 'display2' were removed — Django
# resolves the first match, so the repeated entries were dead weight.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.emp),
    path('show', views.show),
    path('home', views.home),
    path('access', views.access_session),
    path('delete', views.delete_session),
    path('loginpage', views.loginpage),
    path('pdf/', views.GeneratePdf.as_view()),
    path('course', views.course),
    path('showco', views.showco),
    path('edit/<str:id>', views.edit),
    path('update/<str:id>', views.update),
    path('delete/<str:id>', views.delete),
    path('dbms', views.dbms),
    path('os', views.os),
    path('hci', views.hci),
    path('toc', views.toc),
    path('sepm', views.sepm),
    path('contact', views.contact),
    path('pricing', views.pricing),
    path('index1', views.index1),
    path('dm', views.dm),
    path('courses1', views.courses1),
    path('editmap/<str:id>', views.editmap),
    path('updatemap/<str:id>', views.updatemap),
    path('ind', views.ind),
    path('indi', views.indi),
    path('result', views.result),
    path('display2', views.display2),
    path('adminentry', views.adminentry),
    path('adminlogin', views.adminlogin),
    path('adminhomepage', views.adminhomepage),
    path('coursescheme', views.coursescheme),
    path('weights', views.weights),
    path('teacherentry', views.teacherentry),
    path('teacher_alloc', views.teacher_alloc),
    path('report_gen', views.report_gen),
    path('showreport', views.showreport),
    path('poentry', views.poentry),
    path('courseinfo', views.courseinfo),
    path('programs', views.programs),
    path('gencis', views.gencis)
]
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
ColorRampManager
A QGIS plugin
plugin to manage and download color ramp definitions
-------------------
begin : 2012-08-04
copyright : (C) 2012 by Etienne Tourigny
email : etourigny dot dev at gmail dot com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
def classFactory(iface):
    """QGIS plugin entry point: build and return the plugin bound to iface."""
    # load ColorRampManager class from file ColorRampManager
    # (imported lazily so the module is only loaded when QGIS starts the plugin)
    from colorrampmanager import ColorRampManager
    return ColorRampManager(iface)
|
from PIL import Image
import requests
from io import BytesIO
from datetime import datetime
# Get delivery date of cart order
# Some sample token. Instead replace with the token returned by authentication endpoint
# NOTE(review): never commit real JWTs; load from env/config in production.
JWT_TOKEN = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ0ZXN0IiwiaWFkIjoxLCJhY3AiOm51bGwsInRicCI6bnVsbCwiaWF0IjoxNTg4MTk1MjA1fQ.A6QVTjGLaYAwxOYN0khYxls1_xf6hHHb4VSg5nqZsVc'
# Change to the cart that the user is editing
# You could find this cart with the user's id by searching for a cart
# that matches the user id and also has confirmed = 0 (meaning the cart
# is still being edited)
# Could also search by operator id
CART_ID = 1
def make_read_request(what_to_select, table, conditions):
    """Issue a read query against the Web API and return the raw response.

    The module-level JWT_TOKEN is sent as a Bearer token.
    """
    # Web API host URL (change if needed)
    # BASE_HOST_URL = 'http://127.0.0.1:8000'
    base_url = 'http://www.appspesa.it/api'
    route = '/v1/query/read'
    headers = {"Authorization": "Bearer " + JWT_TOKEN}
    # Query parameters forwarded verbatim to the generic read endpoint.
    params = {
        'what_to_select': what_to_select,
        'which_table': table,
        'conditions_to_satisfy': conditions,
    }
    return requests.get(base_url + route, headers=headers, params=params)
def convertSQLDateTimeToDatetime(value):
    """Parse an SQL-style 'YYYY-MM-DDTHH:MM:SS' string into a datetime."""
    fmt = '%Y-%m-%dT%H:%M:%S'
    return datetime.strptime(value, fmt)
# Retrieve the bool value for if cart order has been delivered and can be retired
r = make_read_request('DateDelivery', 'cart', 'id = ' + str(CART_ID))
# If the request was successful
if r.status_code == 200:
    content = r.json()
    # Retrieve delivery date information
    # (assumes the API returns rows as a list of lists — first row, first col)
    date = content['rows'][0][0]
    timestamp = convertSQLDateTimeToDatetime(date)
    day_names = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
    # Extract information to show in the UI
    month_name = timestamp.strftime("%B")
    day_num = timestamp.day
    day_name = day_names[timestamp.weekday()]
    # Print delivery date info
    print(month_name)
    print(day_num)
    print(day_name)
|
# Hard-coded sample field values for a vehicle-control message frame.
"""System ID"""
systemID1 = "Team 1 BS"
systemID2 = "Team 2 CCS"
systemID3 = "Team 3 EMS"
systemID4 = "Team 4 DS"
systemID5 = "Team 5 LS"
systemID6 = "Team 6 SACS"
"""Command Codes"""
AB = "Apply Brakes"
SL = "Signal Lights"
AG = "Apply Gas"
BL = "Brake Lights"
EL = "Emergency Lights"
HL = "Headlights"
"""Date Time Stamp"""
datetimestamp = "2016_09_08_19:25"
"""Time to Live"""
TTL = "10ms"
"""Check Sum"""
checksum = "123456ff0-22"
|
# import pytest
# from wechaty.wechaty import Wechaty
# import pdb
# @pytest.mark.asyncio
# async def test_mention_text_without_mentions(test_bot: Wechaty) -> None:
# """Test extracting mention text from a message without mentions"""
# msg = await test_bot.Message.find(message_id="no_mention")
# await msg.ready()
# text = await msg.mention_text()
# assert text == 'foo bar asd'
# @pytest.mark.asyncio
# async def test_mention_text_without_mentions_in_room(test_bot: Wechaty) -> None:
# """Test extracting mention text from a message without mentions"""
# msg = await test_bot.Message.find(message_id="room_no_mention")
# await msg.ready()
# text = await msg.mention_text()
# assert text == 'beep'
# @pytest.mark.asyncio
# async def test_mention_text_with_mentions_in_room(test_bot: Wechaty) -> None:
# """Test extracting mention text from a message without mentions"""
# msg = await test_bot.Message.find(message_id="room_with_mentions")
# await msg.ready()
# text = await msg.mention_text()
# assert text == 'test message asd'
# @pytest.mark.asyncio
# async def test_mention_text_with_mentions_and_alias_in_room(test_bot: Wechaty) -> None:
# """Test extracting mention text from a message without mentions"""
# msg = await test_bot.Message.find(message_id="room_with_mentions_and_alias")
# await msg.ready()
# text = await msg.mention_text()
# assert text == '123123 kkasd'
# @pytest.mark.asyncio
# async def test_mention_text_with_mentions_and_mismatched_alias(test_bot: Wechaty) -> None:
# """Test extracting mention text from a message without mentions"""
# msg = await test_bot.Message.find(message_id="room_with_mentions_and_alias_mismatched")
# await msg.ready()
# text = await msg.mention_text()
# assert text == '123123@Fake User beep'
# @pytest.mark.asyncio
# async def test_mention_text_with_mentions_but_not_mention_data(test_bot: Wechaty) -> None:
# """Test extracting mention text from a message without mentions"""
# msg = await test_bot.Message.find(message_id="room_with_text_mentions")
# await msg.ready()
# text = await msg.mention_text()
# assert text == '@Wechaty User @Test User @Fake Alias beep!!'
|
'''
Suppose we could access yesterday's stock prices as a list, where:
The indices are the time in minutes past trade opening time, which was 9:30am local time.
The values are the price in dollars of Apple stock at that time.
So if the stock cost $500 at 10:30am, stock_prices_yesterday[60] = 500.
Write an efficient function that takes stock_prices_yesterday and returns the best profit I could have made from 1 purchase and 1 sale of 1 Apple stock yesterday.
'''
import random
def get_max_profit(stock_prices_yesterday):
    """Return the best profit from one buy followed by one later sell.

    Single O(n) pass (was O(n^2)): track the index of the cheapest price
    seen so far and the best sell-after-buy profit.  Also fixed to actually
    return the profit, as the module docstring promises; the original only
    printed it.  Prints buy/sell times as before.
    """
    maxprofit = 0
    selltime = -1
    buytime = -1
    mintime = 0  # index of the cheapest price seen so far
    for minute, price in enumerate(stock_prices_yesterday):
        if minute > 0:
            profit = price - stock_prices_yesterday[mintime]
            if profit > maxprofit:
                maxprofit = profit
                buytime = mintime
                selltime = minute
        if price < stock_prices_yesterday[mintime]:
            mintime = minute
    # print-as-function works in both Python 2 and 3 for a single argument.
    print("Buy at time {} and sell at time {} profit {}".format(buytime, selltime, maxprofit))
    return maxprofit
# Demo: a fixed example plus 40 random distinct prices in [0, 100).
stock_prices_yesterday = [10, 7, 5, 8, 11, 9]
stock_prices_today = random.sample(range(100), 40)
get_max_profit(stock_prices_yesterday) # returns 6 (buying for $5 and selling for $11)
get_max_profit(stock_prices_today)
#form learned from https://www.youtube.com/watch?v=3XOS_UpJirU
from django import forms
class PersonForm(forms.Form):
    """One-field form for entering a person's name (empty label hides the caption)."""
    person = forms.CharField(label='')
from app import db
from models import BlogPost
# Destroy the database schema (drops every table). NOTE(review): despite the
# original comment, nothing here re-creates the tables afterwards.
db.drop_all()
|
from sqlalchemy.orm import Session
from quipper import (
models,
schemas,
)
def create_message(db: Session, message: schemas.MessageCreate):
    """Persist one message row built from the incoming schema object."""
    record = models.Message(
        sender=message.sender,
        conversation_id=message.conversation_id,
        message=message.message,
    )
    db.add(record)
    db.commit()
def get_conversation(db: Session, conversation_id: str):
    """Fetch every message in a conversation and wrap it in a schema object."""
    rows = (
        db.query(models.Message)
        .filter(models.Message.conversation_id == conversation_id)
        .all()
    )
    return schemas.Conversation(id=conversation_id, messages=rows)
|
from funcparserlib.lexer import make_tokenizer, Token
import re
ENCODING = 'utf-8'
regexps = {
'escaped': r'''
\\ # Escape
((?P<standard>["\\/bfnrt]) # Standard escapes
| (u(?P<unicode>[0-9A-Fa-f]{4}))) # uXXXX
''',
'unescaped': r'''
[\x20-\x21\x23-\x5b\x5d-\uffff] # Unescaped: avoid ["\\]
''',
'ml_unescaped': r'''
[\x01-\x21\x23-\uffff]
''',
}
specs = [
('STRING', (r'"""(%(ml_unescaped)s | %(escaped)s)*?"""' % regexps, re.VERBOSE | re.MULTILINE)),
('STRING', (r"'''(%(ml_unescaped)s | %(escaped)s)*?'''" % regexps, re.VERBOSE | re.MULTILINE)),
('NEWLINE', (r'[\r\n]+',)),
('SPACE', (r'[\s\f\t]+',)),
('STRING', (r'"([^\"] | %(escaped)s)*"' % regexps, re.VERBOSE)),
('STRING', (r"'([^\'] | %(escaped)s)*'" % regexps, re.VERBOSE)),
('OP', (r'[:.,*#|\-+=<>/!(){}\[\]]',)),
('NAME', (r'[^ \n\t\f:.,*#|\-+=<>/!(){}\[\]]+',)),
]
tokenizer = make_tokenizer(specs)
def idented_tokenizer(s):
    """Wrap the flat tokenizer, inserting synthetic INDENT/DEDENT tokens based
    on the leading-space width of the first token after each NEWLINE
    (Python-style), and guaranteeing the stream ends NEWLINE, DEDENT*, END."""
    eol = False     # True right after a NEWLINE: the next token sets the indent
    idents = [0]    # stack of active indentation widths
    last_token = None
    for token in tokenizer(s):
        # print(token)
        last_token = token
        if token.type == 'NEWLINE':
            eol = True
            yield token
            continue
        if eol:
            value = token.value
            # Indent = number of leading spaces the lexer kept on the token.
            ident = len(value) - len(value.lstrip(' '))
            last = idents[-1]
            if ident > last:
                yield Token('INDENT', 'INDENT', start=token.start, end=token.end)
                idents.append(ident)
            # Pop one DEDENT per closed indentation level.
            while ident < last:
                yield Token('DEDENT', 'DEDENT', start=token.start, end=token.end)
                idents.pop()
                last = idents[-1]
        yield token
        eol = False
    # NOTE(review): empty input leaves last_token as None and would raise here.
    if last_token.type != 'NEWLINE':
        yield Token('NEWLINE', '\n', start=last_token.end, end=last_token.end)
    # Close any indentation levels still open at end of input.
    for i in idents[1:]:
        yield Token('DEDENT', 'DEDENT', start=token.start, end=token.end)
    yield Token('END', '', start=token.start, end=token.end)
if __name__ == '__main__':
    # Demo: show only the synthesized INDENT/DEDENT tokens for a sample file.
    for tok in idented_tokenizer(open('example/templates/test.bs').read()):
        if tok.type in ('INDENT', 'DEDENT'):
            print(tok)
# Global configuration parameters for the volatility-prediction pipeline.
DATA_BASE_PATH = "/Volumes/WorkDisk/data/tick2016" # path of raw data
OUTPUT_PATH = "/Users/qt/Desktop/Notes/vol_prediction/output" # path to save figures and outputs
OUTPUT_DATA_PATH = "/Users/qt/Desktop/Notes/vol_prediction/output_data" # path to save calculated volatility
ROLLING_WINDOW = 120 # rolling window used in predicting volatility
LAG = 6 # best lag is 6
VOL_NAME = 'Vol_' + repr(LAG) # name of calculated volatility in the Dataframe
|
print("this is line line 1 by master")
print("this is line line 2 by master")
# Fix: removed unresolved git merge-conflict markers (<<<<<<< HEAD / =======
# / >>>>>>> cloud6) that made the file a SyntaxError. Both sides are kept,
# matching the pattern of the already-merged cloud1-cloud5 additions.
print("this is line line 1 by cloud2")
print("this is line line 2 by cloud2")
print("this is line line 1 by cloud1")
print("this is line line 2 by cloud1")
print("this is line line 1 by cloud3")
print("this is line line 2 by cloud3")
print("this is line line 1 by cloud4")
print("this is line line 2 by cloud4")
print("this is line line 1 by cloud5")
print("this is line line 2 by cloud5")
print("this is line line 1 by cloud6")
print("this is line line 2 by cloud6")
|
"""
Synchronous call: you invite a friend to dinner; they are busy, so you wait
until they are done, then you eat together.
Asynchronous call: you invite the friend, they say "I'll come find you when
I'm done", and meanwhile you go do something else.
"""
from multiprocessing import Pool
import time,os
def main():
    # Worker executed inside a pool process; its return value is passed to
    # the callback.  NOTE(review): locally-defined functions are not
    # picklable under the 'spawn' start method (e.g. Windows) — this demo
    # assumes the 'fork' start method.
    def func1():
        print("进程池中的进程:%d----%d"%(os.getpid(),os.getppid()))
        for i in range(3):
            print("-------%d-------"%i)
            time.sleep(1)
        return "hahaha"
    # Callback: runs in the parent process with func1's return value.
    def func2(arg):
        print("----pid=%d--"%os.getpid())
        print("------arg=%s"%arg)
    pool=Pool(3)
    pool.apply_async(func=func1,callback=func2)
    # Keep the parent alive so the async task can complete (demo loops forever).
    while True:
        time.sleep(1)
        print("-------主进程pid=%d"%os.getpid())
if __name__ == '__main__':
    main()
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Support for buildsets in the database
"""
import time
import sqlalchemy as sa
from datetime import datetime
from buildbot.util import json
from buildbot.db import base
class BuildsetsConnectorComponent(base.DBConnectorComponent):
    """
    A DBConnectorComponent to handle getting buildsets into and out of the
    database. An instance is available at C{master.db.buildsets}.
    """
    def addBuildset(self, ssid, reason, properties, builderNames,
            external_idstring=None):
        """
        Add a new Buildset to the database, along with the buildrequests for
        each named builder, returning the resulting bsid via a Deferred.
        Arguments should be specified by keyword.
        @param ssid: id of the SourceStamp for this buildset
        @type ssid: integer
        @param reason: reason for this buildset
        @type reason: short unicode string
        @param properties: properties for this buildset
        @type properties: L{buildbot.process.properties.Properties} instance,
        or None
        @param builderNames: builders specified by this buildset
        @type builderNames: list of strings
        @param external_idstring: external key to identify this buildset;
        defaults to None
        @type external_idstring: unicode string
        @returns: buildset ID via a Deferred
        """
        def thd(conn):
            submitted_at = datetime.now()
            submitted_at_epoch = time.mktime(submitted_at.timetuple())
            # One transaction covers the buildset row, its properties, and the
            # per-builder buildrequests so they all appear atomically.
            transaction = conn.begin()
            # insert the buildset itself
            r = conn.execute(self.db.model.buildsets.insert(), dict(
                sourcestampid=ssid,
                submitted_at=submitted_at_epoch,
                reason=reason,
                external_idstring=external_idstring))
            bsid = r.inserted_primary_key[0]
            # add any properties
            if properties:
                # each property is stored as a JSON-encoded [value, source] pair
                conn.execute(self.db.model.buildset_properties.insert(), [
                    dict(buildsetid=bsid, property_name=k,
                         property_value=json.dumps([v,s]))
                    for (k,v,s) in properties.asList() ])
            # and finish with a build request for each builder
            conn.execute(self.db.model.buildrequests.insert(), [
                dict(buildsetid=bsid, buildername=buildername,
                     submitted_at=submitted_at_epoch)
                for buildername in builderNames ])
            transaction.commit()
            return bsid
        # run the blocking DB work on the connector's thread pool
        return self.db.pool.do(thd)
    def subscribeToBuildset(self, schedulerid, buildsetid):
        """
        Add a row to C{scheduler_upstream_buildsets} indicating that
        C{SCHEDULERID} is interested in buildset @C{BSID}.
        @param schedulerid: downstream scheduler
        @type schedulerid: integer
        @param buildsetid: buildset id the scheduler is subscribing to
        @type buildsetid: integer
        @returns: Deferred
        """
        def thd(conn):
            # new subscriptions start out not-complete
            conn.execute(self.db.model.scheduler_upstream_buildsets.insert(),
                    schedulerid=schedulerid,
                    buildsetid=buildsetid,
                    complete=0)
        return self.db.pool.do(thd)
    def unsubscribeFromBuildset(self, schedulerid, buildsetid):
        """
        The opposite of L{subscribeToBuildset}, this removes the subcription
        row from the database, rather than simply marking it as inactive.
        @param schedulerid: downstream scheduler
        @type schedulerid: integer
        @param buildsetid: buildset id the scheduler is subscribing to
        @type buildsetid: integer
        @returns: Deferred
        """
        def thd(conn):
            tbl = self.db.model.scheduler_upstream_buildsets
            # hard delete: the (schedulerid, buildsetid) row is removed outright
            conn.execute(tbl.delete(
                (tbl.c.schedulerid == schedulerid) &
                (tbl.c.buildsetid == buildsetid)))
        return self.db.pool.do(thd)
    def getSubscribedBuildsets(self, schedulerid):
        """
        Get the set of buildsets to which this scheduler is subscribed, along
        with the buildsets' current results.  This will exclude any rows marked
        as not active.
        The return value is a list of tuples, each containing a buildset ID, a
        sourcestamp ID, a boolean indicating that the buildset is complete, and
        the buildset's result.
        @param schedulerid: downstream scheduler
        @type schedulerid: integer
        @returns: list as described, via Deferred
        """
        def thd(conn):
            bs_tbl = self.db.model.buildsets
            upstreams_tbl = self.db.model.scheduler_upstream_buildsets
            # join buildsets to this scheduler's active subscription rows
            q = sa.select(
                [bs_tbl.c.id, bs_tbl.c.sourcestampid,
                 bs_tbl.c.results, bs_tbl.c.complete],
                whereclause=(
                    (upstreams_tbl.c.schedulerid == schedulerid) &
                    (upstreams_tbl.c.buildsetid == bs_tbl.c.id) &
                    (upstreams_tbl.c.active)),
                distinct=True)
            return [ (row.id, row.sourcestampid, row.complete, row.results)
                     for row in conn.execute(q).fetchall() ]
        return self.db.pool.do(thd)
|
#https://leetcode-cn.com/problems/divide-two-integers/
#不可用 乘法 除法 MOD
#不能直接用减法实现,考虑 被除数特别大,而除数又特别小,运算会超时;
#不用位移,采用二分法,不停用 除数的N次幂去裁剪区域 + 迭代
class Solution(object):
    """Integer division without using *, / or % (LeetCode 29)."""
    def divide(self, dividend, divisor):
        """
        :type dividend: int
        :type divisor: int
        :rtype: int
        """
        # The quotient is negative exactly when the operands have mixed,
        # nonzero signs.
        negative = (dividend > 0 > divisor) or (divisor > 0 > dividend)
        dividend = abs(dividend)
        divisor = abs(divisor)
        # Dividing by 1 needs no work; otherwise count subtractions by powers.
        result = dividend if divisor == 1 else self.subCount(dividend, divisor)
        if negative:
            result = -result
        # Clamp to the signed 32-bit range required by the problem.
        if not (-2**31 <= result <= 2**31 - 1):
            result = -2**31 if negative else 2**31 - 1
        return result
    def subCount(self, bc, cs):
        """Count how many whole times cs fits into bc (both positive),
        peeling off the largest power of cs below bc each recursion."""
        if bc < cs:
            return 0
        if bc == cs:
            return 1
        for exp in range(1, 32):
            # First power of cs reaching bc: bc contains at least cs**(exp-2)
            # copies of cs; subtract that chunk (cs**(exp-1)) and recurse.
            if cs**exp >= bc:
                return cs**(exp - 2) + self.subCount(bc - cs**(exp - 1), cs)
# Manual check: this edge case must clamp to INT_MAX (2147483647).
solution = Solution()
#divend = -10
#divsor = 3
divend = -2147483648
divsor = -1
print(solution.divide(divend, divsor))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-09-18 22:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds MessageTemplate.insert_optout. Existing rows are backfilled with
    # True (default=1) and the default is then dropped (preserve_default=False).
    dependencies = [
        ('core', '0026_auto_20160914_1448'),
    ]
    operations = [
        migrations.AddField(
            model_name='messagetemplate',
            name='insert_optout',
            field=models.BooleanField(default=1, verbose_name='Insert Unsubscribe'),
            preserve_default=False,
        ),
    ]
|
from __future__ import unicode_literals
# Calendar-versioned package version string (YYYY.MM.DD).
__version__ = '2018.07.21'
|
import socket
import os
import time
# Send a single UDP datagram to localhost:9999.
# NOTE(review): os and time are imported but unused here.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
msg = "mesaj"
s.sendto(msg.encode(),('127.0.0.1', 9999))
|
import scipy.stats as stats
import numpy as np
import fitsio
from sklearn import mixture
import pickle
import pandas as pd
def mags2nanomaggies(mags):
    """Convert magnitudes to nanomaggies: flux = 10 ** ((mags - 22.5) / -2.5)."""
    exponent = (mags - 22.5) / -2.5
    return np.power(10., exponent)
def df_from_fits(filename, i=1):
    """ create a pandas dataframe from HDU i of a fits file """
    records = fitsio.FITS(filename)[i].read()
    # byteswap/newbyteorder: FITS data is big-endian; pandas needs native order
    return pd.DataFrame.from_records(records.byteswap().newbyteorder())
def fit_fluxes(fluxes, filename, k = 4):
    """Fit a k-component full-covariance GMM to the fluxes and pickle it.

    Bug fix: k was accepted but ignored (n_components was hard-coded to 4).
    The file is now opened with a context manager so it is closed even if
    pickling raises.
    """
    g = mixture.GMM(n_components=k, covariance_type='full')
    g.fit(fluxes)
    with open(filename, 'wb') as output:
        pickle.dump(g, output)
"""
re - [0, infty], transformation log
ab - [0, 1], transformation log (ab / (1 - ab))
phi - [0, 180], transformation log (phi / (180 - phi))
"""
def fit_gal_shape(re, ab, phi, filename, k = 4):
    """Fit k-component GMMs to transformed galaxy shape params and pickle them.

    Transformations (see the note above this function):
      re  in [0, inf): log(re)
      ab  in [0, 1]:   log(ab / (1 - ab))
      phi in [0, 180]: log(phi / (180 - phi))

    Bug fix: k was accepted but ignored (n_components hard-coded to 4);
    output file now closed via a context manager.
    """
    gre = mixture.GMM(n_components=k, covariance_type='full')
    gre.fit(np.log(re))
    gab = mixture.GMM(n_components=k, covariance_type='full')
    gab.fit(np.log(ab / (1 - ab)))
    gphi = mixture.GMM(n_components=k, covariance_type='full')
    gphi.fit(np.log(phi / (180 - phi)))
    # Persist all three fitted models together as one tuple.
    with open(filename, 'wb') as output:
        pickle.dump((gre, gab, gphi), output)
# read galaxy and star from FITS
print "reading in galaxy and star fluxes"
data_gals = fitsio.FITS('../../data/existing_catalogs/gals.fits')[1].read()
data_stars = fitsio.FITS('../../data/existing_catalogs/stars.fits')[1].read()
print "reading in co-added galaxies"
test_coadd_fn = "../../data/stripe_82_dataset/square_106_4.fit"
coadd_df = df_from_fits(test_coadd_fn)
# store as (log nanomaggies)
fluxes_gals = np.zeros((len(data_gals['cmodelmag_u']), 5))
fluxes_stars = np.zeros((len(data_stars['psfmag_u']), 5))
print "creating array of fluxes"
bands = ['u', 'g', 'r', 'i', 'z']
for bandn,band in enumerate(bands):
    galaxy_name = 'cmodelmag_' + band
    star_name = 'psfmag_' + band
    for index, r in enumerate(data_gals[galaxy_name]):
        fluxes_gals[index][bandn] = np.log(mags2nanomaggies(r))
    for index, r in enumerate(data_stars[star_name]):
        fluxes_stars[index][bandn] = np.log(mags2nanomaggies(r))
# Keep only rows where every band's log-flux is not +inf.
# NOTE(review): this rejects +inf only; -inf or NaN (e.g. from bad mags)
# would slip through — confirm the catalogs guarantee finite magnitudes.
valid_fluxes_gals = np.array([True] * len(data_gals['cmodelmag_u']))
valid_fluxes_stars = np.array([True] * len(data_stars['psfmag_u']))
for bandn,band in enumerate(bands):
    valid_fluxes_gals = valid_fluxes_gals & (fluxes_gals[:,bandn] != np.inf)
    valid_fluxes_stars = valid_fluxes_stars & (fluxes_stars[:,bandn] != np.inf)
fluxes_gals_final = fluxes_gals[valid_fluxes_gals,:]
fluxes_stars_final = fluxes_stars[valid_fluxes_stars,:]
# Fit the mixture models on (at most) the first 10k rows of each catalog.
print "fitting galaxy fluxes"
fit_fluxes(fluxes_gals_final[:10000,:], 'gal_fluxes_mog.pkl')
print "fitting star fluxes"
fit_fluxes(fluxes_stars_final[:10000,:], 'star_fluxes_mog.pkl')
print "fitting galaxy shape"
fit_gal_shape(coadd_df['expRad_r'] + 0.01,
              coadd_df['expAB_r'],
              coadd_df['expPhi_r'],
              'gal_shape_mog.pkl')
|
import requests
from bs4 import BeautifulSoup
# NOTE(review): plaintext credentials committed to source — rotate them and
# load from environment variables instead.
yourid="the7mincheol@naver.com"
yourpwd="24901402"
session=requests.Session()
# Fetch the sign-in page so the CSRF authenticity token can be scraped.
r=session.get("http://class.likelion.net/users/sign_in")
html=BeautifulSoup(r.text,"html.parser")
token2=html.input.next_sibling["type"]
print(token2)
token=html.input.next_sibling["value"]
print(token)
'''
params={'user[email]':yourid ,'user[password]':yourpwd,'authenticity_token':token}
r=session.post("http://class.likelion.net/users/sign_in",params)
r=session.get("http://class.likelion.net")
print(r.text)
'''
|
import pickle
from struct import pack, unpack
from copter.storage import Secret, StorageItem, DataType
class IncrementalFile:
    """Append-only secret store: ADD/REMOVE records are replayed to build state.

    Each record is a length-prefixed (little-endian uint32) pickle of a
    StorageItem.
    """
    def __init__(self):
        # Hard-coded log path; records are only ever appended.
        self.path = '/tmp/secrets'
    def add(self, key):
        """Append an ADD record for *key*."""
        with open(self.path, 'ab') as fp:
            data = pickle.dumps(StorageItem(DataType.ADD, key))
            fp.write(pack('<I', len(data)) + data)
    def get(self, name):
        """Return the stored secret named *name*, or None if absent.

        Bug fix: the lookup result was computed but never returned, so this
        method always yielded None.
        """
        return self._get_all().get(name)
    def remove(self, key):
        """Append a REMOVE record for *key*.

        NOTE(review): add() stores the key as-is while remove() wraps it in
        Secret(key) — confirm both payloads expose the same .name.
        """
        with open(self.path, 'ab') as fp:
            data = pickle.dumps(StorageItem(DataType.REMOVE, Secret(key)))
            fp.write(pack('<I', len(data)) + data)
    def list(self):
        """Return the names of all currently-live secrets."""
        return self._get_all().keys()
    def _get_all(self):
        """Replay the log and return {name: payload} for all live secrets."""
        secrets = {}
        with open(self.path, 'rb') as fp:
            while True:
                size = fp.read(4)
                if not size:
                    break  # clean end of log
                (length,) = unpack('<I', size)
                item = pickle.loads(fp.read(length))
                if item.dataType == DataType.ADD:
                    secrets[item.payload.name] = item.payload
                elif item.dataType == DataType.REMOVE:
                    if item.payload.name in secrets:
                        del secrets[item.payload.name]
        return secrets
|
#!/usr/bin/python3
import os
# Create a local user whose password is "hello" + username.
# NOTE(review): os.system builds a shell command from user input; the
# isalpha() gate limits injection, but subprocess with an argument list —
# and a properly encrypted value for `useradd -p` — would be safer.
uname=input("enter input from user")
a=uname.isalpha()
if a ==True:
    passwd="hello"+uname
    os.system("sudo useradd -m -p "+passwd+" "+uname)
    print("user created!")
else:
    print("all charcter are not in string!")
|
#!/bin/python
import sys
import copy
import re
# Input grid file from the command line; consumed by the parse loop below.
infile = open(sys.argv[1], "r")
class Cube:
    """One cell of the 3-D Conway-cubes automaton (AoC 2020 day 17 rules)."""
    KEEPALIVECOUNT = [2,3]  # active cube survives with this many active neighbours
    ACTIVATECOUNT = 3       # inactive cube activates with exactly this many
    def __init__(self, active=False):
        self.active = active
        # Set by computeCycle(), applied by executeCycle() — allows the whole
        # grid to update simultaneously.
        self.nextState = None
    def getState(self):
        """Return whether this cube is currently active."""
        return self.active
    def computeCycle(self, neighbours):
        """Compute (but do not apply) the next state from neighbour cubes."""
        live = sum(1 for cube in neighbours if cube.getState())
        if self.active:
            self.nextState = live in self.KEEPALIVECOUNT
        else:
            self.nextState = live == self.ACTIVATECOUNT
    def executeCycle(self):
        """Commit the previously computed next state."""
        self.active = self.nextState
    def __str__(self):
        return "#" if self.active else "."
# All 26 neighbour offsets in 3-D (the origin itself is removed).
neighbourGrid = [(x,y,z) for x in range(-1,2) for y in range(-1,2) for z in range(-1,2)]
neighbourGrid.remove((0,0,0))
# The grid is a dict keyed by (x, y, z) tuples of Cube objects, plus six
# bounding-box scalar keys ('xmin', 'xmax', ...) stored in the same dict.
pocketDimension = {}
x = 0
pocketDimension['xmin'] = 0
pocketDimension['ymin'] = 0
pocketDimension['zmin'] = 0
# Parse the initial z = 0 slice from the input file.
for line in infile:
    line = line.rstrip()
    for (y, c) in enumerate(line):
        #print(y,c)
        pocketDimension['ymax'] = y
        if c == '.':
            pocketDimension[(x,y,0)] = Cube(False)
        elif c == '#':
            pocketDimension[(x,y,0)] = Cube(True)
        else:
            raise Exception("Bad Parse")
    x+=1
pocketDimension['xmax'] = x-1
pocketDimension['zmax'] = 0
def printGrid():
    """Print each z-slice of the pocket dimension as rows of '#'/'.' chars."""
    global pocketDimension
    for z in range(pocketDimension['zmin'], pocketDimension['zmax']+1):
        print("z=%d" % z)
        for x in range(pocketDimension['xmin'], pocketDimension['xmax']+1):
            cells = [str(pocketDimension[(x, y, z)])
                     for y in range(pocketDimension['ymin'], pocketDimension['ymax']+1)]
            print("".join(cells))
def processGridNode(grid, node, coords, extend=True):
    """Compute one cube's next state; optionally discover new border cubes.

    Returns a dict of newly-created inactive neighbour cubes that the caller
    must merge into the grid.  Non-Cube entries (the bounding-box scalars)
    are ignored.
    """
    gridExtend = {}
    if isinstance(node, Cube):
        (x,y,z) = coords
        neighbours = []
        for p in neighbourGrid:
            (xoff, yoff, zoff) = p
            if (x+xoff, y+yoff, z+zoff) in grid:
                neighbours.append(grid[(x+xoff, y+yoff, z+zoff)])
            elif extend:
                gridExtend[(x+xoff, y+yoff, z+zoff)] = Cube(False)
                # Track the bounding box. Bug fix: the y/z bounds were
                # previously updated with xoff instead of yoff/zoff.
                grid['xmin'] = min(grid['xmin'], x+xoff)
                grid['xmax'] = max(grid['xmax'], x+xoff)
                grid['ymin'] = min(grid['ymin'], y+yoff)
                grid['ymax'] = max(grid['ymax'], y+yoff)
                grid['zmin'] = min(grid['zmin'], z+zoff)
                grid['zmax'] = max(grid['zmax'], z+zoff)
                continue
            else:
                continue
        grid[coords].computeCycle(neighbours)
    return gridExtend
def processGrid(grid):
    """Run one cycle: compute next states, then extend the grid outward once."""
    gridExtend = {}
    # Snapshot current entries; processGridNode only mutates existing
    # bounding-box keys, so iterating a copy is equivalent.
    for coords, node in list(grid.items()):
        gridExtend.update(processGridNode(grid, node, coords))
    grid.update(gridExtend)
    # Newly added border cubes get a state computed without extending again.
    for coords, node in list(gridExtend.items()):
        processGridNode(grid, node, coords, False)
# Run six cycles of the automaton, then count the active cubes.
for cycle in range(6):
    print(cycle)
    #printGrid()
    processGrid(pocketDimension)
    # Apply all computed next-states at once (simultaneous update).
    for coords in pocketDimension:
        if isinstance(pocketDimension[coords], Cube):
            pocketDimension[coords].executeCycle()
printGrid()
print(sum([ x.getState() for x in pocketDimension.values() if isinstance(x, Cube)]))
# Read the three side lengths (prompts and messages are in Portuguese).
vertice_a = float(input('Informe o valor do vértice A: '))
vertice_b = float(input('Informe o valor do vértice B: '))
vertice_c = float(input('Informe o valor do vértice C: '))
# Triangle inequality: each side must lie strictly between the difference
# and the sum of the other two.
if (vertice_b - vertice_c) < vertice_a < (vertice_b + vertice_c) and (vertice_a - vertice_c) < vertice_b < (vertice_a + vertice_c) and (vertice_a - vertice_b) < vertice_c < (
        vertice_a + vertice_b):
    print('Esses valores FORMAM um triângulo!')
else:
    print('Esses valores NÃO FORMAM um triângulo!')
|
import tools
import re
class Mutation:
    """
    A single point mutation (e.g. 'A123T').
    Pos is stored zero-indexed such that the start methionine is pos=0.
    """
    regex = re.compile("^([A-Z]?)(-?\d+)([A-Z]?)$")
    __slots__ = ['ref', 'pos', 'alt']
    def __init__(self, ref, pos, alt):
        self.ref = ref
        self.pos = pos
        self.alt = alt
        # At least one of the reference/alternative residues must be given.
        if ref is None and alt is None:
            raise ValueError("Both ref and alt cannot be None")
    @classmethod
    def fromstring(cls, st, offset=0):
        """Parse '<ref><1-indexed pos><alt>'; offset shifts the stored pos."""
        match = cls.regex.match(st)
        if match is None:
            raise ValueError("String '{}' is not formatted as mutation".format(st))
        ref, pos, alt = match.groups()
        # Empty captures mean the residue was omitted.
        return cls(ref or None, int(pos) - 1 + offset, alt or None)
    def __repr__(self):
        return "{}{}{}".format(self.ref or "", self.pos + 1, self.alt or "")
    def __eq__(self, other):
        return (self.ref, self.pos, self.alt) == (other.ref, other.pos, other.alt)
    def __hash__(self):
        return hash(self.ref) ^ hash(self.pos) ^ hash(self.alt)
class Clade:
    """A named node in a clade tree; its definition accumulates every
    defining mutation of its ancestors plus its own."""
    __slots__ = ['name', 'definition', 'subclades']
    def __init__(self, name, definition, parent=None):
        self.name = name
        own = set(definition)
        # Child clades inherit the full parental definition.
        self.definition = own if parent is None else own | parent.definition
        self.subclades = []
        if parent is not None:
            parent.add(self)
    def intersect(self, mutationset):
        """Return the defining mutations also present in mutationset."""
        return self.definition & mutationset
    def matches(self, mutationset):
        """True when every defining mutation appears in mutationset."""
        return self.definition.issubset(set(mutationset))
    def add(self, clade):
        """Attach a subclade; only Clade instances are accepted."""
        if not isinstance(clade, Clade):
            raise ValueError("Can only add Clades to a Clade")
        self.subclades.append(clade)
# HA2 offset in H1N1 is 344, e.g. HA2 X1Y is HA X345Y
# Clade definition trees: name -> (list of defining mutations, dict of subclades).
# Mutation strings use 1-based HA numbering; "HA2 ..." entries are numbered
# from the start of HA2 (parse_clade applies the 344 offset).
H1N1 = {"H1": ([], {
    "6B.1": (["S162N", "I216T"], {
        "6B.1A": (["S74R", "S164T", "I295V"], {
            "6B.1A1": (["S183P"], {}),
            "6B.1A2": (["S183P", "L233I", "HA2 V193A"], {}),
            "6B.1A3": (["T120A", "S183P"], {}),
            "6B.1A4": (["N129D", "A144E", "S183P"], {}),
            "6B.1A5": (["S183P", "N260D"], {
                "6B.1A5A": (["N129D", "T185A"], {}),
                "6B.1A5B": (["E235D", "HA2 V193A"], {}),
            }),
            "6B.1A6": (["T120A", "S183P"], {}),
            "6B.1A7": (["K302T", "HA2 I77M", "HA2 N169S", "HA2 E179D"], {})
        })
    })
})
}
# Note: WHO made a mistake and accidentally listed N145S as N144S
H3N2 = {"H3": ([], {
    "3C.2a": (["L3I", "N145S", "F159Y", "K160T", "N225D", "Q311H", "HA2 D160N"], {
        "3C.2a1": (["N171K", "HA2 I77V", "HA2 G155E"], {
            "3C.2a1a": (["T135K", "HA2 G150E"], {}),
            "3C.2a1b": (["E62G", "R142G", "H311Q"], {}),
        }),
        "3C.2a2": (["T131K", "R142K", "R261Q",], {}),
        "3C.2a3": (["N121K", "S144K"], {}),
        "3C.2a4": (["N31S", "D53N", "S144R", "N171K", "I192T", "Q197H"], {})
    }),
    "3C.3a": (["A138S", "F159S", "N225D", "K326R"], {}),
})
}
# Parse the mutatitions
# This function takes (name: ([], {})) tuple
def parse_clade(parent, pair, offset):
    """Recursively build a Clade tree from a (name, (definition, subclades)) pair.

    `offset` is the subtype-specific numbering offset for plain HA mutations;
    "HA2 ..." entries instead use the fixed HA2 offset of 344.
    """
    name, (definition, subclades) = pair
    mutations = []
    for text in definition:
        if text.startswith("HA2"):
            # Strip the "HA2 " prefix and number from the start of HA2.
            mutations.append(Mutation.fromstring(text.partition(" ")[2], 344))
        else:
            mutations.append(Mutation.fromstring(text, offset))
    clade = Clade(name, mutations, parent)
    for child_pair in subclades.items():
        parse_clade(clade, child_pair, offset)
    return clade
# Build both clade trees once at import time (one tree per subtype).
H1_clade = parse_clade(None, next(iter(H1N1.items())), 0)
H3_clade = parse_clade(None, next(iter(H3N2.items())), 0)
# Logic to find the right clade
def findclade(clade, mutations):
    """
    Descend the clade tree and return the deepest clade whose defining
    mutations are all present in `mutations`.

    :param clade: root Clade to start from (assumed to already match)
    :param mutations: iterable of Mutation objects observed in the sample
    :return: the most specific matching Clade (possibly `clade` itself)
    """
    # (The original also built an unused `set(mutations)` and an unused
    # `result` alias for `clade`; both removed.)
    matching = [c for c in clade.subclades if c.matches(mutations)]
    # Strangely, one subclade A may have a set of mutations which is a subset
    # of clade B, while A not being annotated as a subclade of B.  Prefer the
    # subclade with the most defining mutations in that case.
    if len(matching) > 1:
        matching.sort(key=lambda c: len(c.definition), reverse=True)
        if all(c.definition.issubset(matching[0].definition) for c in matching[1:]):
            matching = [matching[0]]
    if len(matching) == 1:
        return findclade(matching[0], mutations)
    return clade
if __name__ == '__main__':
    # Driven by Snakemake: `snakemake` is injected into the script's globals
    # by the workflow engine at run time, not imported here.
    with open(snakemake.input.subtype) as file:
        subtype = next(file).strip()  # first line holds the subtype label
    offset = tools.get_offset(subtype)  # subtype-specific numbering offset
    print(snakemake.input.subtype, subtype, offset)
    mutations = set()
    # One mutation per line in the mutations input file.
    with open(snakemake.input.mutations) as file:
        for line in file:
            mutations.add(Mutation.fromstring(line.strip(), offset))
    # Pick the clade tree matching the subtype; anything else is unclassifiable.
    if subtype.startswith("H1"):
        clade = findclade(H1_clade, mutations).name
    elif subtype.startswith("H3"):
        clade = findclade(H3_clade, mutations).name
    else:
        clade = "unknown clade"
    with open(snakemake.output[0], "w") as file:
        print(clade, file=file)
|
#!/usr/bin/python3
# encoding=UTF-8
from pyserpent import Serpent
from os import urandom
from binascii import crc32
from progbar import ProgBar # progressbar
import sys # stderr
class LinbootHexEncryptor(Serpent):
    """
    Encrypts an Intel hex file with Serpent in CBC mode.

    Initially developed for AVR firmware hex files; compatibility with other
    MCUs is unknown.  The output binary file is compatible with the
    linboot bootloader.
    """
    PAGE_SIZE = 128  # flash page size, must be a multiple of the cipher block size (!)
    FILLER = 0xFF    # value used to pad unused flash
    FW_SIZE_ADD = 0xB6  # address of the firmware-size field in the INFO block

    def __init__(self, key, ivc):
        """
        Initializes the cipher.
        :param key: 256-bit key
        :param ivc: 128-bit initial vector for CBC
        """
        Serpent.__init__(self, key)
        self.flash = {}  # page_no -> bytearray of PAGE_SIZE bytes
        self.ivc = ivc

    @staticmethod
    def block_xor(block1, block2):
        """XOR two cipher blocks of equal length."""
        assert len(block1) == Serpent.get_block_size() and len(block2) == len(block1)
        return bytes(block1[i] ^ block2[i] for i in range(Serpent.get_block_size()))

    @staticmethod
    def read_hexlines(input_stream, strict_check_sum=True):
        """
        Reads and parses all lines from a hex file.
        :param input_stream: hex-file
        :param strict_check_sum: raise when a line with an incorrect checksum is met
        :return: list of hexlines
        """
        hexlines = []
        while True:
            hexline = LinbootHexEncryptor.read_hexline(input_stream, strict_check_sum)
            if hexline is None:
                return hexlines
            hexlines.append(hexline)

    @staticmethod
    def read_hexline(input_stream, strict_check_sum=True):
        """
        Reads and parses one line in Intel hex format.
        :param input_stream: hex-file
        :param strict_check_sum: raise when a line with an incorrect checksum is met
        :return: tokenized hex-record as a dictionary, or None at EOF
        """
        line = input_stream.readline()
        if not line:
            return None  # EOF
        data_len = int(line[1:3], 16)
        address_msb, address_lsb = int(line[3:5], 16), int(line[5:7], 16)
        address = address_msb * 256 + address_lsb
        record_type = int(line[7:9], 16)
        data = bytes(int(line[9 + i * 2:11 + i * 2], 16) for i in range(data_len))
        check_sum = int(line[9 + data_len * 2:11 + data_len * 2], 16)
        # Intel hex checksum: two's complement of the byte sum of all record fields.
        check_sum_actual = (~((data_len + address_msb + address_lsb + record_type + sum(data)) & 0x00FF) + 1) & 0xFF
        if check_sum != check_sum_actual:
            if strict_check_sum:
                sys.stdout.write("Incorrect check sum in HEX-file\nline: "
                                 "\"{0}\"\ncalculated check sum: {1}\n"
                                 "check sum in file: {2}".format(line.strip(), check_sum_actual, check_sum))
                raise ValueError()
            else:
                sys.stdout.write("[WARNING] {0} "
                                 "Incorrect check sum!\n".format(hex(address)))
        return {"data_len": data_len, "address": address, "record_type": record_type, "data": data}

    def flash_write(self, address, data):
        """
        Adds bytes to the flash buffer, dealing with the page structure.
        :param address: absolute address
        :param data: bytes to write
        :return:
        """
        data_len = len(data)
        data = iter(data)
        begin_page_no = address // self.PAGE_SIZE
        end_page_no = (address + data_len - 1) // self.PAGE_SIZE
        for page_no in range(begin_page_no, end_page_no + 1):
            if page_no not in self.flash:
                self.flash[page_no] = bytearray(self.FILLER for _ in range(self.PAGE_SIZE))
            # The first page starts at the in-page offset of `address`; every
            # later page starts at 0.
            start = address % self.PAGE_SIZE if page_no == begin_page_no else 0
            # The last page stops after the in-page offset of the final byte;
            # every earlier page runs to the end of the page.
            # BUGFIX: the original wrapped these bounds with `%` incorrectly and
            # used PAGE_SIZE + 1 as a range limit, which skipped bytes or raised
            # IndexError/StopIteration whenever a write crossed a page boundary.
            if page_no == end_page_no:
                stop = (address + data_len - 1) % self.PAGE_SIZE + 1
            else:
                stop = self.PAGE_SIZE
            for offset in range(start, stop):
                self.flash[page_no][offset] = next(data)

    def encrypt_hex(self, input_file_path, output_file_path):
        """
        Reads the input hex file and writes the encrypted firmware to a binary file.
        :param input_file_path: intel hex-file
        :param output_file_path: output binary file for encrypted firmware
        :return:
        """
        fw_size = 0
        # read hex file into the page buffer
        with open(input_file_path, "r") as hexfile:
            for hexline in self.read_hexlines(hexfile):
                self.flash_write(hexline["address"], hexline["data"])
                # NOTE(review): this records end-address + 1, one more than the
                # byte count — confirm against the bootloader's size convention.
                if hexline["address"] + len(hexline["data"]) > fw_size:
                    fw_size = hexline["address"] + len(hexline["data"]) + 1
            if fw_size % 2 == 1:
                fw_size += 1  # round up to an even size
        # The TsMSR tool determines the firmware size in its own way; that value
        # must be placed into the application's INFO block at self.FW_SIZE_ADD by
        # hand, which is why the line below stays commented out.
        # self.flash_write(self.FW_SIZE_ADD, bytes([fw_size & 0xFF, fw_size >> 8 & 0xFF]))
        sys.stdout.write("[INFO ] Firmware size in bytes: {0}\n".format(fw_size))
        # encrypt the collected pages and write them to the binary file
        accum = self.ivc  # CBC chaining value
        with open(output_file_path, "wb") as binfile:
            pb = ProgBar(" Encrypting {0} pages of flash ".format(len(self.flash)))
            # NOTE(review): pages are emitted in insertion order, not sorted by
            # page number; each trailer block carries its page number, so the
            # bootloader presumably reorders — verify.
            for page_no in self.flash:
                pb.update(page_no / len(self.flash))
                page_bytes = self.flash[page_no]
                blocks_per_page = self.PAGE_SIZE // self.get_block_size()
                assert self.PAGE_SIZE % self.get_block_size() == 0
                for block_no in range(0, blocks_per_page):
                    block = bytes(page_bytes[block_no * self.get_block_size():(block_no + 1) * self.get_block_size()])
                    encrypted_block = self.encrypt(self.block_xor(block, accum))
                    accum = encrypted_block
                    binfile.write(encrypted_block)
                # Trailer block: page number + CRC32 over (page || page_no),
                # padded with random bytes up to one cipher block.
                page_no_bytes = bytes(page_no >> 8 * i & 0xFF for i in range(2))
                crc32_bytes = bytes(crc32(page_bytes + page_no_bytes) >> 8 * i & 0xFF for i in range(4))
                block = page_no_bytes + crc32_bytes\
                    + urandom(self.get_block_size() - len(page_no_bytes) - len(crc32_bytes))
                encrypted_block = self.encrypt(self.block_xor(block, accum))
                accum = encrypted_block
                binfile.write(encrypted_block)
            pb.close()
        sys.stdout.write("[INFO ] Done\n")
import collections
from inputplotdataISM import inputplotdict
import argparse
# Choose which plot to produce via -f/--plotitem; the key selects an entry in
# inputplotdict, whose '_plotfunction' callable renders that plot from its
# own configuration dict.
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--plotitem', default='gf12imhdcv')
args = parser.parse_args()
print args.plotitem  # NOTE: Python 2 print statement
plotlist=[args.plotitem]
for plotneed in plotlist:
    # Each dict entry carries its plotting callable alongside its data/config.
    inputplotdict[plotneed]['_plotfunction'](inputplotdict[plotneed])
|
import nltk
import os
import pandas as pd
import numpy as np
from operator import itemgetter
def get_data(num_docs=10000, batch_size=128, data_path=None, get_minibatches=True):
    """
    Gets the word2vec training data.
    :param num_docs: int; number of documents to use in training
    :param batch_size: int; size for the minibatches during training
    :param data_path: string; path to training data
    :param get_minibatches: Boolean; if true, partitions training data into minibatches
    :return: tuple; batches, word2idx, idx2word
    """
    # Default to the bundled CSV living next to this module.
    if not data_path:
        data_path = os.path.join(os.path.dirname(__file__), "../data/train_data.csv")
    reviews = list(pd.read_csv(data_path)["Description"])[:num_docs]
    tokenized = pre_process(reviews)
    word2idx, idx2word, vocab = get_lookups(tokenized)
    # Map every token to its vocabulary index, sending OOV tokens to "UNK".
    encoded = [[[word2idx[token] if token in vocab else word2idx["UNK"] for token in sent]
                for sent in doc] for doc in tokenized]
    if get_minibatches:
        return get_batches(encoded, batch_size), word2idx, idx2word
    return encoded, word2idx, idx2word
def pre_process(docs):
    """
    Performs preprocessing: sentence/word tokenization and lowercasing.
    :param docs: list; a list containing each document as a string
    :return: list; per document, a list of sentences, each a list of lowercase tokens
    """
    processed_docs = []
    for text in docs:
        doc_sentences = []
        for sentence in nltk.sent_tokenize(text):
            doc_sentences.append([token.lower() for token in nltk.word_tokenize(sentence)])
        processed_docs.append(doc_sentences)
    return processed_docs
def get_lookups(docs, min_freq=3):
"""
Gets the dictionaries mapping words onto their index in the embedding array.
:param docs: list; each document is a list of sentences, each sentence is a list of tokens;
each token is a string representing a word
:param min_freq: int; minimum word freaquency to be included in vocab
:return: tuple; (word2idx, idx2word, vocab)
"""
words = [word for doc in docs for sent in doc for word in sent]
fd = nltk.FreqDist(words)
vocab = sorted([(word, freq) for word, freq in fd.items() if freq >= min_freq],
key=itemgetter(1),
reverse=True)
vocab = [word for word, _ in vocab]
vocab.insert(0, "UNK")
word2idx = {word:idx for idx, word in enumerate(vocab)}
idx2word = {idx:word for idx, word in enumerate(vocab)}
return word2idx, idx2word, vocab
def get_batches(docs, batch_size=128, window_size=5, num_skips=3):
    """
    Partition skip-gram (center, context) training pairs into minibatches.
    :param docs: list; each document is a list of sentences, each sentence is a list of tokens;
                 each token is an integer representing a word in the vocabulary
    :param batch_size: int; size of the minibatches
    :param window_size: int; size of the skip-gram context window
    :param num_skips: int; number of context words to sample per center word
    :return: list; minibatches as (centers, contexts) pairs.
             Note: a trailing partial batch is dropped.
    """
    batches = []
    centers, contexts = [], []
    for doc in docs:
        for sent in doc:
            sent_len = len(sent)
            for idx, center in enumerate(sent):
                # Tokens inside the window, excluding the center itself.
                window = [sent[pos]
                          for pos in range(idx - window_size, idx + window_size + 1)
                          if pos != idx and 0 <= pos < sent_len]
                if not window:
                    continue
                # Sample context words (with replacement) from the window.
                sampled = np.random.choice(window, size=min(num_skips, len(window)))
                for context in sampled:
                    centers.append(center)
                    contexts.append(context)
                    if len(centers) >= batch_size:
                        batches.append((centers, contexts))
                        centers, contexts = [], []
    return batches
def get_analogies(vocab):
    """
    Load the analogy test set, keeping only analogies fully inside the vocabulary.
    :param vocab: set; unique words in the vocabulary
    :return: list; all of the analogies found in the vocabulary (lists of tokens)
    """
    analogies_path = os.path.join(os.path.dirname(__file__), "../data/analogies.txt")
    analogies = []
    with open(analogies_path, "r") as f:
        for line in f:
            tokens = line.strip().split()
            # Skip blank lines (the original indexed tokens[0] and crashed on
            # them) and ":"-prefixed section headers; also split each line
            # only once instead of twice.
            if tokens and tokens[0] != ":":
                analogies.append(tokens)
    return [analogy for analogy in analogies
            if all(token in vocab for token in analogy)]
def assess(embeddings, word2idx, idx2word, analogies):
    """
    Assess the trained word vectors against the analogy test set
    ("a is to b as c is to d").
    :param embeddings: numpy.ndarray; trained word embeddings
    :param word2idx: dict; maps word onto embedding array index
    :param idx2word: dict; maps embedding array index onto word
    :param analogies: list; contains the analogies; each analogy is a list of length 4
    :return: float; accuracy of the word vectors on the analogy set
    """
    hits = []
    for analogy in analogies:
        # Vector arithmetic: b - a + c should land nearest to d.
        predicted = (embeddings[word2idx[analogy[1]]]
                     - embeddings[word2idx[analogy[0]]]
                     + embeddings[word2idx[analogy[2]]])
        hits.append(1 if closest_word(predicted, embeddings, idx2word) == analogy[3] else 0)
    return sum(hits)/len(hits)
def closest_word(word_embedding, embeddings, idx2word):
    """
    Finds the word whose embedding is most cosine-similar to a given vector.
    :param word_embedding: np.array; a one dimensional word vector
    :param embeddings: np.ndarray; the word embedding array
    :param idx2word: dict; maps embedding array index onto word
    :return: string; the closest word
    """
    query_norm = np.linalg.norm(word_embedding)
    row_norms = np.linalg.norm(embeddings, axis=1)
    similarities = np.dot(word_embedding, embeddings.T) / (query_norm * row_norms)
    # argsort ascending: the last position holds the index of the best match.
    return idx2word[np.argsort(similarities)[-1]]
|
from typing import List
from matplotlib import colors
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
#optische Abstandssensor Kennlinie
def optischeSensor(x:List[float], y:List[float]):
    """Plot the characteristic curve (Kennlinie) of the optical distance sensor.

    :param x: output current readings [mA] (x-axis)
    :param y: laser display distances [mm] (y-axis)
    """
    #background
    plt.style.use('Solarize_Light2')
    #graph title
    plt.title("Optische Abstandssensor", loc='center')
    plt.xlabel('Ausgangsstrom [mA]')
    plt.ylabel('Anzeige Laser [mm]')
    #line style: solid cyan line
    mpl.rc('lines', linewidth=2, linestyle='-')
    plt.plot(x,y,'c')
    plt.show()
#Ansprechkurve von Lichttaste
def Ansprechkurve(x:List[int], y:List[int], z1:List[int], z2:List[int]):
    """Plot the response curve (Ansprechkurve) of the light scanner.

    :param x: warning-range distances, plotted against z1
    :param y: switch-point distances, plotted against z2
    :param z1: scan distances [mm] for the warning range
    :param z2: scan distances [mm] for the switch point
    """
    #background
    plt.style.use('Solarize_Light2')
    #graph title
    plt.title("Ansprechkurve", loc='center')
    plt.xlabel('a [mm]')
    plt.ylabel('Tastweite [mm]')
    #line style: dashed red line for the warning range
    mpl.rc('lines', linewidth=2, linestyle='--')
    plt.plot(x,z1,'r', label='Warnbereich')
    #x,y axis scale (fixed before drawing the second curve)
    plt.xlim(0,400)
    plt.ylim(0,400)
    # solid cyan line for the switch point
    mpl.rc('lines', linewidth=2, linestyle='-')
    plt.plot(y,z2,'c', label='Schaltpunkt')
    plt.legend()
    plt.show()
if __name__=="__main__":
    #Ansprechkurve
    #Warnbereich von Lichtaste
    # Measured warning-range distances (18 points, paired with z1).
    x1 = [4.85,14.95,22.52,32.54,39.72,46.01,49.52,56.67,62.23,60.54,60.3,
            61.12,70.12,71.39,71.26,49.13,47.12,38.25]
    #Schaltpunk von lichttaste
    # Measured switch-point distances (15 points, paired with z2).
    y1 = [4.61,14.49,22.09,31.05,37.48,42.77,49.08,52.16,53.2,54.3,54.73,
            42.6,39.42,24.09,-0.228]
    z1 = [0,20,40,60,80,100,120,140,160,180,200,220,240,260,280,300,320,340]
    z2 = [0,20,40,60,80,100,120,140,160,180,200,220,240,260,280]
    #optische Abstandssensor
    #Ausgangsstrom [mA]
    x2 = [3.96,4.09,4.13,4.39,4.57,4.56,4.76,4.78,4.94,5.00,5.22]
    #Anzeige Laser [mm]
    y2 = [235,245,255,265,275,285,295,305,315,325,335]
    # Only the distance-sensor plot is currently enabled.
    # Ansprechkurve(x1,y1,z1,z2)
    optischeSensor(x2,y2)
|
# -*- coding: utf-8 -*-
# Copyright 2019-2020 Lovac42
# Copyright 2014 Patrice Neff
# Copyright 2006-2019 Ankitects Pty Ltd and contributors
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
# Support: https://github.com/lovac42/Blitzkrieg
from .main import *
|
# Imports
from idc import BADADDR, INF_BASEADDR, SEARCH_DOWN, FUNCATTR_START, FUNCATTR_END
import idc
import idaapi
import datetime
# Settings
definePrefix = "" # Prefix for the #define Output
functionPrefix = "fn" # Prefix for Function Renaming in IDA
offsetPrefix = "o" # Prefix for Offset Renaming in IDA
# Globals
Rename = -1
FILE_LOCATION = "C:\\Users\\gigia\\source\\repos\Renny\\Project Scripts\\"
Functions = []
Objects = []
def SignatureSearch():
    """
    Parse FILE_LOCATION/Signatures.txt and fill the module-level Functions
    and Objects lists with the entries found there.

    Function lines end in '", 1, True]' / '", 2, False]' etc.; Offset lines
    additionally carry an operand field (', 1, 0, True]' etc.).
    """
    class Function:
        # One parsed function entry from Signatures.txt.
        def __init__(self):
            self.Alias = ""
            self.Reference = ""
            self.Type = 0
            self.HasPrefix = False
            self.Position = 0
        def setAlias(self, Alias):
            self.Alias = Alias
        def setReference(self, Reference):
            self.Reference = Reference
        def setType(self, Type):
            self.Type = Type
        def setHasPrefix(self, HasPrefix):
            self.HasPrefix = HasPrefix
        def setPosition(self, Position):
            self.Position = Position
    class Offset:
        # One parsed offset entry from Signatures.txt (adds an Operand field).
        def __init__(self):
            self.Alias = ""
            self.Reference = ""
            self.Type = 0
            self.Operand = 0
            self.HasPrefix = False
            self.Position = 0
        def setAlias(self, Alias):
            self.Alias = Alias
        def setReference(self, Reference):
            self.Reference = Reference
        def setType(self, Type):
            self.Type = Type
        def setOperand(self, Operand):
            self.Operand = Operand
        def setHasPrefix(self, HasPrefix):
            self.HasPrefix = HasPrefix
        def setPosition(self, Position):
            self.Position = Position
    input = open(FILE_LOCATION + "Signatures.txt", "r")  # NOTE: shadows builtin input()
    lines = input.readlines()
    position = 1  # 1-based line position recorded in each entry
    for x in lines:
        # Function entries: signature string followed by type 1/2 and a bool.
        if x.find("\", 1, True]") != -1 or x.find("\", 2, True]") != -1 or x.find("\", 1, False]") != -1 or x.find("\", 2, False]") != -1:
            x = x.replace("\"", "")
            # NOTE(review): all quotes were just stripped, so find("[\"") cannot
            # match and returns -1, making `index` start at 0 — confirm this
            # parser against the actual Signatures.txt format.
            index = x.find("[\"") + 1
            object = Function()  # NOTE: shadows builtin object
            count = 0
            # Walk the comma-separated fields, slicing each value out of x.
            while count != x.count(","):
                index = index + 2
                temp = x[index:]
                newIndex = temp.find(",")
                tempIndex = index + newIndex
                if count == 0:
                    object.setPosition(position)
                    object.setAlias(x[index:tempIndex])
                if count == 1:
                    object.setReference(x[index:tempIndex])
                if count == 2:
                    object.setType(x[index:tempIndex])
                if count == 3:
                    # Final field: strip the trailing ']' before comparing.
                    hasPrefix = x[index:tempIndex - 1]
                    if hasPrefix == "False":
                        object.setHasPrefix(False)
                    if hasPrefix == "True":
                        object.setHasPrefix(True)
                index = tempIndex
                count = count + 1
            Functions.append(object)
            position = position + 1
        # Offset entries carry an extra operand field before the bool.
        if x.find(", 1, 0, True]") != -1 or x.find(", 1, 1, True]") != -1 or x.find(", 1, 0, False]") != -1 or x.find(", 1, 1, False]") != -1:
            x = x.replace("\"", "")
            index = x.find("[\"") + 1
            object = Offset()
            count = 0
            while count != x.count(","):
                index = index + 2
                temp = x[index:]
                newIndex = temp.find(",")
                tempIndex = index + newIndex
                if count == 0:
                    object.setPosition(position)
                    object.setAlias(x[index:tempIndex])
                if count == 1:
                    object.setReference(x[index:tempIndex])
                if count == 2:
                    object.setType(x[index:tempIndex])
                if count == 3:
                    object.setOperand(x[index:tempIndex])
                if count == 4:
                    hasPrefix = x[index:tempIndex - 1]
                    if hasPrefix == "False":
                        object.setHasPrefix(False)
                    if hasPrefix == "True":
                        object.setHasPrefix(True)
                index = tempIndex
                count = count + 1
            Objects.append(object)
            position = position + 1
    input.close()
# Finder Functions
def FindFuncPattern(Pattern): # Find's Func. by Pattern
    """Return the start address of the function containing a byte-pattern match, or 0."""
    match_addr = idc.FindBinary(0, SEARCH_DOWN, Pattern)
    if match_addr == BADADDR:
        return 0
    try:
        return idaapi.get_func(match_addr).startEA
    except Exception:
        # get_func yields None when the match lies outside any function.
        return 0
def FindFuncCall(Pattern): # Find's Func. by Pattern to a Call
    """Return the first operand (call target) of a pattern-matched instruction, or 0."""
    call_addr = idc.FindBinary(0, SEARCH_DOWN, Pattern)
    return 0 if call_addr == BADADDR else idc.GetOperandValue(call_addr, 0)
def FindFuncFirstReference(Reference): # Find's Func. by Reference, Returns first
    """Locate the function containing a code reference to the given string; 0 on failure."""
    # NOTE(review): XrefsTo is not imported in this file; in IDAPython it
    # normally comes from idautils — verify it is in scope when this runs.
    addr = idc.FindBinary(0, SEARCH_DOWN, "\"" + Reference + "\"")
    if addr == BADADDR: return 0
    dword = -1
    xrefs = XrefsTo(addr)
    # Walk all xrefs; `dword` ends up holding the LAST referencing address
    # (despite the "Returns first" comment) — TODO confirm intended behavior.
    for xref in xrefs:
        dword = xref.frm
    try:
        return idaapi.get_func(dword).startEA
    except Exception:
        # No xrefs (dword == -1) or address outside any function.
        return 0
def FindStringByReference(Reference): # Extracts String out of Reference (//addr)
    """Return the string literal at the pattern-matched address, or 0 when absent."""
    string_addr = idc.FindBinary(0, SEARCH_DOWN, "\"" + Reference + "\"")
    return 0 if string_addr == BADADDR else idc.GetString(string_addr)
def FindOffsetPattern(Pattern, Operand): # Find Offset by Pattern
    """Return operand number `Operand` of a pattern-matched instruction, or 0."""
    insn_addr = idc.FindBinary(0, SEARCH_DOWN, Pattern)
    return 0 if insn_addr == BADADDR else idc.GetOperandValue(insn_addr, Operand)
# Helpers
def DecToHex(Addr):
    """Format an address as a 0x-prefixed uppercase hex string, zero-padded to at least two digits."""
    hex_digits = "%0.2X" % Addr
    return "0x" + hex_digits
def PrintWrapper(Alias, Addr, Type): # Type: 1 => Function, 2 => Offset
    """Print a #define line for Addr and optionally rename it in the IDB.

    Type 0 prints the alias unprefixed; 1 applies functionPrefix; 2 applies
    offsetPrefix.  A zero/BADADDR address is reported as an error instead.
    """
    if Addr == BADADDR or Addr == 0 or Addr == 0x00:
        print("fn" + Alias + " -> Error")
        return
    if Type == 0: print("#define " + Alias + " " + DecToHex(Addr))
    if Type == 1: print("#define " + functionPrefix + Alias + " " + DecToHex(Addr))
    if Type == 2: print("#define " + offsetPrefix + Alias + " " + DecToHex(Addr))
    # Rename is the global set by the AskYN prompt in GenerateAddresses.
    if Rename == 1:
        # NOTE(review): MakeName is the legacy IDC API name — confirm it is
        # available (idc star-import) in the target IDA version.
        if Type == 0: MakeName(Addr, str(Alias))
        if Type == 1: MakeName(Addr, str(functionPrefix + Alias))
        if Type == 2: MakeName(Addr, str(offsetPrefix + Alias))
    return
# Main
def GenerateAddresses():
    """Parse Signatures.txt, resolve every entry and print #define lines."""
    SignatureSearch()  # fills the global Functions/Objects lists
    global Rename
    # AskYN: 1 = yes (also rename in the IDB), 0 = no, -1 = cancelled.
    Rename = idc.AskYN(0, "Automaticly Update Names? (sub_549570 => PrintChat)")
    if Rename == -1:
        print("Exiting...")
        return
    print("")
    print("++ Offsets (%s)" % datetime.datetime.now())
    print("Why do they keep breaking...")
    print("")
    print("++ Functions")
    # Type 1: raw byte pattern, 2: pattern at a call site, 3: string reference.
    # HasPrefix selects the prefixed (1/2) or unprefixed (0) output style.
    for x in Functions:
        if x.HasPrefix == True:
            if int(x.Type) == 1: PrintWrapper(x.Alias, FindFuncPattern(x.Reference), 1)
            if int(x.Type) == 2: PrintWrapper(x.Alias, FindFuncCall(x.Reference), 1)
            if int(x.Type) == 3: PrintWrapper(x.Alias, FindFuncFirstReference(x.Reference), 1)
        else:
            if int(x.Type) == 1: PrintWrapper(x.Alias, FindFuncPattern(x.Reference), 0)
            if int(x.Type) == 2: PrintWrapper(x.Alias, FindFuncCall(x.Reference), 0)
            if int(x.Type) == 3: PrintWrapper(x.Alias, FindFuncFirstReference(x.Reference), 0)
    print("")
    print("++ Objects")
    # Offsets only support Type 1 (pattern + operand index).
    for x in Objects:
        if x.HasPrefix == True:
            if int(x.Type) == 1: PrintWrapper(x.Alias, FindOffsetPattern(x.Reference, int(x.Operand)), 2)
        else:
            if int(x.Type) == 1: PrintWrapper(x.Alias, FindOffsetPattern(x.Reference, int(x.Operand)), 0)
    print("")
GenerateAddresses()
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# kernel log buffer dump
#
# Copyright (c) Siemens AG, 2011, 2012
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
import sys
from linux import utils
printk_log_type = utils.CachedType("struct printk_log")
class LxDmesg(gdb.Command):
    """Print Linux kernel log buffer."""

    def __init__(self):
        super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        # parse_and_eval renders the pointer as "0x... <log_buf>"; take the
        # first token and convert from hex.
        log_buf_addr = int(str(gdb.parse_and_eval(
            "(void *)'printk.c'::log_buf")).split()[0], 16)
        log_first_idx = int(gdb.parse_and_eval("'printk.c'::log_first_idx"))
        log_next_idx = int(gdb.parse_and_eval("'printk.c'::log_next_idx"))
        log_buf_len = int(gdb.parse_and_eval("'printk.c'::log_buf_len"))
        inf = gdb.inferiors()[0]
        start = log_buf_addr + log_first_idx
        if log_first_idx < log_next_idx:
            # Ring buffer has not wrapped: one contiguous region.
            log_buf_2nd_half = -1
            length = log_next_idx - log_first_idx
            log_buf = utils.read_memoryview(inf, start, length).tobytes()
        else:
            # Wrapped: read [first_idx, end of buffer) then [0, next_idx).
            log_buf_2nd_half = log_buf_len - log_first_idx
            a = utils.read_memoryview(inf, start, log_buf_2nd_half)
            b = utils.read_memoryview(inf, log_buf_addr, log_next_idx)
            log_buf = a.tobytes() + b.tobytes()
        # Byte offsets of the record fields inside struct printk_log.
        length_offset = printk_log_type.get_type()['len'].bitpos // 8
        text_len_offset = printk_log_type.get_type()['text_len'].bitpos // 8
        time_stamp_offset = printk_log_type.get_type()['ts_nsec'].bitpos // 8
        text_offset = printk_log_type.get_type().sizeof  # text follows the header
        pos = 0
        while pos < log_buf.__len__():
            length = utils.read_u16(log_buf, pos + length_offset)
            if length == 0:
                # A zero-length record marks the wrap point; jump to the
                # second half.  Seeing it in an unwrapped buffer is corruption.
                if log_buf_2nd_half == -1:
                    gdb.write("Corrupted log buffer!\n")
                    break
                pos = log_buf_2nd_half
                continue
            text_len = utils.read_u16(log_buf, pos + text_len_offset)
            text_start = pos + text_offset
            text = log_buf[text_start:text_start + text_len].decode(
                encoding='utf8', errors='replace')
            time_stamp = utils.read_u64(log_buf, pos + time_stamp_offset)
            # Emit each message line dmesg-style with a seconds timestamp.
            for line in text.splitlines():
                msg = u"[{time:12.6f}] {line}\n".format(
                    time=time_stamp / 1000000000.0,
                    line=line)
                # With python2 gdb.write will attempt to convert unicode to
                # ascii and might fail so pass an utf8-encoded str instead.
                if sys.hexversion < 0x03000000:
                    msg = msg.encode(encoding='utf8', errors='replace')
                gdb.write(msg)
            pos += length
# Register the command with gdb.
LxDmesg()
|
# -*- coding: utf8 -*-
import os
import datetime
import time
import gtools.sql.compare as sql_cmp
import stat_data
#SERVER_IDS = (1, 2, 3, 4, 5, 6, 7, 8)
SERVER_IDS = (10004,10005,10006,10007,10008)  # game server ids to process
STAT_DAYS = ((2014,9,24), (2014,9,25))        # (year, month, day) tuples
if __name__ == "__main__":
    os.system("mkdir log")
    for server_id in SERVER_IDS:
        os.system("mkdir log/%d" % (server_id,))
    import pdb
    #pdb.set_trace()
    # NOTE(review): from here on `server_id` holds the LAST id of SERVER_IDS —
    # the day loop is not nested inside the server loop; confirm that is
    # intended and not a missing indentation level.
    for day in STAT_DAYS:
        year, month, day = day
        # Next calendar day via mktime + 24h (DST edge cases ignored).
        next_day = time.localtime(time.mktime(datetime.datetime(year, month, day, 0, 0, 0).timetuple()) + 3600 * 24)
        year2 = next_day[0]
        month2 = next_day[1]
        day2 = next_day[2]
        data_file = "data/data-%d-%d-%d_%d.sql" % (year, month, day, server_id)
        data_file2 = "data/data-%d-%d-%d_%d.sql" % (year2, month2, day2, server_id)
        if not os.path.exists(data_file2):
            continue
        if not os.path.exists(data_file):
            data_file = "../../sql/db.sql"  # fall back to the pristine schema dump
        def data_updated(table, order_fields, kv_pairs):
            # Callback for mk_diff: appends one backtick-separated key=value
            # line per changed row to the per-server/table/day log file.
            if stat_data.USEFULL_FIELDS.has_key(table):  # NOTE: Python 2 only API
                if not os.path.exists("log/%d" % (server_id,)):
                    os.system("mkdir log/%d" % (server_id,))
                fp = open("log/%d/db_ny_%s_%s_%04d%02d%02d.log" % (server_id, table, server_id, year, month, day), "a")
                line = ""
                for field in order_fields:
                    line += "%s=" % (field,)
                    value = kv_pairs[field]
                    if type(value) == str:
                        line += "%s" % (value.replace('\n', ''),)
                    else:
                        line += str(value)
                    line += "`"
                line = line[0:-1] + "\n"  # drop the trailing backtick
                fp.write(line)
                fp.close()
        sql_cmp.mk_diff(data_file, data_file2, data_updated, tuple(stat_data.USEFULL_FIELDS.keys()))
import requests
import time
import json
import datetime
from Bot_body import telegramBot
# Build the bot, stamp today's date into its chunk, then poll forever.
bot = telegramBot()
bot.chunk.fill_date(datetime.datetime.today())
while True:
    bot.update()
    time.sleep(1)  # poll roughly once per second
import warnings
warnings.simplefilter("ignore", UserWarning)
from scipy.optimize import curve_fit
import numpy as np
import sys
import pdb
import os
# Add my local path to the relevant modules list
sys.path.append('/Users/Daniel/Github/Crawlab-Student-Code/Daniel Newman/Python Modules')
import Generate_Plots as genplt
# Figure output folder, keyed by this script's name.
# NOTE(review): sys.argv[0] includes the .py extension (and possibly a path) —
# confirm the intended 'Figures/<name>/' layout.
folder = 'Figures/{}/'.format(
    sys.argv[0],
)
#amp_phase = np.genfromtxt('Data_1.0Hz.txt',skip_header=1)
# Add my local path to the relevant modules list
path = os.getcwd()
rootpath = path.split('Korea')[0]
# Damped ramp response data; CSV columns: slope, zeta, amplitude, phase,
# normalized acceleration time.
data = np.genfromtxt('Data/damped_ramp_amp.txt',skip_header=1,delimiter=',')
Amps = data[:,2]
Phi = data[:,3]
norm_tacc = data[:,4]
slope = data[:,0]
zeta = data[:,1]
# BUGFIX: removed a leftover pdb.set_trace() breakpoint that dropped the
# script into the debugger on every run.
genplt.plot_3d(zeta,norm_tacc,Amps,
                r'$\frac{1}{\omega_n}$',
                r'Acceleration',
                'Percent Vibration',folder=folder,name_append='Damped Ramp Amp')
genplt.plot_3d(zeta,norm_tacc,Phi,
                r'$\frac{1}{\omega_n}$',
                r'Acceleration',
                'Phase Shift',folder=folder,name_append='Damped Ramp Shift')
# Undamped ramp data; same layout but column 1 holds frequency.
data = np.genfromtxt('Data/ramp_amp.txt',skip_header=1,delimiter=',')
Amps = data[:,2]
Phi = data[:,3]
norm_tacc = data[:,4]
slope = data[:,0]
freq = data[:,1]
genplt.plot_3d(freq,norm_tacc,Amps,
                r'$\frac{1}{\omega_n}$',
                r'Acceleration',
                'Percent Vibration',folder=folder,name_append='3D_Sens_1.0_Singularity')
genplt.plot_3d(1/freq,norm_tacc,Phi,
                r'$\frac{1}{\omega_n}$',
                r'Acceleration',
                'Phase Shift',folder=folder,name_append='Phase Shift')
# Initial parameter guesses for the curve fit below.
guess_a = -.5
guess_b = 0.4
guess_c = 0.3
guess_d = 0.1
# Effectively unbounded fit: (-inf, inf) for every parameter.
bounds = np.array([tuple((-np.inf,np.inf)),
                    tuple((-np.inf,np.inf)),
                    tuple((-np.inf,np.inf)),
                    tuple((-np.inf,np.inf))])
p0 = [guess_a,guess_b,guess_c,guess_d]
bounds = bounds.T  # curve_fit expects (lower_bounds, upper_bounds) rows
# (A 2-parameter power-law variant `a * x**b + 1` previously defined here was
# dead code: it was immediately shadowed by the definition below.)
def response(x, a, b, c, d):
    """
    Rational cubic-over-quadratic model fitted to the amplitude data.

    Both numerator and denominator equal 1 at x == 0, anchoring the curve at
    response(0) == 1.  `d` appears in both polynomials by construction.
    """
    return (a * x**3 + b * x**2 + d*x + 1) / (c * x**2 + d*x + 1)
# now do the fit
fit = curve_fit(
    response,
    data[:,4],   # x: normalized acceleration time
    data[:,2],   # y: amplitude
    p0=p0,method='trf',
    bounds=bounds
)
# curve_fit returns (optimal_params, covariance); keep only the parameters.
fit = fit[0]
print(fit)
response_fit = response(data[:,4],*fit)
guess_fit = response(data[:,4],*p0)
# NOTE(review): `data` is sorted AFTER response_fit/guess_fit were computed,
# so the fitted curves are paired with re-ordered x values below — confirm
# compare_responses tolerates this (or sort before evaluating).
data = data[data[:,4].argsort()]
# NOTE(review): the labels look swapped — the fitted curve is labelled 'Guess'
# and the initial guess 'Actual Guess'; verify intent.
genplt.compare_responses(data[:,4],
    data[:,2],'Amplitude',
    response_fit,'Guess',
    guess_fit,'Actual Guess',
    folder=folder,name_append='Amps Normalized time')
genplt.compare_responses(data[:,4],
    data[:,3],'Amplitude',folder=folder,name_append='Shift Normalized time')
|
from lxml import etree
tree = etree.parse("nlp.txt.xml")
root = tree.getroot()
import sys
def find_tuple(dependency):
    """Print tab-separated (subject, predicate, object) triples found in one
    dependency graph: an nsubj and a dobj edge sharing the same governor."""
    deps = dependency.findall("dep")
    for subj_dep in deps:
        if subj_dep.attrib.get("type") != "nsubj":
            continue
        subj_gov_idx = subj_dep.find("governor").attrib["idx"]
        for obj_dep in deps:
            # A matching dobj edge hangs off the same governor token.
            if (obj_dep.attrib.get("type") == "dobj"
                    and subj_gov_idx == obj_dep.find("governor").attrib["idx"]):
                sys.stdout.write("{}\t{}\t{}\n".format(
                    subj_dep.find("dependent").text,
                    subj_dep.find("governor").text,
                    obj_dep.find("dependent").text))
# Walk every sentence and print subject-verb-object triples from its
# collapsed-dependencies annotation.
docment = root[0]
sentences = docment.find("sentences")
for sentence in sentences:
    # BUGFIX: the original used an absolute XPath ('//dependencies[...]'),
    # which in lxml selects every matching node in the WHOLE document for
    # each sentence, printing every triple once per sentence.  The relative
    # './/' form restricts the search to the current sentence.
    dependencies = sentence.xpath(
        './/dependencies[@type="collapsed-dependencies"]')
    for dependency in dependencies:
        find_tuple(dependency)
sys.exit(0)
|
def _min_repaints(board, rows, cols):
    """
    Return the minimum number of squares that must be repainted so that some
    8x8 sub-board of `board` (list of 'W'/'B' row strings) becomes a valid
    chessboard pattern.
    """
    best = 64  # repainting a window never costs more than min(k, 64 - k) <= 32
    for top in range(rows - 7):
        for left in range(cols - 7):
            # Mismatches if the top-left square of the window keeps its color.
            keep = 0
            for i in range(top, top + 8):
                for j in range(left, left + 8):
                    same_parity = (i + j - top - left) % 2 == 0
                    matches_corner = board[i][j] == board[top][left]
                    # On even parity the square must match the corner color,
                    # on odd parity it must differ.
                    if same_parity != matches_corner:
                        keep += 1
            # Repainting the corner color instead flips every expectation,
            # so that variant costs exactly 64 - keep.
            best = min(best, keep, 64 - keep)
    return best

# Read board dimensions and rows, then report the cheapest repaint count.
# (Renamed locals: the original shadowed the builtins `map` and `min`.)
n, m = (int(v) for v in input().split())
board = [input() for _ in range(n)]
print(_min_repaints(board, n, m))
# -*- coding: utf-8 -*-
#
html = """<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<style type="text/css">
#page-content {
background: white;
width: 80mm;
font-family: Arial,Helvetica Neue,Helvetica,sans-serif;
}
@page {
size: 80mm 210mm; /*{{ order.total_height }}*/
/*margin: 2mm;*/
margin-left: 4mm;
margin-right: 4mm;
padding: 0;
}
/* print */
/*@page {
margin-left: 4mm;
margin-right: 4mm;
padding: 0;
}*/
@media print {
* {
padding-left: 0 !important;
padding-right: 0 !important;
}
.areaNfce {
border: none !important;
}
}
/* geral */
* {
padding: 0;
margin: 0;
}
body {
font-family: Arial, Helvetica, sans-serif;
min-width: 58mm;
max-width: 80mm;
}
.areaNfce {
min-width: 58mm;
margin: 4px auto;
border: 1px solid #c5c6c1;
padding: 0 8px 0 8px;
}
.areaNfce table {
width: 100%%;
margin-top: 5px;
}
.areaNfce table thead {
float: left;
width: 100%%;
min-height: 50px;
}
.areaNfce table tr td {
text-align: left;
font-size: 12px;
color: #000;
vertical-align: top;
}
/* mainNfce */
.areaNfce table.mainNfce {
border-top: none;
}
.mainNfce .titMain {
display: block;
font-size: 10px;
}
.areaNfce table.mainNfce tr td {
text-align: left;
}
.areaNfce table.mainNfce tr td .logo img {
height: 47px;
width: 47px;
padding-right: 4px;
}
.areaNfce table.detailSale {
margin-top: 10px;
table-layout: fixed;
}
.areaNfce table.detailSale tr td {
text-align: left;
padding: 5px;
font-weight: bolder;
letter-spacing: 0px;
}
.areaNfce table.detailSale tr td:first-child {
border-left: none;
}
.areaNfce table.detailSale tr td:last-child {
text-align: right;
width: 60px;
vertical-align: bottom;
}
.detailSale td {
display: inline;
}
.formPayment tr td {
font-size: 10px !important;
}
.formPayment .tdRight {
margin-left: 100px;
}
.formPayment .mRight {
margin-left: 34px;
}
/* formPayment */
.valuePayment .paymentText {
margin-top: 10px;
font-size: 12px;
font-weight: 300;
}
.formPayment .tRight {
text-align: right;
}
.valuePayment td.paymentText span {
text-align: right;
}
.formPayment .lColor {
font-size: 12px;
color: #808080;
font-weight: 300;
}
.descQt .qtde {
margin-top: 10px;
font-size: 12px;
color: #808080;
font-weight: 300;
}
.descQt .qtde.last {
color: #000;
margin-bottom: 10px;
display: block;
}
.descQt td.qtde span {
text-align: right;
display: inline;
float: right;
}
/* explanations */
.areaNfce table.explanations tr td {
text-align: center;
}
.contingencia {
background: #808080;
border: 1px solid #d4d4d4;
}
.contingencia div {
background: #fff;
width: 216px;
margin: 0 auto;
padding: 10px;
}
.explanations .contingencia h1 {
font-size: 15px;
}
/* postTax */
.areaNfce table.postTax tr td {
text-align: center;
}
.postTax .text {
font-weight: bold;
color: #000;
font-size: 10px;
}
.postTax a {
text-decoration: none;
color: #000;
display: block;
text-align: center;
}
/* User */
.areaNfce table.user tr td {
text-align: center;
}
/* Barcode */
.barcode td {
vertical-align: top !important;
}
.barcode .info-consumer {
text-align: center;
}
/*.barcode .section-info-small { display: none; }*/
.barcode .section-info {
width: 100%%;
}
.barcode .section-info .info-consumer .lColor {
font-size: 12px;
color: #808080;
font-weight: 300;
text-transform: none;
}
.barcode .section-info .info-consumer h5 {
font-size: 14px;
font-weight: bold;
text-transform: uppercase;
}
.barcode .section-info .info-consumer p {
line-height: 15px;
font-family: 'Arial', Helvetica, sans-serif;
font-size: 12px;
margin-bottom: 4px;
}
.qrCode {
/*width: 25mm;
min-height: 25mm;*/
margin: 0 auto;
text-align: center;
}
/* footer */
.nfceFooter p {
font-size: 12px !important;
color: #000 !important;
font-weight: 300;
font-family: 'Arial', Helvetica, sans-serif;
}
.nfceFooter p span {
display: block;
text-align: center;
}
.table-contingencia {
border-top: 1px solid #000;
border-bottom: 1px solid #000;
/*background: white url(data:image/png;charset=utf-8;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAAICAYAAADA+m62AAAAGklEQVQoU2NkYGCQZCACMBKhBqyENgoHuxsBNlAAO7Z6FzgAAAAASUVORK5CYII=);*/
position: relative;
}
.table-contingencia .aviso-contingencia {
background-color: white;
text-align: center;
width: 100%%;
margin: 0 auto;
padding: 5px;
}
.table-contingencia .aviso-contingencia h5 {
font-size: 14px;
font-weight: bold;
margin: 0;
padding: 0;
text-transform: uppercase;
}
.table-contingencia .aviso-contingencia p {
font-size: 12px;
}
.td-text-right {
float: right;
text-align: right;
}
@media print {
.table-contingencia {
overflow: hidden;
}
.table-contingencia:before {
position: absolute;
width: 100%%;
height: 100%%;
z-index: 2;
/*content: url('data:image/gif;charset=utf-8;base64,R0lGODlhIANkAIAAAObm5v///yH/C1hNUCBEYXRhWE1QPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgNS4zLWMwMTEgNjYuMTQ1NjYxLCAyMDEyLzAyLzA2LTE0OjU2OjI3ICAgICAgICAiPiA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPiA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtbG5zOnhtcE1NPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvbW0vIiB4bWxuczpzdFJlZj0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL3NUeXBlL1Jlc291cmNlUmVmIyIgeG1wOkNyZWF0b3JUb29sPSJBZG9iZSBQaG90b3Nob3AgQ1M2IChXaW5kb3dzKSIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDo5QUY0REVDMTAwMzIxMUU3OUQwRUM3OTJEQjVBMjMyQyIgeG1wTU06RG9jdW1lbnRJRD0ieG1wLmRpZDo5QUY0REVDMjAwMzIxMUU3OUQwRUM3OTJEQjVBMjMyQyI+IDx4bXBNTTpEZXJpdmVkRnJvbSBzdFJlZjppbnN0YW5jZUlEPSJ4bXAuaWlkOjlBRjRERUJGMDAzMjExRTc5RDBFQzc5MkRCNUEyMzJDIiBzdFJlZjpkb2N1bWVudElEPSJ4bXAuZGlkOjlBRjRERUMwMDAzMjExRTc5RDBFQzc5MkRCNUEyMzJDIi8+IDwvcmRmOkRlc2NyaXB0aW9uPiA8L3JkZjpSREY+IDwveDp4bXBtZXRhPiA8P3hwYWNrZXQgZW5kPSJyIj8+Af/+/fz7+vn49/b19PPy8fDv7u3s6+rp6Ofm5eTj4uHg397d3Nva2djX1tXU09LR0M/OzczLysnIx8bFxMPCwcC/vr28u7q5uLe2tbSzsrGwr66trKuqqainpqWko6KhoJ+enZybmpmYl5aVlJOSkZCPjo2Mi4qJiIeGhYSDgoGAf359fHt6eXh3dnV0c3JxcG9ubWxramloZ2ZlZGNiYWBfXl1cW1pZWFdWVVRTUlFQT05NTEtKSUhHRkVEQ0JBQD8+PTw7Ojk4NzY1NDMyMTAvLi0sKyopKCcmJSQjIiEgHx4dHBsaGRgXFhUUExIREA8ODQwLCgkIBwYFBAMCAQAAIfkEAAAAAAAsAAAAACADZAAAAv+Ej6nL7Q+jnLTai7PevPsPhuJIluaJpuqKBe4Lx/JM1/aN5/rO9/4PDAqHxKLxiEwql8ym8wmNSqfUqvWKzWq33K73Cw6Lx+Sy+YxOq9fstvsNF7Pm9Lr9js/r9/y+/w8YyBJHWGh4iJiouMjY6PgIGSk5SVlpeYmZqbnJ2emJKRgqOkpaanqKmqq6avrp+gobKztLW2t7i5uru8vb6/sLHCz8xFpsfIycrLzM3Lw8DB0tPU1dbX2Nna29zd3t/Q1e6zxOXm5+jp6u3hre7v4OHy8/T19vf4+fr79fue7/DzCgwIEEmfE7iDChwoUMGzp8CDGixIkUC1q8iDGjxo3/HCFQ/AgypMiRJEuaPIkypUp9HVu6fAkzpkxRK2vavIkzp86dPHv6/FlyptChRIsaPdoAqNKlTJs6fQo1qtSpVJUgvYo1q9at56p6/Qo2rNixZMuaPRuLq9q1bNu6zYM2rty5dOvavYs3r9e3fPv6/Qv4
gd7BhAsbPow4seLF4gI7fgw5clHGlCtbvow5s+bNnGNI/gw6tGiAnUubPo06terVrBmOfg07tuxUrWvbvo07t+7dvBnN/g08uPBBvYsbP448ufLlyIc7fw49+gTm1Ktbv449u/aq0rt7/y58u/jx5MubP4/+Hfj17NtDTg8/vvz59OvbV+Q+v/79W+/7XP8PYIACDkggDvwdiGCCLxXIYIMOPghhhLgpSGGFFpImYYYabshhhx6GdWGIIo6ozIcmnohiiiqu2BCJLr4II00szkhjjTbeiKMuMe7IY4905AhkkEIOSWSRahQAADs=');*/
}
.table-contingencia .aviso-contingencia {
position: relative;
z-index: 10;
}
}
</style>
</head>
<body>
<div class="areaNfce">
<table class="mainNfce">
<thead>
<tr>
<td id="companyLogo">
<div class="logo">
<img src="%(url_logo)s" />
</div>
</td>
<td class="titMain">
<p>
<span class="label">CNPJ: %(nl_company_cnpj_cpf)s </span><br><span style="">%(ds_company_issuer_name)s</span>
</p>
<p>%(ds_company_address)s, %(ds_company_neighborhood)s, %(ds_company_complement)s, %(ds_company_number)s, %(ds_company_city_name)s, %(ds_company_uf)s</p>
<p>CEP: %(ds_company_zip_code)s</p>
<p>TEL: %(ds_company_fone)s</p>
<!--<p>Documento Auxiliar da Nota Fiscal de Consumidor Eletrônica</p>-->
</td>
</tr>
</thead>
</table>
<table class="table-contingencia">
<tr>
<td>
<div class="aviso-contingencia">
<!--<h5>Emitida em Contingencia</h5>
<p>Pendente de autorização</p>-->
<p style="font-weight: bold">Documento Auxiliar da Nota Fiscal de Consumidor Eletrônica</p>
</div>
</td>
</tr>
</table>
<table class="formPayment" style="border-bottom: 1px solid #000;">
<tr>
<td><strong>Código</strong></td>
<td><strong>Descrição</strong></td>
<td class="tdRight"><strong>Qtde</strong></td>
<td class="tdRight"><strong>UN</strong></td>
<td class="tRight"><strong>Vl Unit.</strong></td>
<td class="tRight"><strong>Vl Total</strong></td>
</tr>
%(table_items)s
</table>
<table class="descQt" style="border-bottom: 1px solid #000;">
<!--<tr>
<td>Itens unitários<span class="td-text-right">[(qtd_unit_itens)]</span></td>
</tr>-->
<tr>
<td>Qtde total de itens <span class="td-text-right">%(qtd_itens)s</span></td>
</tr>
<tr>
<td>Valor total R$ <span class="td-text-right">%(tot_product)s</span></td>
</tr>
<tr id="discount">
<td>Desconto R$ <span class="td-text-right">%(vl_discount)s</span></td>
</tr>
<tr id="shipping">
<td class="last">Frete R$ <span class="td-text-right">%(vl_shipping)s</span></td>
</tr>
<tr>
<td class="last"><strong>Valor a Pagar R$ <span class="td-text-right">%(vl_total)s</span></strong></td>
</tr>
</table>
<table class="valuePayment" style="border-bottom: 1px solid #000;">
<tr>
<td>FORMA PGTO. <span class="td-text-right">VALOR PAGO R$</span></td>
</tr>
%(payments)s
</table>
<table class="postTax" style="border-bottom: 1px solid #000;">
<tr>
<td><span id="url_consulta" class="text">Consulte pela Chave de Acesso em <br><a style="color: blue" href="">%(url_sefaz)s</a></span></td>
</tr>
<tr>
<td>%(ds_danfe)s</td>
</tr>
</table>
<table>
<tr>
<td>
<div class="qrCode">
<img width="110px" src="%(QRCODE)s" />
</div>
</td>
</tr>
</table>
<table class="barcode">
<tr class="section-info">
<td style="text-align: center" class="info-consumer">
<p><strong>Consumidor</strong> %(consumer)s</p>
<p style="font-size: 10px"><strong>NFCe nº %(nl_invoice)s Série %(ds_invoice_serie)s %(dt_invoice_issue)s</strong></p>
<p><strong>Via consumidor</strong></p>
<p><strong>Protocolo de autorização:</strong>%(ds_protocol)s</p>
<p><strong>Data de autorização:</strong>%(dt_hr_invoice_issue)s</p>
<!--<div class="contingency-text">
<h5>Emitida em Contingencia</h5>
<p>Pendente de autorização</p>
</div>-->
<!--<div id="qrCode" class="qrCode">
<img src="%(QRCODE)s" />
</div>-->
</td>
</tr>
</table>
<table class="nfceFooter" id="stateFiscalMessage">
<tr>
<td><p style="text-align: center">%(state_fiscal_message)s</p></td>
</tr>
</table>
<table class="nfceFooter">
<tr>
<td><p style="text-align: center">%(approximate_tax)s</p></td>
</tr>
</table>
<table class="nfceFooter">
<tr>
<td><p style="text-align: left; font-size: 10px">%(additional_information)s</p></td>
</tr>
</table>
<table class="nfceFooter">
<tr>
<td style="text-align: center; font-size: 10px"><strong>Empresa de Software www.empresa.com</strong></td>
</tr>
</table>
</div>
</body>
</html>
""" |
# coding: utf-8
class Solution:
    """LeetCode 190: reverse the bits of a 32-bit unsigned integer."""

    # @param n, an integer
    # @return an integer
    def reverseBits(self, n):
        """Return n with the order of its 32 bits reversed."""
        # Zero-pad to exactly 32 bits so leading zeros become trailing bits.
        binstr = bin(n)[2:].zfill(32)
        return int(binstr[::-1], base=2)


a = Solution()
# BUG FIX: was a Python 2 print statement; the call form works on 2 and 3.
print(a.reverseBits(43261596))
#!/opt/loca/bin/python
import sys, re, itertools, optparse
from math import *
# Build the CLI; all real inputs are positional arguments parsed by hand below.
optParser = optparse.OptionParser(
    usage = "python %prog [options] <number of tables to combine> <miRNA DESeq Table> " +
    "<transcript DESeq table> <alias table> <That number of tables> <out_prefix>",
    description=
    "This script takes output tables from the combineInteractionsAndFatiGOprocessHits.py script " +
    "and outputs a table that is suitable for the online Hive Plot tool or for further processing " +
    "into a DOT table for HiveR.",
    epilog =
    "Written by Warren McGee (warren-mcgee@fsm.northwestern.edu), " +
    "Jane Wu Lab, Northwestern University, Chicago, USA. (c) 2014. " +
    "Released under the terms of the GNU General Public License v3." )
(opts, args) = optParser.parse_args()
# args[0] is the number of interaction tables the caller promised to supply.
num_tables = int(args[0])
# Tables sit between the alias table (args[3]) and the output prefix (args[-1]).
tables = args[4:-1]
if(len(tables) < num_tables):
    # Fallback: the tables may have arrived as one space-separated argument.
    tables = tables[0].split(" ")[1:]
if len( args ) != num_tables + 5 and len(tables) != num_tables:
    sys.stderr.write( sys.argv[0] + ": Error: Please provide the number of tables as you specified.\n" )
    sys.stderr.write( " Call with '-h' to get usage information.\n" )
    sys.exit( 1 )
# Output file names are derived from the final positional argument (prefix).
node_out = args[-1] + "_nodeList.txt"
edge_out = args[-1] + "_edgeList.txt"
mirna_file = args[1]
transcript_file = args[2]
alias_file = args[3]
# Global lookup tables shared by the processing functions below.
alias_dict = {}
deseq_dict = {}
mirna_dict = {}
gene_dict = {}
term_dict = {}
edge_dict = {}
# Running extrema, updated by processTable, used later to normalize
# node colors and heights when writing the node list.
max_mirRank = max_mirEdgeRank = max_mirLogp = min_mirLogp = 0
max_geneRank = max_geneLogp = min_geneLogp = 0
max_termRank = 0
def processDeseq(fileName):
    """Read a DESeq results table and record an overall rank per feature
    in the module-level deseq_dict (keyed by the first column's id).
    """
    with open(fileName, 'r') as handle:
        handle.readline()  # discard the header row
        for row in handle.readlines():
            fields = row.strip("\n").split("\t")
            feature_id = fields[0]
            base_mean = float(fields[1]) + 1  # +1 guards against log10(0)
            fc = 0 if fields[2] == "NA" else float(fields[2])
            adj_p = 1 if fields[-1] == "NA" else float(fields[-1])
            mean_term = log10(base_mean)
            if base_mean < 30:
                # Low-expression features: rank on fold change + expression only.
                rank = fc - mean_term if fc < 0 else fc + mean_term
            else:
                # Well-expressed features also factor in the adjusted p-value.
                if fc < 0:
                    rank = fc + log10(adj_p) - mean_term
                else:
                    rank = fc + abs(log10(adj_p)) + mean_term
            deseq_dict[feature_id] = rank
def processTable(fileName):
    """Parse one combined interaction/FatiGO table, populating the miRNA,
    gene, term and edge dictionaries, and tracking the global min/max
    statistics used later to normalize node colors and heights.
    """
    global max_mirLogp, min_mirLogp, max_geneLogp, min_geneLogp
    global max_mirRank, max_mirEdgeRank, max_geneRank, max_termRank
    with open(fileName, 'r') as input:
        header = input.readline()
        for line in input.readlines():
            lineList = line.strip("\n").split("\t")
            # Columns 0-4: miRNA; 5-8: target transcript; 9+: pathway/GO term.
            mir_acc, mir_name, pDE, pFatiscan, pCombined = lineList[:5]
            ucsc_id, ucsc_logp, gene_name, gene_desc = lineList[5:9]
            category, term_id, term_name, term_size, percent, log_odds, pFatigo = lineList[9:]
            # Ranks were computed from the DESeq tables by processDeseq.
            mirna_rank = deseq_dict[mir_acc]
            mirna_edgeRank = -log10( float( pFatiscan ) )
            ucsc_rank = deseq_dict[ucsc_id]
            ucsc_fdr = str( 10**( -abs( float( ucsc_logp ) ) ) )
            term_rank = str( -log10( float( pFatigo ) ) )
            # calculate the logP of the miRNA's differential expression
            # (signed: negative for down-regulated miRNAs)
            if mirna_rank < 0:
                mir_logp = log10(float(pDE))
            else:
                mir_logp = -log10(float(pDE))
            # add the miRNA, gene, and/or pathway/GO term if it's not already been stored
            # (slots [1]/[2] of each entry are outgoing/incoming degree counters)
            if mir_acc not in mirna_dict:
                mirna_dict[mir_acc] = [mir_name, 0, 0, str(mirna_edgeRank), pCombined, mir_logp, mirna_rank]
            if term_id not in term_dict:
                term_dict[term_id] = [term_name, 0, 0, category, pFatigo, term_rank]
            if ucsc_id not in gene_dict:
                gene_dict[ucsc_id] = [gene_name, 0, 0, ucsc_fdr, ucsc_logp, ucsc_rank]
            # add the edge if it's not already been stored; increment the edge count
            if mir_acc in edge_dict:
                if ucsc_id not in edge_dict[mir_acc]:
                    edge_dict[mir_acc].append(ucsc_id)
                    mirna_dict[mir_acc][1] += 1
                    gene_dict[ucsc_id][2] += 1
            else:
                edge_dict[mir_acc] = [ucsc_id]
                mirna_dict[mir_acc][1] += 1
                gene_dict[ucsc_id][2] += 1
            if term_id in edge_dict:
                if ucsc_id not in edge_dict[term_id]:
                    edge_dict[term_id].append(ucsc_id)
                    gene_dict[ucsc_id][1] += 1
                    term_dict[term_id][2] += 1
            else:
                edge_dict[term_id] = [ucsc_id]
                gene_dict[ucsc_id][1] += 1
                term_dict[term_id][2] += 1
            # NOTE(review): gene_name is aliased AFTER the gene node was stored
            # above, so the alias never reaches gene_dict -- confirm intended.
            if gene_name in alias_dict:
                gene_name = alias_dict[gene_name]
            # update the max values if necessary (use to normalize the colors and node height)
            if abs(mirna_rank) > max_mirRank:
                max_mirRank = abs(mirna_rank)
            if float(mir_logp) > max_mirLogp:
                max_mirLogp = float(mir_logp)
            elif float(mir_logp) < min_mirLogp:
                min_mirLogp = float(mir_logp)
            if mirna_edgeRank > max_mirEdgeRank:
                max_mirEdgeRank = mirna_edgeRank
            if abs(ucsc_rank) > max_geneRank:
                max_geneRank = abs(ucsc_rank)
            if float(ucsc_logp) > max_geneLogp:
                max_geneLogp = float(ucsc_logp)
            elif float(ucsc_logp) < min_geneLogp:
                min_geneLogp = float(ucsc_logp)
            if float(term_rank) > max_termRank:
                max_termRank = float(term_rank)
def returnMirNodeColor(mir_acc):
    """Return [hexcolor, r, g, b, a] for a miRNA node.

    Negative logP (down-regulated) shades toward blue, positive toward red;
    alpha scales with |rank| relative to the global maximum rank.
    """
    mir_logp = float( mirna_dict[mir_acc][-2] )
    mir_rank = float( mirna_dict[mir_acc][-1] )
    if mir_logp < 0:
        # Blue family: red/green channels whiten as logP weakens.
        red = int(ceil(255*(1- mir_logp / min_mirLogp)))
        green = int(ceil(255*(1- mir_logp / min_mirLogp)))
        blue = 255
        alpha = int(ceil(150*(abs(mir_rank) / max_mirRank))) + 50
    else:
        # Red family: green/blue channels whiten as logP weakens.
        red = 255
        green = int(ceil(255*(1 - mir_logp / max_mirLogp)))
        blue = int(ceil(255*(1 - mir_logp / max_mirLogp)))
        alpha = int(ceil(150*(mir_rank / max_mirRank))) + 50
    color = '#%02x%02x%02x%02x' % tuple([red, green, blue, alpha])
    # A negative channel value leaks a '-' into the hex string; treat as fatal.
    if re.search("-", color):
        print >> sys.stderr, mir_acc + " with mir_rank/max_rank " + str([mir_rank, max_mirRank]) + \
        " and mir_logp/max/min_logp " + str([mir_logp, max_mirLogp, min_mirLogp]) + " produced the invalid hex color " + color
        sys.exit(1)
    return [color, red, green, blue, alpha]
def returnMirEdgeColor(mir_acc):
    """Return [hexcolor, r, g, b, a] for a miRNA's outgoing edges.

    Blue family for down-regulated miRNAs, red family for up-regulated;
    the shade lightens as the FatiScan edge rank weakens.
    """
    fatiscan = float( mirna_dict[mir_acc][3] )
    rank = float( mirna_dict[mir_acc][-1] )
    # Shared shading channel: closer to 255 means a weaker FatiScan score.
    shade = int(ceil(255 * (1 - fatiscan / max_mirEdgeRank)))
    if rank < 0:
        red, green, blue = shade, shade, 255
    else:
        red, green, blue = 255, shade, shade
    # Opacity tracks |rank| against the global maximum (range 50-200).
    alpha = int(ceil(150 * (abs(rank) / max_mirRank))) + 50
    color = '#%02x%02x%02x%02x' % (red, green, blue, alpha)
    return [color, red, green, blue, alpha]
def returnGeneNodeColor(ucsc_id):
    """Return [hexcolor, r, g, b, a] for a target-gene node.

    Negative logP (down-regulated) shades toward blue, positive toward red;
    alpha scales with |rank| relative to the global maximum gene rank.
    """
    gene_logp = float( gene_dict[ucsc_id][-2] )
    gene_rank = float( gene_dict[ucsc_id][-1] )
    if gene_logp < 0:
        # Blue family: red/green channels whiten as logP weakens.
        red = int(ceil(255*(1-gene_logp / min_geneLogp)))
        green = int(ceil(255*(1-gene_logp / min_geneLogp)))
        blue = 255
        alpha = int(ceil(150*(abs(gene_rank) / max_geneRank))) + 50
    else:
        # Red family: green/blue channels whiten as logP weakens.
        red = 255
        green = int(ceil(255*(1-gene_logp / max_geneLogp)))
        blue = int(ceil(255*(1-gene_logp / max_geneLogp)))
        alpha = int(ceil(150*(gene_rank / max_geneRank))) + 50
    color = '#%02x%02x%02x%02x' % tuple([red, green, blue, alpha])
    # A negative channel value leaks a '-' into the hex string; treat as fatal.
    if re.search("-", color):
        print >> sys.stderr, ucsc_id + " with gene_rank/max_rank " + str([gene_rank, max_geneRank]) + \
        " and gene_logp/max/min_logp " + str([gene_logp, max_geneLogp, min_geneLogp]) + " produced the invalid hex color " + color
        sys.exit(1)
    return [color, red, green, blue, alpha]
def returnNodeValue(id, dict):
    """Return the magnitude of the rank stored last in dict's entry for id."""
    rank = dict[id][-1]
    return abs(float(rank))
def returnTermNodeColor(term_id):
    """Return [hexcolor, r, g, b, a] for a pathway/GO term node.

    Colors come from a fixed palette keyed by the term's annotation category.
    BUG FIX: an unrecognized category previously left red/green/blue unbound
    and crashed with NameError; now it raises an explicit ValueError.
    """
    category = term_dict[term_id][3]
    # Fixed per-source palette (same RGB values as before).
    palette = {
        "KEGG": (230, 159, 0),
        "REACTOME": (0, 158, 115),
        "BIOCARTA": (86, 180, 233),
        "GOBP": (204, 121, 167),
    }
    if category not in palette:
        raise ValueError("Unknown term category: %s" % category)
    red, green, blue = palette[category]
    alpha = 200
    color = '#%02x%02x%02x%02x' % (red, green, blue, alpha)
    return [color, red, green, blue, alpha]
def returnEdgeColor(rgbList, degree, maxDegree):
    """Build an edge color from an RGB triple, with alpha scaled by
    log(degree)/log(maxDegree).

    Returns [hexcolor, r, g, b, alpha].
    BUG FIX: alpha was a float, but the '%02x' format specifier requires an
    integer (TypeError), and the float also leaked into the returned list
    (rendered as e.g. "255.0" by callers). Truncate to int first.
    """
    alpha = int(255 * (log(degree) / log(maxDegree)))
    colorList = rgbList + [alpha]
    color = '#%02x%02x%02x%02x' % tuple(colorList)
    return [color] + colorList
def returnLargestDegree(dict):
    """Return the largest total (outgoing + incoming) degree in a node dict."""
    # Seed with 0 so an empty dictionary still yields 0, exactly as the
    # accumulator-based loop did.
    degrees = [entry[1] + entry[2] for entry in dict.values()]
    return max(degrees + [0])
def returnMirNodeHeight(mir_acc):
    """Node height (1..4) for a miRNA, scaled by its degree vs. the maximum."""
    entry = mirna_dict[mir_acc]
    degree = entry[1] + entry[2]
    # Expression kept verbatim: under Python 2 the '/' here is integer division.
    return int(ceil(3*degree / max_mirDegree)) + 1
def returnGeneNodeHeight(ucsc_id):
    """Node height (1..4) for a gene, scaled by its degree vs. the maximum."""
    entry = gene_dict[ucsc_id]
    degree = entry[1] + entry[2]
    # Expression kept verbatim: under Python 2 the '/' here is integer division.
    return int(ceil(3*degree / max_geneDegree)) + 1
def returnTermNodeHeight(term):
    """Node height (1..4) for a term, scaled by its degree vs. the maximum."""
    entry = term_dict[term]
    degree = entry[1] + entry[2]
    # Expression kept verbatim: under Python 2 the '/' here is integer division.
    return int(ceil(3 * degree / max_termDegree)) + 1
# Load the gene-name alias map (maps column-2 names to column-1 aliases).
with open(alias_file, 'r') as alias:
    for line in alias.readlines():
        lineList = line.strip("\n").split("\t")
        name = lineList[2]
        # NOTE(review): this rebinds 'alias' (the open file handle); harmless
        # only because readlines() was already fully consumed above.
        alias = lineList[1]
        if alias != "":
            alias_dict[name] = alias
# Rank every miRNA and transcript, then ingest each interaction table.
processDeseq(mirna_file)
processDeseq(transcript_file)
for file in tables:
    processTable(file)
# Global degree maxima used to scale node heights.
max_mirDegree = returnLargestDegree(mirna_dict)
max_geneDegree = returnLargestDegree(gene_dict)
max_termDegree = returnLargestDegree(term_dict)
with open(node_out, 'w') as output:
    headerList = ["id", "axis", "node_label", "node_value", "node_height",
    "node_color","edge_color","num_Outgoing", "num_Incoming","node_r","node_g","node_b",
    "node_a","edge_r","edge_g","edge_b","edge_a","group","FDR","Rank"]
    header = "\t".join(headerList) + "\n"
    output.write(header)
    # Axis 1: miRNA nodes.
    axis = "TDP-43-regulated_miRNA"
    for mir_acc in sorted(mirna_dict.keys()):
        node_label, num_out, num_in, edgeRank, fdr, logp, rank = mirna_dict[mir_acc]
        node_height = returnMirNodeHeight(mir_acc)
        node_value = returnNodeValue(mir_acc, mirna_dict)
        if float(logp) < 0:
            group = "upGenesDownMiRNAs"
        else:
            group = "downGenesUpMiRNAs"
        nodeColorList = returnMirNodeColor(mir_acc)
        edgeColorList = returnMirEdgeColor(mir_acc)
        lineList = [mir_acc, axis, node_label, str(node_value), str(node_height),
        nodeColorList[0], edgeColorList[0], str(num_out), str(num_in)]
        newLine = "\t".join(lineList) + \
        "\t" + "\t".join(str(x) for x in nodeColorList[1:]) + \
        "\t" + "\t".join(str(x) for x in edgeColorList[1:]) + \
        "\t" + "\t".join([group, fdr, str(rank)]) + "\n"
        output.write(newLine)
    # Axis 2: target-gene nodes (their edges deliberately carry no color).
    axis = "Target_Gene"
    for ucsc_id in sorted(gene_dict.keys()):
        node_label, num_out, num_in, fdr, logp, rank = gene_dict[ucsc_id]
        node_height = returnGeneNodeHeight(ucsc_id)
        node_value = returnNodeValue(ucsc_id, gene_dict)
        if float(logp) < 0:
            group = "downGenesUpMiRNAs"
        else:
            group = "upGenesDownMiRNAs"
        nodeColorList = returnGeneNodeColor(ucsc_id)
        #edgeColorList = returnEdgeColor(nodeColorList[1:4], node_value, max_geneDegree)
        edge_color = "#00000000"
        edge_r = edge_g = edge_b = edge_a = ""
        lineList = [ucsc_id, axis, node_label, str(node_value), str(node_height),
        nodeColorList[0], edge_color, str(num_out), str(num_in)]
        newLine = "\t".join(lineList) + \
        "\t" + "\t".join(str(x) for x in nodeColorList[1:]) + \
        "\t" + "\t".join([edge_r, edge_g, edge_b, edge_a, group, fdr, str(rank)]) + "\n"
        output.write(newLine)
    # Axis 3: pathway/GO term nodes.
    axis = "Fatigo"
    for term in sorted(term_dict.keys()):
        node_label, num_out, num_in, category, fdr, rank = term_dict[term]
        node_height = returnTermNodeHeight(term)
        node_value = returnNodeValue(term, term_dict)
        nodeColorList = returnTermNodeColor(term)
        edgeColorList = returnEdgeColor(nodeColorList[1:4], node_value, max_termDegree)
        lineList = [term, axis, node_label, str(node_value), str(node_height),
        nodeColorList[0], edgeColorList[0], str(num_out), str(num_in)]
        newLine = "\t".join(lineList) + \
        "\t" + "\t".join(str(x) for x in nodeColorList[1:]) + \
        "\t" + "\t".join(str(x) for x in edgeColorList[1:]) + \
        "\t" + "\t".join([category, fdr, str(rank)]) + "\n"
        output.write(newLine)
# Edge list: one "incoming-node <tab> outgoing-node" row per stored edge.
with open(edge_out, 'w') as output:
    #header = "\t".join(["Outgoing Node", "Incoming Node"]) + "\n"
    #output.write(header)
    for target in sorted(edge_dict.keys()):
        nodeList = edge_dict[target]
        for node in nodeList:
            newLine = "\t".join([node, target]) + "\n"
            output.write(newLine)
from ..context import ptpy
import pytest
# Probe for a real camera once at import time; the session fixtures below
# skip (or fail, under --expect-camera) when none was found.
available_camera = None
try:
    available_camera = ptpy.PTPy(knowledge=False)
except Exception as e:
    # No camera (or transport error): leave available_camera as None.
    print(e)
    pass
# Use the same camera for a testing session. And skip all tests that use it.
@pytest.fixture(scope='session', autouse=True)
def camera():
    """Session-wide camera handle; skips or fails dependent tests when absent."""
    if available_camera is not None:
        return available_camera
    if pytest.config.getoption('--expect-camera'):
        pytest.fail('Expected a camera but None was found')
    pytest.skip('No camera available to test')
# Each test gets the latest operations and properties supported, since these
# may change on different functional modes.
@pytest.fixture(scope='function', autouse=True)
def device_properties(camera):
    """Properties supported in the camera's current functional mode."""
    if not camera:
        return []
    return camera.get_device_info().DevicePropertiesSupported
@pytest.fixture(scope='function', autouse=True)
def device_operations(camera):
    """Operations supported in the camera's current functional mode."""
    if not camera:
        return []
    return camera.get_device_info().OperationsSupported
# TODO: This is a hacky solution to the lack of fixtures in parametrize. It
# should be expunged if pytests ends up fixing that.
def pytest_generate_tests(metafunc):
    """Parametrize tests over the camera's supported properties/operations.

    Queries the module-level camera directly because fixtures cannot be used
    inside parametrize.
    """
    if 'device_property' in metafunc.fixturenames:
        device_properties = (
            available_camera.get_device_info().DevicePropertiesSupported if
            available_camera else []
        )
        metafunc.parametrize('device_property', device_properties)
    if 'device_operation' in metafunc.fixturenames:
        device_operations = (
            available_camera.get_device_info().OperationsSupported if
            available_camera else []
        )
        # BUG FIX: this previously parametrized 'device_property' again, so
        # tests requesting 'device_operation' never received their argument.
        metafunc.parametrize('device_operation', device_operations)
|
import os
import json
import time
import warnings
import numpy as np
from numpy import newaxis
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from keras.models import load_model
from keras.callbacks import EarlyStopping
# Load model/run configuration once at import time (closing the file handle,
# which the previous open(...).read() never did).
with open(os.path.join(os.path.dirname(__file__), 'configs.json')) as _config_file:
    configs = json.load(_config_file)
warnings.filterwarnings("ignore") #Hide messy Numpy warnings
# BUG FIX: np.random.seed() returns None, so `seed = np.random.seed(seed=24)`
# left seed == None and the Dropout layers below were effectively unseeded.
# Keep the integer around and seed NumPy with it.
seed = 24
np.random.seed(seed)
def build_network(layers, data_gen_train, data_gen_test, steps_per_epoch, configs):
    """Build, compile and train a stacked-LSTM regression network.

    layers: [input_dim, lstm1_units, lstm2_units, output_dim].
    data_gen_train / data_gen_test: batch generators (data_gen_test is not
        used inside this function).
    steps_per_epoch: batches per epoch for fit_generator.
    configs: parsed configs.json holding loss/optimizer/epochs/filename.
    Returns (trained model, Keras History object).
    """
    model = Sequential()
    # NOTE(review): input_dim/output_dim are Keras 1.x argument names --
    # confirm the installed Keras version still accepts them.
    model.add(LSTM(
        input_dim=layers[0],
        output_dim=layers[1],
        return_sequences=True)
    )
    model.add(Activation("tanh"))
    # Dropout seeded with the module-level `seed` for reproducibility.
    model.add(Dropout(
        0.5,
        seed=seed)
    )
    model.add(LSTM(
        layers[2],
        return_sequences=False)
    )
    model.add(Activation("tanh"))
    model.add(Dropout(
        0.5,
        seed=seed)
    )
    model.add(Dense(
        output_dim=layers[3])
    )
    model.add(Activation("linear"))
    start = time.time()
    model.compile(
        loss=configs['model']['loss_function'],
        optimizer=configs['model']['optimiser_function'],
        metrics=['mse', 'mae', 'mape', 'acc']
    )
    print("> Compilation Time : ", time.time() - start)
    start2 = time.time()
    # Early stopping watches the training MSE (no validation data supplied).
    history = model.fit_generator(
        data_gen_train,
        steps_per_epoch=steps_per_epoch,
        epochs=configs['model']['epochs'],
        callbacks=[EarlyStopping(monitor='mean_squared_error', min_delta=5e-5, patience=20, verbose=1)]
    )
    print("> Training Time : ", time.time() - start2)
    model.save(configs['model']['filename_model'])
    print('> Model Trained! Weights saved in', configs['model']['filename_model'])
    return model, history
def load_network(filename):
    """Return the Keras model stored at *filename*, or None if it is missing."""
    # Guard clause: bail out early when the h5 file does not exist.
    if not os.path.isfile(filename):
        print('ERROR: "' + filename + '" file does not exist as a h5 model')
        return None
    return load_model(filename)
# Author: Curran Lipsett
# Date: 12/18/2014
# Emit the classic greeting to stdout.
greeting = "Hello World"
print(greeting)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from bts.core import tokenize
from bts.models import TermHits, ModelTerms, Field, TermUpdate
from datetime import datetime, date, timedelta
from google.appengine.api.labs import taskqueue
from google.appengine.ext import db
import logging
# Separator used to encode "frequency<delim>word" term strings in the index.
TERM_DELIMITER = "@|@"
# Time budget for long-running batch work.
# NOTE(review): not referenced in this portion of the module -- presumably
# used by merge_updates below; confirm.
MAX_RUNTIME = timedelta(seconds=15)
def request_indexing(model):
    """Request that the supplied model object be indexed asynchronously. This is
    the preferred mechanism for adding or updating a model in the index.

    Accepts either a db.Key or a model instance. Instances are only queued
    when they declare bts_fields and are not already mid-indexing.
    """
    # BUG FIX: key was previously only assigned on some branches, so a model
    # without a bts_fields descriptor (or one flagged as currently indexing)
    # crashed with NameError at the final `if key:` check.
    key = None
    if isinstance(model, db.Key):
        key = model
    else:
        # getattr with a default replaces the old bare `except:` swallow.
        descriptor = getattr(model, 'bts_fields', None)
        if descriptor:
            # Skip models already being re-indexed (flag set by update_model).
            currently_indexing = getattr(model, 'bts_currently_indexing', False)
            if not currently_indexing:
                key = model.key()
    if key:
        taskqueue.Task(url='/bts/index-model', params=dict(key=key)).add(queue_name=
            'index-model')
def request_removal(model):
    """Request that the supplied model object be removed from the index
    asynchronously. This is the preferred mechanism for removing a model from
    the index.
    """
    # Only models that declared indexable fields are ever in the index.
    try:
        fields = model.bts_fields
    except:
        fields = None
    if fields:
        task = taskqueue.Task(url='/bts/remove-model',
                              params=dict(key=model.key()))
        task.add(queue_name='index-model')
def remove_model(key):
    """Removes the model identified by the given key from the index, effective
    immediately.
    """
    # Strip the key out of every TermHits entry that still references it.
    modified_hits = []
    for hit in TermHits.all().filter('model_keys =', key):
        remaining = set(hit.model_keys)
        remaining.remove(key)
        hit.model_keys = list(remaining)
        modified_hits.append(hit)
    db.put(modified_hits)
    # Drop the per-model term bookkeeping records as well.
    stale_terms = ModelTerms.all(keys_only=True).filter("model =", key).fetch(200)
    db.delete(stale_terms)
def purge_processed_updates():
    """Purges processed TermUpdates from the database.
    """
    # Delete in batches of 200, stopping after ten batches or when none remain.
    for _ in range(0, 10):
        # Any positive dt_processed matches; the inequality filter excludes
        # null (unprocessed) rows.
        cutoff = datetime.now() - timedelta(weeks=10000000)
        batch = TermUpdate.all(keys_only=True).filter("dt_processed >", cutoff).fetch(200)
        if not batch:
            return
        db.delete(batch)
def update_model(model, descriptor=None):
    """Update the specified model in the index. This has the effect of queuing
    TermUpdates to capture any changes in the terms that are indexed
    for this model. At this point, the model is not yet searchable by the new
    terms. For that to happen, the merge_updates function must be run in order
    to merge the queued TermUpdates into the actual index as TermHits.
    This method also takes care of materializing any materialized properties
    prior to indexing the model.
    Models can optionally define a method called "indexable" to indicate whether
    or not the model is eligible to be added to the index. If indexable returns
    False, the model will still be materialized, but it will not be included
    in the index.
    Models can optionally define a method called "term_added" to find out
    whenever new terms are added to the index (for this model). For each added
    term, term_added will be invoked with a single non-keyword argument
    representing the added term. For full text indexed fields, each term will be
    a string, and for other fields it will be the same as the datatype of the
    source property.
    This function also makes sure that the synthetic bts_all field contains
    all terms from all indexed fields to allow searching against all fields.
    Keyword arguments:
    model
      The object to add to the index
    descriptor (optional)
      A list of dictionaries identifying which of the model's properties are
      indexable and configuring how they are indexed. If no descriptor is
      given, this function attempts to find one under the class attribute
      "bts_fields".
      Each descriptor dictionary contains the following entries:
      field (required)
        The name of the property (field) to include in the index
      weight (optional)
        A positive integer weight used in determining strength of match
        during search. Higher weights mean higher priority in search
        results. The default weight is 1. On full text fields, individual
        words will receive a higher weight based on the number of
        occurrences of the word. For example, if weight = 2 and the word
        "car" appears 3 times in a field, the word car would be indexed
        with a weight of 2 * 3 = 6.
      fulltext (optional)
        True if the field should be indexed as a full text field. By
        default, text properties like String, Text, StringList, etc. are
        indexed as full-text fields and all other properties (numeric,
        boolean, etc.) are indexed in their original type. To force a non-
        text field to be indexed as full text, set this to True. The default
        value is False.
      startswith (optional)
        Applies only to full text indexed fields. If True, terms will be
        stored for all starting sequences of each word to allow matching
        on starting sequences (e.g. for type-ahead completions). For
        example, the word "bicycle" would be indexed as:
        "b", "bi", "bic", "bicy", "bicyc", "bicycl" and "bicycle"
      stem (optional)
        True if field should be stemmed. This is useful for text fields
        where matching should be performed on word roots instead of whole
        words. For example, if you want a search for "walking" to include
        fields that have the term "walker", set stem to True. The default
        value is True.
      keepunstemmed (optional)
        Applies only to fields indexed as full text and stemmed. If True,
        the indexed terms will include both the stemmed and the unstemmed
        versions of words in the original field.
    """
    if not descriptor:
        try:
            descriptor = model.__class__.bts_fields
        except AttributeError, e:
            raise Exception("To index a model object, please specify a descriptor when calling index or as the property 'bts_fields' on the model class itself")
    model_class_name = model.__class__.__name__
    was_materialized = False
    for (name, property) in model.properties().iteritems():
        # materialize all properties before we get into a transaction
        materialized_from = None
        try:
            materialized_from = property.bts_materialized_from
        except AttributeError:
            # This is not a materialized property, just ignore it
            pass
        if materialized_from:
            setattr(model, name, getattr(model, materialized_from))
            was_materialized = True
        else:
            # Touch the property so lazy references load outside transactions.
            getattr(model, name)
    if was_materialized:
        # Some properties were materialized, save model
        # (bts_currently_indexing tells request_indexing to skip this put).
        model.bts_currently_indexing = True
        model.put()
    indexable = True
    try:
        # Identify whether the model is indexable based on its indexable method
        indexable = model.indexable()
    except AttributeError, e:
        # The model does not define an indexable method, just continue
        pass
    if indexable:
        # pre-load properties outside of transaction (to make sure references are
        # initialized
        for descriptor_entry in descriptor:
            field_name = descriptor_entry.get('field', None)
            if not field_name:
                raise Exception("Descriptor entry must include a value for the key 'field'")
            # read value before we get into transaction
            getattr(model, field_name)
        # now process TermUpdates in transaction
        added_terms = db.run_in_transaction(update_terms, model,
            model_class_name, descriptor)
        try:
            # If model defines a term_added callback, invoke it for every
            # added term.
            term_added = model.term_added
            for term in added_terms:
                term_added(term)
        except AttributeError, e:
            # model does not provide a term_added method, ignore
            pass
def update_terms(model, model_class_name, descriptor):
    """Updates the specified model in the index.

    Runs inside a datastore transaction (see update_model). Returns the set
    of plain words/values newly added across all indexed fields.
    """
    added_terms = set()
    for descriptor_entry in descriptor:
        # Process each indexed field
        field_name = descriptor_entry.get('field', None)
        weight = int(descriptor_entry.get('weight', 1))
        field = Field(model_class_name, field_name)
        value = getattr(model, field_name)
        if value:
            model_terms = ModelTerms.all().ancestor(model).filter('field =',
                unicode(field)).get()
            if not model_terms:
                # Create a ModelTerms to keep track of which terms have
                # been indexed for this field on this model.
                model_terms = ModelTerms(model=model, field=field)
            # Strings are always full text; other types only when forced.
            index_as_text = isinstance(value, basestring) or \
                descriptor_entry.get('fulltext', False)
            if index_as_text:
                # Index this field as a full text field
                field_added_terms = index_full_text(descriptor_entry,
                    model, weight, field, value, model_terms)
                for term in field_added_terms:
                    # Terms are "frequency<delim>word"; report just the word.
                    added_terms.add(term.split(TERM_DELIMITER)[1])
            else:
                # Index this field using its native type
                added_terms.update(index_plain(model, weight, field,
                    value, model_terms))
    return added_terms
def index_full_text(descriptor_entry, model, weight, field, value,
    model_terms):
    """Indexes the field as a full-text field (tokenized, stemmed, etc.).
    This method does not actually update the index but simply posts TermUpdates
    which can later be merged into the index.

    Returns the set of added "frequency<delim>word" term strings.
    """
    index_startswith_entries = descriptor_entry.get("startswith", False)
    words = tokenize(value,
        stemmed=descriptor_entry.get('stem', True),
        keep_unstemmed=descriptor_entry.get('keepunstemmed', False))
    # Count occurrences of each token (and, optionally, of every prefix).
    word_frequencies = dict()
    for word in words:
        word_frequencies[word] = word_frequencies.get(word, 0) + 1
        if index_startswith_entries:
            # Derive starting sequences for the word
            for i in range(1, len(word)):
                fragment = word[0:i]
                word_frequencies[fragment] = word_frequencies.get(fragment,
                    0) + 1
    all_terms = set()
    for (word, frequency) in word_frequencies.iteritems():
        # Encode term frequency and word into a single string
        term = "%s%s%s" % (frequency, TERM_DELIMITER, word)
        all_terms.add(term)
    # Diff against what was previously indexed for this field on this model.
    old_terms = set(model_terms.terms)
    added_terms = all_terms - old_terms
    removed_terms = old_terms - all_terms
    all_field = Field(field.model_class_name, 'bts_all')
    for term in added_terms:
        # Store the term under its own field and the bts_all field
        # (weight is frequency-scaled but capped at 10).
        (frequency, word) = term.split(TERM_DELIMITER)
        TermUpdate(field=field, weight=min(weight * int(frequency), 10),
            model=model, term_string=word).put()
        TermUpdate(field=all_field, weight=min(weight * int(frequency),
            10), model=model, term_string=word).put()
    for term in removed_terms:
        if TERM_DELIMITER in term:
            # Delete removed terms from their own fields and the bts_all field
            (frequency, word) = term.split(TERM_DELIMITER)
            TermUpdate(remove=True, field=field, weight=min(weight * int(frequency),
                10), model=model, term_string=word).put()
            TermUpdate(remove=True, field=all_field, weight=min(weight *
                int(frequency), 10), model=model, term_string=
                word).put()
    # Persist the new term snapshot for the next diff.
    model_terms.terms = list(all_terms)
    model_terms.put()
    return added_terms
def index_plain(model, weight, field, value, model_terms):
    """Indexes the field as a plain field (no text processing).

    The original data type will be maintained if possible. Currently,
    boolean, integer, date, datetime and reference (db.Model) properties are
    supported.

    This method does not actually update the index but simply posts TermUpdates
    which can later be merged into the index.

    Returns the list of indexed value strings. NOTE: Python 2 code (unicode).
    """
    # Normalize to a list so single values and list properties share one path.
    values = value if isinstance(value, list) else [value]
    value_strings = []
    for value in values:
        if value:
            # String form used for the dedupe check against prior terms.
            if isinstance(value, db.Model):
                value_string = unicode(value.key())
            else:
                value_string = unicode(value)
            if value_string not in model_terms.terms:
                # Branch order matters: bool is a subclass of int, and
                # datetime is a subclass of date, so the more specific
                # type must be tested first.
                if isinstance(value, db.Model) or isinstance(value, db.Key):
                    TermUpdate(field=field, weight=weight, model=model,
                               term_reference=value).put()
                elif isinstance(value, bool):
                    TermUpdate(field=field, weight=weight, model=model,
                               term_bool=value).put()
                elif isinstance(value, int):
                    TermUpdate(field=field, weight=weight, model=model,
                               term_integer=value).put()
                # BUG FIX: datetime must be checked before date; previously
                # every datetime matched isinstance(value, date) and was
                # stored as term_date, making term_datetime unreachable.
                elif isinstance(value, datetime):
                    TermUpdate(field=field, weight=weight, model=model,
                               term_datetime=value).put()
                elif isinstance(value, date):
                    TermUpdate(field=field, weight=weight, model=model,
                               term_date=value).put()
                else:
                    TermUpdate(field=field, weight=weight, model=model,
                               term_string=unicode(value)).put()
            value_strings.append(value_string)
    # Persist the current value strings for the next diff.
    model_terms.terms = value_strings
    model_terms.put()
    return value_strings
def noop_stop_condition(current_term_value):
    """Default stop condition for merge_updates: never stop early.

    Always returns False regardless of the last merged term's value.
    """
    return False
def request_merge_updates(term_type, **kwargs):
    """Requests that updates for the specified term_type (e.g. term_string) be
    merged into the index asynchronously.

    Enqueues a task on the 'merge-updates' queue that POSTs term_type (plus
    any extra kwargs) to /bts/merge-updates.
    """
    params = dict(term_type=term_type)
    params.update(**kwargs)
    queue_params = dict(url='/bts/merge-updates', params=params)
    taskqueue.Task(**queue_params).add(queue_name='merge-updates')
def merge_updates(term_type, stop_condition=noop_stop_condition,
                  additional_filters=[], **kwargs):
    """Merge TermUpdates of the specified term_type into the index by writing
    out the necessary TermHits objects. When a term is added to the index, the
    logic is roughly as follows:

    If adding a term:
        If this term already appears in the index for another model
            Add the new model's key to the existing TermHits.
        If this term does not already appear in the index
            Add a new TermHits for this term
    If removing a term:
        Remove the model's key from the existing TermHits object

    If there are multiple pending TermUpdates for the same field and the same
    value, this function will batch the updates together to improve performance.

    This function limits its runtime to about 15 seconds to avoid running into
    App Engine timeouts. If the 15 second limit is hit, this function will
    re-queue itself for subsequent processing. This continues until all updates
    are merged.

    Keyword arguments:
    term_type
        The type of term ('term_string', 'term_reference', 'term_integer',
        'term_bool', 'term_date' or 'term_datetime')
    stop_condition
        A function that takes the value of the last merged term and returns
        True if merging should stop or False if it should continue. Defaults
        to a noop_stop_condition that allows continuation ad-infinitum.
    additional_filters
        Additional filters to apply to the query for TermUpdates that can be
        used to limit the scope of what is merged. Each filter is a tuple of
        (expression, value) just as would be specified to the filter method
        on a db Query.

    NOTE(review): additional_filters uses a mutable default ([]); it is only
    iterated, never mutated, so this is safe but unidiomatic.
    """
    logging.debug("Merging index updates")
    start_time = datetime.now()
    were_updates_processed = False
    # merge updates one field at a time
    field_query = TermUpdate.all().filter('dt_processed =', None)
    field_query.filter('%s !=' % term_type, None)
    for filter in additional_filters:
        field_query.filter(*filter)
    field_query.order(term_type)
    while True:
        # Limit our run to around 15 seconds
        if datetime.now() - start_time > MAX_RUNTIME:
            break
        field_update = field_query.get()
        if not field_update or stop_condition(getattr(field_update,
                                                      term_type)):
            break
        field = field_update.field
        logging.debug("Merging index updates for field %s" % field)
        # Accumulators keyed by weight: sets of model keys to add/remove.
        added_keys = dict()
        removed_keys = dict()
        processed_updates = []
        prior_update = None
        # Find the next 100 unprocessed TermUpdates for this field
        query = TermUpdate.all().filter('dt_processed =', None)
        query.filter('%s !=' % term_type, None)
        query.filter('field =', str(field))
        for filter in additional_filters:
            query.filter(*filter)
        query.order(term_type)
        updates = query.fetch(100)
        number_of_updates = len(updates)
        # For each TermUpdate, accumulate the keys. The loop runs one extra
        # iteration (i == number_of_updates, update is None) so the final
        # batch of accumulated keys gets flushed.
        for i in range(0, number_of_updates + 1):
            # Limit our run to around 15 seconds
            if datetime.now() - start_time > MAX_RUNTIME:
                break
            update = None
            if i < number_of_updates:
                update = updates[i]
            current_term_value = None
            prior_term_value = None
            if update:
                current_term_value = getattr(update, term_type)
            if prior_update:
                prior_term_value = getattr(prior_update, term_type)
            # Once we're on a new term (or the end of our run), write updates into the index as TermHits
            if i > 0 and (i == number_of_updates or prior_update and
                          current_term_value != prior_term_value or
                          stop_condition(current_term_value)):
                updated_hits = set()
                were_updates_processed = True
                existing_hits_query = TermHits.all()
                existing_hits_query.filter('%s =' % term_type,
                                           prior_term_value)
                existing_hits_query.filter('field =', str(field))
                existing_hits_query.order('-dt_updated')
                existing_hits = existing_hits_query.fetch(1000)
                for existing_hit in existing_hits:
                    # Add keys to existing TermHits where possible, but only
                    # when the combined key list stays below 3000 entries.
                    keys_to_add = added_keys.get(existing_hit.weight,
                                                 None)
                    if keys_to_add and len(keys_to_add) + len(existing_hit.model_keys) < \
                            3000:
                        keys_to_add.update(set(existing_hit.model_keys))
                        existing_hit.model_keys = list(keys_to_add)
                        updated_hits.add(existing_hit)
                        # Consumed: don't also write a fresh TermHits below.
                        added_keys.pop(existing_hit.weight)
                for existing_hit in existing_hits:
                    # Remove keys from existing TermHits where necessary
                    keys_to_remove = removed_keys.get(existing_hit.weight,
                                                      None)
                    if keys_to_remove:
                        existing_hit.model_keys = list(set(existing_hit.model_keys) -
                                                       keys_to_remove)
                        updated_hits.add(existing_hit)
                for (weight, model_keys) in added_keys.iteritems():
                    # Write new TermHits where necessary; drop keys that are
                    # also scheduled for removal at this weight.
                    model_keys = model_keys - removed_keys.get(weight,
                                                               set())
                    hits_params = dict(field=prior_update.field, weight=
                                       weight, model_keys=list(model_keys))
                    hits_params[str(term_type)] = prior_term_value
                    updated_hits.add(TermHits(**hits_params))
                # Flush all updates to the datastore
                db.put(processed_updates)
                # Reset accumulator variables
                added_or_updated_hits = []
                deleted_hits = []
                for hit in updated_hits:
                    # TermHits with no remaining keys are deleted (if already
                    # persisted) rather than stored empty.
                    if len(hit.model_keys) == 0:
                        if hit.is_saved():
                            deleted_hits.append(hit)
                    else:
                        added_or_updated_hits.append(hit)
                db.put(added_or_updated_hits)
                db.delete(deleted_hits)
                added_keys = dict()
                removed_keys = dict()
                processed_updates = []
            # If we're at the end of our run, stop processing
            if i == number_of_updates or stop_condition(current_term_value):
                break
            accumulator = removed_keys if update.remove else added_keys
            keys = accumulator.get(update.weight, set())
            try:
                keys.add(update.model.key())
            except:
                # Exception may be thrown if referenced model has been deleted
                # Just ignore it
                pass
            accumulator[update.weight] = keys
            prior_update = update
            # Mark the update processed; persisted with the next flush.
            update.dt_processed = datetime.now()
            processed_updates.append(update)
    if were_updates_processed:
        # Add this merge_updates operation to the queue for further processing
        request_merge_updates(term_type, **kwargs)
def merge_string_updates(start_letter, end_letter):
    """Merge updates for string (full-text) terms.

    This is basically the same as merge_updates except that the merge is
    restricted to words beginning with a letter in the specified range
    (inclusive on both start_letter and end_letter).
    """
    def stop_condition(current_term_value):
        # Truthy only when an end bound is given and the current term sorts
        # past it. With no bound this returns end_letter itself (falsy),
        # which merge_updates treats the same as False.
        return end_letter and current_term_value > end_letter
    # start_letter/end_letter are forwarded as kwargs so the re-queued task
    # resumes with the same letter range.
    return merge_updates('term_string', stop_condition, [('term_string >=',
                                                          start_letter)], start_letter=start_letter,
                         end_letter=end_letter)
def add_to_index(**kwargs):
    """Django post_save signal handler: request (re)indexing of the saved
    instance whenever it is updated."""
    request_indexing(kwargs['instance'])
def remove_from_index(**kwargs):
    """Django post_delete signal handler: request removal of the deleted
    instance from the index."""
    request_removal(kwargs['instance'])
|
from rest_framework import serializers
from rest_framework.serializers import Serializer, ModelSerializer
from TestOnline.models import Company
|
"""
API for Game Board that allows interaction with boards.
"""
import json
import random
from time import sleep
import uuid
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from rest_framework.throttling import AnonRateThrottle
from rest_framework.throttling import UserRateThrottle
from rest_framework.decorators import throttle_classes
from game_board.api import utils
from .. import config
from ..llist.llist import doAction
@api_view(['GET'])
def api_overview(request):
    """
    Overview of the API calls that exist.

    :param request:
    :return: Response, mapping of action name -> URL pattern.
    """
    routes = [
        ('Start Game', '/start_game/<str:difficulty>/<str:player_ids>/<str:data_structures>'),
        ('Game Board', '/board/<str:game_id>'),
        ('Dig Tunnel', '/dig_tunnel/<str:game_id>/<str:origin>/<str:destination>'),
        ('Dig Chamber', '/dig_chamber/<str:game_id>/<str:origin>/<str:move_ant>/<str:ant>'),
        ('Fill Chamber', '/fill_chamber/<str:game_id>/<str:to_fill>'),
        ('Spawn Ant', '/spawn_ant/<str:game_id>'),
        ('Forage', '/forage/<str:game_id>/<str:difficulty>/<str:dest>'),
        ('Move Food', '/move_food/<str:game_id>/<str:start>/<str:dest>'),
        ('Move Ant', '/move_ant/<str:game_id>/<str:start>/<str:dest>'),
    ]
    return Response(dict(routes))
@api_view(['GET'])
# BUG FIX: @throttle_classes was applied twice; each application overwrites
# the view's throttle_classes attribute, so only the last-applied decorator
# (AnonRateThrottle) took effect and UserRateThrottle was silently dropped.
# Both throttles now apply.
@throttle_classes([AnonRateThrottle, UserRateThrottle])
def start_game(request, difficulty, player_ids, data_structures):
    """
    Creates a new game board.

    :param request:
    :param difficulty: game difficulty level
    :param player_ids: string of player IDs, comma separated if more than one
    :param data_structures: string of data structures, comma separated if more than one
    :return game board id:
    """
    # Chosen difficulty does not exist
    if difficulty not in config.DIFFICULTY_LEVELS:
        return Response({'error': 'Difficulty level not found!',
                         'options': config.DIFFICULTY_LEVELS},
                        status=status.HTTP_400_BAD_REQUEST)
    # Convert the string fields into list. Separate by comma if provided
    player_ids_temp = player_ids.split(',')
    data_structures = data_structures.split(',')
    player_ids = list()
    for pl_id in player_ids_temp:
        pl_id = str(pl_id).strip()
        # If empty player_ids is passed, generate a unique random name
        if len(pl_id) == 0:
            random_player = 'RedPanda_' + str(uuid.uuid1())[:5]
            while random_player in player_ids:
                random_player = 'RedPanda_' + str(uuid.uuid1())[:5]
            player_ids.append(random_player)
        else:
            player_ids.append(pl_id)
    # Check if the number of players requested is valid
    if len(player_ids) > config.LLIST_MAX_NUM_PLAYERS:
        return Response({'error': 'Too many players requested!',
                         'options': config.LLIST_MAX_NUM_PLAYERS},
                        status=status.HTTP_400_BAD_REQUEST)
    # Create new game board JSON (dict), and store it in the database
    new_board = utils.new_board(difficulty, player_ids, data_structures)
    response_status = utils.create_board_db(new_board)
    if response_status['error']:
        return Response({'error': response_status['reason']},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    return Response({'game_id': response_status['game_id']})
@api_view(['GET'])
def board(request, game_id):
    """
    Returns the current game board state.

    :param request:
    :param game_id: unique identifier of the board
    :return game board JSON:
    """
    load_result = utils.load_board_db(game_id)
    if load_result['error']:
        return Response({'error': load_result['reason']},
                        status=status.HTTP_400_BAD_REQUEST)
    # hide the UID used by data structure backend from user
    # del load_result['game_board']['graph']['uid']
    return Response(load_result['game_board'])
@api_view(['GET'])
def dig_tunnel(request, game_id, origin, destination):
    """
    Attempts to dig a tunnel from the requested chamber to a requested destination.

    :param game_id: unique identifier of the board
    :param origin: the chamber that the player wishes to dig from
    :param destination: the place that the player wishes to dig to (chamber name,
        'surface', or 'None' for a dead-end tunnel)
    :return game board JSON:
    """
    # URL path segments are strings; 'None' means a dead-end tunnel
    if destination == 'None':
        destination = None
    # Game must exist: load the game board from database
    response_status = utils.load_board_db(game_id)
    if response_status['error']:
        return Response({'error': response_status['reason']},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    board = response_status['game_board']
    # origin and destination MUST be different
    if origin == destination:
        return Response({'invalid_action': 'origin cannot match destination'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Player must still have dig 'energy' for that day
    if board['time_tracks']['dig_tunnel_track'] == 0:
        return Response({'invalid_action': 'no more dig tunnel moves left!'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Origin must exist
    if origin != 'surface' and origin not in board['graph']['chambers']:
        return Response({'invalid_action': 'origin does not exist'},
                        status=status.HTTP_400_BAD_REQUEST)
    # If destination is NOT 'None', it must exist (chamber OR surface)
    if destination is not None and destination not in board['graph']['chambers']:
        return Response({'invalid_action': 'destination does not exist'},
                        status=status.HTTP_400_BAD_REQUEST)
    # If origin is surface, colony_entrance MUST be False
    if origin == 'surface' and board['colony_entrance'] is True:
        return Response({'invalid_action': 'colony_entrance already exists'},
                        status=status.HTTP_400_BAD_REQUEST)
    # There must be at least one ant at origin
    if origin == 'surface' and board['total_surface_ants'] == 0:
        return Response({'invalid_action': 'no ants on surface'},
                        status=status.HTTP_400_BAD_REQUEST)
    # NOTE(review): 'surface' appears to be a key in graph['chambers'] (see
    # forage), so this lookup is assumed safe for origin == 'surface' too.
    if board['graph']['chambers'][origin]['num_ants'] == 0:
        return Response({'invalid_action': 'no ants at origin'},
                        status=status.HTTP_400_BAD_REQUEST)
    # If destination is NOT None, there must be an ant at the destination
    if destination is not None and board['graph']['chambers'][destination]['num_ants'] == 0:
        return Response({'invalid_action': 'no ants at destination'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Origin chamber must NOT already have an exit tunnel, except if it's to the surface
    if board['graph']['chambers'][origin]['tunnels']['next'] is not None and \
            board['graph']['chambers'][origin]['tunnels']['next'] != 'surface':
        return Response({'invalid_action': 'exit tunnel exists'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Destination must NOT already have an entrance tunnel
    if destination is not None and board['graph']['chambers'][destination]['tunnels']['prev'] is not None:
        # BUG FIX: this error previously said 'exit tunnel exists' (copy-paste
        # from the origin check above), misreporting the actual failure.
        return Response({'invalid_action': 'entrance tunnel exists'},
                        status=status.HTTP_400_BAD_REQUEST)
    # All checks passed: create the new tunnel and update relevant game state
    board['graph'] = doAction(board['graph'], ('dig_tunnel', origin, destination))
    if origin == 'surface':
        board['colony_entrance'] = True
    if destination == 'surface':
        board['colony_exit'] = True
    board['time_tracks']['dig_tunnel_track'] -= 1
    user_id = board['player_ids']
    token = -1
    # Update the board on database
    response_status = utils.update_board_db(board, user_id, token)
    if response_status['error']:
        return Response({'error': response_status['reason']},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    board_response = response_status['game_board']
    return Response(board_response)
@api_view(['GET'])
def dig_chamber(request, game_id, origin, move_ant, ant=None):
    """
    Attempts to dig a new chamber off of a current dead-end tunnel.

    :param game_id: unique identifier of the board
    :param origin: the chamber that the player wishes to dig from
    :param move_ant: 'yes'/'no' — whether to move an ant into the new chamber
    :param ant: the ant to move ('None' string means no ant)
    :return game board JSON:
    """
    # URL path segments are strings; 'None' means no ant was specified
    if ant == 'None':
        ant = None
    # Check if game exists
    response_status = utils.load_board_db(game_id)
    if response_status['error']:
        return Response({'error': response_status['reason']},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    board = response_status['game_board']
    # Check for dig chamber energy
    if board['time_tracks']['dig/fill_chamber'] == 0:
        return Response({'invalid_action': 'no more dig chamber moves left!'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Check if move_ant is a valid input (must be 'yes' or 'no')
    if move_ant != 'yes':
        if move_ant != 'no':
            return Response({'invalid_action': 'invalid free move request!'},
                            status=status.HTTP_400_BAD_REQUEST)
    # Check if origin exists
    if origin != 'surface' and origin not in board['graph']['chambers']:
        return Response({'invalid_action': 'origin does not exist'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Check if origin contains at least one ant.
    # NOTE(review): '== 1' (not 0) — presumably one surface slot is always
    # occupied by the queen, so 1 means no worker ants; confirm (forage uses
    # the same check, but dig_tunnel uses '== 0').
    if origin == 'surface' and board['total_surface_ants'] == 1:
        return Response({'invalid_action': 'no ants on surface'},
                        status=status.HTTP_400_BAD_REQUEST)
    if board['graph']['chambers'][origin]['num_ants'] == 0:
        return Response({'invalid_action': 'no ants at origin'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Check if origin contains an exit tunnel (must be a dead end to dig)
    if board['graph']['chambers'][origin]['tunnels']['next'] is not None:
        return Response({'invalid_action': 'no available tunnel from origin'},
                        status=status.HTTP_400_BAD_REQUEST)
    # if origin contains a next tunnel, check if current next is 'none'
    # if board['graph']['tunnels'][origin][0] == 2 and board['graph']['tunnels'][origin][2] is not None:
    #     return Response({'invalid_action': 'no available tunnel from origin'},
    #                     status=status.HTTP_400_BAD_REQUEST)
    # Dig request is valid: update all relevant game board variables.
    # New chamber name is derived from the chamber count before insertion.
    newchamberid = 'chamber' + str(len(board['graph']['chambers'].keys()) + 1)
    board['graph'] = doAction(board['graph'], ('dig_chamber', origin))
    board['total_chambers'] += 1
    if move_ant == 'yes' and ant is not None:
        board['graph'] = doAction(board['graph'], ('move_ant', ant, newchamberid))
    board['time_tracks']['dig/fill_chamber'] -= 1
    user_id = board['player_ids']
    token = -1
    # Update the board on database
    response_status = utils.update_board_db(board, user_id, token)
    if response_status['error']:
        return Response({'error': response_status['reason']},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    board_response = response_status['game_board']
    return Response(board_response)
@api_view(['GET'])
def fill_chamber(request, game_id, to_fill):
    """
    Attempts to 'fill' (delete) a chamber and all associated tunnels.

    :param game_id: unique identifier of the board
    :param to_fill: the chamber that the player wishes to delete
    :return game board JSON:
    """
    # Check if game exists: load the game board from database
    response_status = utils.load_board_db(game_id)
    if response_status['error']:
        return Response({'error': response_status['reason']},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    board = response_status['game_board']
    # Check if to_fill is surface (cannot fill in surface)
    if to_fill == 'surface':
        return Response({'invalid_action': 'cannot fill in surface'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Check if to_fill exists
    if to_fill not in board['graph']['chambers']:
        return Response({'invalid_action': 'chamber does not exist'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Check for fill chamber energy
    if board['time_tracks']['dig/fill_chamber'] == 0:
        return Response({'invalid_action': 'no more fill chamber moves left!'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Check if to_fill has any food in it (food must be moved out first)
    if (board['graph']['chambers'][to_fill]['food']['crumb'] > 0) \
            or (board['graph']['chambers'][to_fill]['food']['berry'] > 0) \
            or (board['graph']['chambers'][to_fill]['food']['donut'] > 0):
        return Response({'invalid_action': 'There is food in this chamber!'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Check if there is at least one ant at the prev chamber
    previous = board['graph']['chambers'][to_fill]['tunnels']['prev']
    if board['graph']['chambers'][previous]['num_ants'] == 0:
        return Response({'invalid_action': 'No ant in previous chamber!'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Check if there is a next chamber, and if so, if there is at least one ant in it
    if board['graph']['chambers'][to_fill]['tunnels']['next'] is not None:
        next_chamber = board['graph']['chambers'][to_fill]['tunnels']['next']
        if board['graph']['chambers'][next_chamber]['num_ants'] == 0:
            return Response({'invalid_action': 'No ant in next chamber!'},
                            status=status.HTTP_400_BAD_REQUEST)
    # All checks passed. Update gameboard: fill_chamber links up prev and next.
    board['graph'] = doAction(board['graph'], ('fill_chamber', to_fill))
    board['total_chambers'] -= 1
    board['time_tracks']['dig/fill_chamber'] -= 1
    user_id = board['player_ids']
    token = -1
    # Update the board on database
    response_status = utils.update_board_db(board, user_id, token)
    if response_status['error']:
        return Response({'error': response_status['reason']},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    board_response = response_status['game_board']
    return Response(board_response)
##### NOTE: THIS IMPLEMENTATION WILL LIKELY CHANGE IN THE NEAR FUTURE
# HOWEVER, GENERAL LOGIC SHOULD STAY THE SAME
# @api_view(['GET'])
# def move_ant(request, game_id, origin):
# Checklist
# Check if game exists
# Check if origin exists
# Check if ant exists in origin
# Check if origin has an exit tunnel
# if so, check if origin's exit tunnel leads to a valid destination
# Check if destination chamber is under attack
# if so, check to see if ant is carrying food (cannot bring food into attacked chamber)
# At this point, requested move is valid. Update ALL related gameboard values and return
@api_view(['GET'])
def spawn_ant(request, game_id):
    """
    Spawns an ant given the game ID.

    Consumes ANT_SPAWN_VAL worth of food (preferring a donut, then a
    berry+crumb, then crumbs, then converting berries), then adds the new
    ant to the surface.

    :param game_id: unique identifier of the board
    :return game board JSON:
    """
    # Load the game board from database
    response_status = utils.load_board_db(game_id)
    if response_status['error']:
        return Response({'error': response_status['reason']},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    board = response_status['game_board']
    if not board['queen_at_head']:
        return Response({'invalid_action': 'lost queen'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Make sure there is enough food to spawn a new ant
    if board['total_food'] < config.ANT_SPAWN_VAL:
        return Response({'invalid_action': 'not enough food'},
                        status=status.HTTP_400_BAD_REQUEST)
    curr_food_types = board['total_food_types']
    # Find the first chamber with enough food to pay for the ant
    chamber_with_food = None
    for chamber in board['graph']['chambers'].keys():
        if chamber != 'surface' and board['graph']['chambers'][chamber]['food']['total'] >= config.ANT_SPAWN_VAL:
            chamber_with_food = chamber
            break
    # BUG FIX: previously a missing chamber (food spread too thin) caused a
    # TypeError when indexing with None below; reject the request instead.
    if chamber_with_food is None:
        return Response({'invalid_action': 'not enough food'},
                        status=status.HTTP_400_BAD_REQUEST)
    chamber_food = board['graph']['chambers'][chamber_with_food]['food']
    # If player has a donut take it
    if curr_food_types[config.FORAGE_TYPES[2]] > 0:
        board['total_food_types'][config.FORAGE_TYPES[2]] -= 1
        board['total_food'] -= config.ANT_SPAWN_VAL
        chamber_food[config.FORAGE_TYPES[2]] -= 1
        chamber_food['total'] -= config.ANT_SPAWN_VAL
    # If player has at least one berry and one crumb, take one of each
    elif curr_food_types[config.FORAGE_TYPES[1]] > 0 and curr_food_types[config.FORAGE_TYPES[0]] > 0:
        board['total_food_types'][config.FORAGE_TYPES[1]] -= 1
        board['total_food_types'][config.FORAGE_TYPES[0]] -= 1
        board['total_food'] -= config.ANT_SPAWN_VAL
        chamber_food[config.FORAGE_TYPES[1]] -= 1
        chamber_food[config.FORAGE_TYPES[0]] -= 1
        chamber_food['total'] -= config.ANT_SPAWN_VAL
    # If player only has crumbs take them
    elif curr_food_types[config.FORAGE_TYPES[0]] >= config.ANT_SPAWN_VAL:
        board['total_food_types'][config.FORAGE_TYPES[0]] -= config.ANT_SPAWN_VAL
        # BUG FIX: this branch previously left total_food and the chamber's
        # food untouched, desynchronizing the global and per-chamber counters
        # from every other branch.
        board['total_food'] -= config.ANT_SPAWN_VAL
        chamber_food[config.FORAGE_TYPES[0]] -= config.ANT_SPAWN_VAL
        chamber_food['total'] -= config.ANT_SPAWN_VAL
    # Player has enough food, but only in berry form (not divisible by 3):
    # convert two berries into payment plus one leftover crumb
    elif curr_food_types[config.FORAGE_TYPES[1]] >= 2:
        board['total_food_types'][config.FORAGE_TYPES[1]] -= 2
        board['total_food_types'][config.FORAGE_TYPES[0]] += 1
        board['total_food'] -= config.ANT_SPAWN_VAL
        chamber_food[config.FORAGE_TYPES[1]] -= 2
        # BUG FIX: the leftover unit was previously added back as a berry
        # (FORAGE_TYPES[1]) while the global counter recorded a crumb; the
        # chamber now also receives a crumb (FORAGE_TYPES[0]).
        chamber_food[config.FORAGE_TYPES[0]] += 1
        chamber_food['total'] -= config.ANT_SPAWN_VAL
    else:
        return Response({'invalid_action': 'error occurred'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Spawning succeeded: update both total and surface ant counts
    board['total_ants'] += 1
    board['total_surface_ants'] += 1
    action = ['spawn_ant']
    board['graph'] = doAction(board['graph'], action)
    user_id = board['player_ids']
    token = -1
    # Update the board on database
    response_status = utils.update_board_db(board, user_id, token)
    if response_status['error']:
        return Response({'error': response_status['reason']},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    board_response = response_status['game_board']
    return Response(board_response)
@api_view(['GET'])
def forage(request, game_id, difficulty, dest):
    """
    Forages for food: an ant leaves the surface and either brings food to the
    destination chamber or triggers an attack on it.

    :param game_id: unique identifier of the board
    :param difficulty: game difficulty (selects forage probabilities)
    :param dest: the chamber where the food should be placed
    :return game board JSON:
    """
    # Load the game board from database
    response_status = utils.load_board_db(game_id)
    if response_status['error']:
        return Response({'error': response_status['reason']},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    board = response_status['game_board']
    # Make sure that the destination chamber is in the colony
    if dest not in board['graph']['chambers']:
        return Response({'invalid_action': 'dest dne'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Walk the tunnel chain from the surface to confirm dest is reachable
    curr_chamber = 'surface'
    connected = False
    while board['graph']['chambers'][curr_chamber]['tunnels']['next'] is not None and \
            board['graph']['chambers'][curr_chamber]['tunnels']['next'] != 'surface':
        if board['graph']['chambers'][curr_chamber]['tunnels']['next'] == dest:
            connected = True
            break
        # BUG FIX: previously indexed board['graph']['surface'][...], which
        # raised KeyError for any destination more than one tunnel deep.
        curr_chamber = board['graph']['chambers'][curr_chamber]['tunnels']['next']
    if connected == False:
        return Response({'invalid_action': 'dest unreachable'},
                        status=status.HTTP_400_BAD_REQUEST)
    # If there are no chambers player can't forage
    if board['total_chambers'] == 0:
        return Response({'invalid_action': 'no chambers'},
                        status=status.HTTP_400_BAD_REQUEST)
    # If there is no queen then game over
    if board['queen_at_head'] == False:
        return Response({'invalid_action': 'lost queen'},
                        status=status.HTTP_400_BAD_REQUEST)
    # If there are no worker ants on the surface (1 = only the queen)
    if board['total_surface_ants'] == 1:
        return Response({'invalid_action': 'no surface ants'},
                        status=status.HTTP_400_BAD_REQUEST)
    # If the requested chamber is under attack return error
    if board['graph']['chambers'][dest]['under_attack']:
        return Response({'invalid_action': 'under attack'},
                        status=status.HTTP_400_BAD_REQUEST)
    # If the player can't make a forage move, return error
    if board['time_tracks']['move/forage'] == 0:
        return Response({'invalid_action': 'no time'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Choose a random number then pick the forage outcome.
    # NOTE(review): the bands below assume the configured chances are
    # cumulative thresholds, and randint(0, 100) spans 101 values — confirm
    # config so rand_food == 100 cannot fall outside every band.
    rand_food = random.randint(0, 100)
    crumb_chance = config.FORAGE_CHANCE[difficulty][config.FORAGE_TYPES[0]]
    berry_chance = config.FORAGE_CHANCE[difficulty][config.FORAGE_TYPES[1]]
    donut_chance = config.FORAGE_CHANCE[difficulty][config.FORAGE_TYPES[2]]
    attack_chance = config.FORAGE_CHANCE[difficulty][config.FORAGE_TYPES[3]]
    forage_result = None
    # Check if crumb was chosen
    if rand_food >= 0 and rand_food < crumb_chance:
        forage_result = config.FORAGE_TYPES[0]
    # Check if berry was chosen
    if rand_food >= crumb_chance and rand_food < berry_chance:
        forage_result = config.FORAGE_TYPES[1]
    # Check if donut was chosen
    if rand_food >= berry_chance and rand_food < donut_chance:
        forage_result = config.FORAGE_TYPES[2]
    # Check if attack was chosen
    if rand_food >= donut_chance and rand_food < attack_chance:
        forage_result = config.FORAGE_TYPES[3]
    # BUG FIX: previously an out-of-band roll left forage_result unbound and
    # crashed with NameError; fail with an explicit error instead.
    if forage_result is None:
        return Response({'invalid_action': 'error occurred'},
                        status=status.HTTP_400_BAD_REQUEST)
    # If the forage resulted in the chamber coming under attack,
    # then reflect the change in the board
    if forage_result == config.FORAGE_TYPES[3]:
        board['graph']['chambers'][dest]['under_attack'] = True
        board['total_under_attack'] += 1
        board['total_surface_ants'] -= 1
        board['graph']['chambers']['surface']['num_ants'] -= 1
        board['graph']['chambers'][dest]['num_ants'] += 1
    # Otherwise, put the food in the requested chamber, move the ant, and update the board
    else:
        # Change food in requested chamber
        board['graph']['chambers'][dest]['food'][forage_result] += 1
        board['graph']['chambers'][dest]['food']['total'] += config.FOOD_VALUE[forage_result]
        # Change food stats for the game board
        board['total_food_types'][forage_result] += 1
        board['total_food'] += config.FOOD_VALUE[forage_result]
        # Move the ant from its original spot to the new spot
        board['total_surface_ants'] -= 1
        board['graph']['chambers']['surface']['num_ants'] -= 1
        board['graph']['chambers'][dest]['num_ants'] += 1
    # Decrement Move/Forage time track
    board['time_tracks']['move/forage'] -= 1
    user_id = board['player_ids']
    token = -1
    # Update the board on database
    response_status = utils.update_board_db(board, user_id, token)
    if response_status['error']:
        return Response({'error': response_status['reason']},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    board_response = response_status['game_board']
    return Response(board_response)
# essentially the same as move food, but simplified
# essentially the same as move food, but simplified
@api_view(['GET'])
def move_ant(request, game_id, start, dest):
    """
    Moves ant from start chamber to destination chamber.

    :param game_id: unique identifier of the board
    :param start: the chamber to move from
    :param dest: the chamber to move to
    :return game board JSON:
    """
    # Load the game board from database
    load_result = utils.load_board_db(game_id)
    if load_result['error']:
        return Response({'error': load_result['reason']},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    board = load_result['game_board']
    chambers = board['graph']['chambers']
    # Guard clauses: reject as soon as any precondition fails.
    # Both endpoints must exist
    if (start not in chambers) or (dest not in chambers):
        return Response({'invalid_action': 'invalid chambers'},
                        status=status.HTTP_400_BAD_REQUEST)
    # The chambers must be directly connected by a tunnel
    if chambers[start]['tunnels']['next'] != dest:
        return Response({'invalid_action': 'no tunnel'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Losing the queen ends the game
    if board['queen_at_head'] == False:
        return Response({'invalid_action': 'game over'},
                        status=status.HTTP_400_BAD_REQUEST)
    # There must be an ant at the start to move
    if chambers[start]['num_ants'] == 0:
        return Response({'invalid_action': 'no ants'},
                        status=status.HTTP_400_BAD_REQUEST)
    # The player must have a move/forage action left
    if board['time_tracks']['move/forage'] == 0:
        return Response({'invalid_action': 'no time'},
                        status=status.HTTP_400_BAD_REQUEST)
    # All checks passed: relocate one ant and spend the action
    chambers[start]['num_ants'] -= 1
    chambers[dest]['num_ants'] += 1
    if start == 'surface':
        board['total_surface_ants'] -= 1
    board['time_tracks']['move/forage'] -= 1
    # Persist the updated board
    save_result = utils.update_board_db(board, board['player_ids'], -1)
    if save_result['error']:
        return Response({'error': save_result['reason']},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    return Response(save_result['game_board'])
@api_view(['GET'])
def move_food(request, game_id, start, dest):
    """
    Moves one piece of food (highest value first) from the start chamber
    to the destination chamber, along with the ant carrying it.
    :param game_id: unique identifier of the board
    :param start: The chamber to move the food from.
    :param dest: the chamber where the food should be placed
    :return game board JSON:
    """
    # Load the game board from database
    response_status = utils.load_board_db(game_id)
    if response_status['error']:
        return Response({'error': response_status['reason']},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    board = response_status['game_board']
    # Make sure that the player isn't trying to move to or from surface.
    if start == 'surface' or dest == 'surface':
        return Response({'invalid_action': 'no food on surface allowed'},
                        status=status.HTTP_400_BAD_REQUEST)
    # If start and dest don't exist for some reason
    if start not in board['graph']['chambers'] or dest not in board['graph']['chambers']:
        return Response({'invalid_action': 'invalid chambers'},
                        status=status.HTTP_400_BAD_REQUEST)
    # If chambers aren't connected then return
    if board['graph']['chambers'][start]['tunnels']['next'] != dest:
        return Response({'invalid_action': 'no tunnel'},
                        status=status.HTTP_400_BAD_REQUEST)
    # If there is no queen then the game is already over
    if not board['queen_at_head']:
        return Response({'invalid_action': 'game over'},
                        status=status.HTTP_400_BAD_REQUEST)
    # With only the surface chamber there is nowhere to move food
    if board['total_chambers'] == 1:
        return Response({'invalid_action': 'no chambers'},
                        status=status.HTTP_400_BAD_REQUEST)
    # An ant must be present in the start chamber to carry the food
    if board['graph']['chambers'][start]['num_ants'] == 0:
        return Response({'invalid_action': 'no ants'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Food can't be moved into a chamber that is under attack
    if board['graph']['chambers'][dest]['under_attack']:
        return Response({'invalid_action': 'under attack'},
                        status=status.HTTP_400_BAD_REQUEST)
    # There must be food in the starting chamber to move
    if board['graph']['chambers'][start]['food']['total'] == 0:
        return Response({'invalid_action': 'no food'},
                        status=status.HTTP_400_BAD_REQUEST)
    # The player must have time left on the move/forage track
    if board['time_tracks']['move/forage'] == 0:
        return Response({'invalid_action': 'no time'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Only one piece of food moves per action and chambers have no food
    # capacity limit; moving a single item at a time still motivates the
    # player not to cram all food into one chamber.
    # 'Pick up' the first available food of the highest value. The
    # total > 0 check above guarantees exactly one food type matches.
    food_picked_up = ''
    value = 0
    for food_type, food_value in (('donut', 3), ('berry', 2), ('crumb', 1)):
        if board['graph']['chambers'][start]['food'][food_type] > 0:
            board['graph']['chambers'][start]['food'][food_type] -= 1
            board['graph']['chambers'][start]['food']['total'] -= food_value
            food_picked_up = food_type
            value = food_value
            break
    # Put food in the destination chamber
    board['graph']['chambers'][dest]['food'][food_picked_up] += 1
    board['graph']['chambers'][dest]['food']['total'] += value
    # The carrying ant moves along with the food
    board['graph']['chambers'][start]['num_ants'] -= 1
    board['graph']['chambers'][dest]['num_ants'] += 1
    # Decrement Move/Forage time track
    board['time_tracks']['move/forage'] -= 1
    user_id = board['player_ids']
    token = -1  # no auth token is used for game-board updates
    # Persist the updated board on the database
    response_status = utils.update_board_db(board, user_id, token)
    if response_status['error']:
        return Response({'error': response_status['reason']},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    board_response = response_status['game_board']
    return Response(board_response)
|
from pysubparser import parser
from pysubparser.util import time_to_millis
from pydub import AudioSegment
# find segments of conversation
import sys
FIVE_SECONDS = 5000
def get_segments(subtitles):
    """Group subtitles into conversation segments.

    A new segment starts whenever the silence gap between a subtitle and
    the end of the previous one exceeds FIVE_SECONDS (milliseconds).

    :param subtitles: iterable of subtitle objects with .start/.end times
    :return: list of segments, each a list of subtitle objects
    """
    segments = []
    # Sentinel far in the past so the first subtitle always opens a segment.
    prev_end = -1000000
    curr_segment = None
    for subtitle in subtitles:
        this_start = time_to_millis(subtitle.start)
        if this_start - prev_end > FIVE_SECONDS:
            if curr_segment is not None:
                segments.append(curr_segment)
            curr_segment = []
        curr_segment.append(subtitle)
        prev_end = time_to_millis(subtitle.end)
    # Append the last open segment. Guarding against None fixes the case of
    # empty input, which previously appended a spurious None entry.
    if curr_segment is not None:
        segments.append(curr_segment)
    return segments
def print_segment(seg):
    """Pretty-print one segment: start time, subtitle lines, end time,
    and the segment duration in seconds."""
    first, last = seg[0], seg[-1]
    print(first.start)
    for subtitle in seg:
        print(subtitle.text)
    print(last.end)
    print("------------------------------------")
    duration_seconds = (time_to_millis(last.end) - time_to_millis(first.start)) / 1000
    print("Segment duration: " + str(duration_seconds))
    print("====================================")
# Script entry: cut an mp3 into per-conversation clips driven by subtitles.
# Usage: python <script> <audio.mp3> <subtitles file>
audio_filename = sys.argv[1]
subtitle_filename = sys.argv[2]
subtitles = parser.parse(subtitle_filename)
segments = get_segments(subtitles)
song = AudioSegment.from_mp3(audio_filename)
# Output naming: out/e01_seg<N>.mp3 — hard-coded folder and episode tag.
folder = "out/"
episode = "e01"
n = 1
for seg in segments:
    # Pad each clip: 1 s of lead-in, 1.5 s of tail.
    start = time_to_millis(seg[0].start) - 1000
    end = time_to_millis(seg[-1].end) + 1500
    cut = song[start:end]
    cut.export(folder + episode + "_seg" + str(n) + ".mp3", format="mp3")
    print("===== Segment " + str(n) + " ========")
    print_segment(seg)
    n += 1
|
# Registration number in the form "YYMMDD-NNNNNNN".
pin = "881120-1068234"
# Full birth date, assuming a 19xx birth year.
yyyymmdd = "19" + pin[:6]
# The digit block after the separator.
num = pin[7:]
print(yyyymmdd)
print(num)
|
import textwrap
import requests_mock
import transaction
from purl import URL
from onegov.form import FormCollection
from onegov.pay import PaymentProviderCollection
def test_setup_stripe(client):
    """Walk through the Stripe Connect OAuth setup flow and then
    deactivate the provider again."""
    client.login_admin()
    # No payment provider configured initially.
    assert client.app.default_payment_provider is None
    with requests_mock.Mocker() as m:
        # The onboarding endpoint returns the token used in the redirect.
        m.post('https://oauth.example.org/register/foo', json={
            'token': '0xdeadbeef'
        })
        client.get('/payment-provider').click("Stripe Connect")
        # The app's first outgoing request carries the OAuth URL the browser
        # would be sent to; replay it locally with secret and auth code.
        url = URL(m.request_history[0].json()['url'])
        url = url.query_param('oauth_redirect_secret', 'bar')
        url = url.query_param('code', 'api_key')
        # Mock Stripe's token-exchange response for that code.
        m.post('https://connect.stripe.com/oauth/token', json={
            'scope': 'read_write',
            'stripe_publishable_key': 'stripe_publishable_key',
            'stripe_user_id': 'stripe_user_id',
            'refresh_token': 'refresh_token',
            'access_token': 'access_token',
        })
        client.get(url.as_string())
    # The provider now holds the exchanged credentials.
    provider = client.app.default_payment_provider
    assert provider.title == 'Stripe Connect'
    assert provider.publishable_key == 'stripe_publishable_key'
    assert provider.user_id == 'stripe_user_id'
    assert provider.refresh_token == 'refresh_token'
    assert provider.access_token == 'access_token'
    with requests_mock.Mocker() as m:
        # Account details are fetched while rendering the provider list.
        m.get('https://api.stripe.com/v1/accounts/stripe_user_id', json={
            'business_name': 'Govikon',
            'email': 'info@example.org'
        })
        # "Deaktivieren" (deactivate) removes the default provider again.
        client.get('/payment-provider').click("Deaktivieren")
        assert client.app.default_payment_provider is None
def test_stripe_form_payment(client):
    """Submit a form payment through the mocked Stripe API; the site
    absorbs the credit-card fee (default behaviour)."""
    collection = FormCollection(client.app.session())
    # Minimal donation form with a priced radio choice (10 CHF preselected).
    collection.definitions.add('Donate', definition=textwrap.dedent("""
        E-Mail *= @@@
        Donation *=
            (x) Small (10 CHF)
            ( ) Medium (100 CHF)
    """), type='custom', payment_method='free')
    providers = PaymentProviderCollection(client.app.session())
    providers.add(type='stripe_connect', default=True, meta={
        'publishable_key': '0xdeadbeef',
        'access_token': 'foobar'
    })
    transaction.commit()
    page = client.get('/form/donate')
    page.form['e_mail'] = 'info@example.org'
    page = page.form.submit().follow()
    # Fee is NOT charged to the customer: total stays 10.00 CHF.
    assert "Totalbetrag" in page
    assert "10.00 CHF" in page
    assert "+ 0.59" not in page
    assert "Online zahlen und abschliessen" in page
    button = page.pyquery('.checkout-button')
    # Stripe amounts are in the currency's smallest unit (Rappen).
    assert button.attr('data-stripe-amount') == '1000'
    assert button.attr('data-stripe-currency') == 'CHF'
    assert button.attr('data-stripe-email') == 'info@example.org'
    assert button.attr('data-stripe-description') == 'Donate'
    assert button.attr('data-action') == 'submit'
    assert button.attr('data-stripe-allowrememberme') == 'false'
    assert button.attr('data-stripe-key') == '0xdeadbeef'
    with requests_mock.Mocker() as m:
        # Charge creation / retrieval / capture all return the same stub.
        charge = {
            'id': '123456'
        }
        m.post('https://api.stripe.com/v1/charges', json=charge)
        m.get('https://api.stripe.com/v1/charges/123456', json=charge)
        m.post('https://api.stripe.com/v1/charges/123456/capture', json=charge)
        page.form['payment_token'] = 'foobar'
        page.form.submit().follow()
    with requests_mock.Mocker() as m:
        # Charge status as fetched while rendering ticket/payment views.
        m.get('https://api.stripe.com/v1/charges/123456', json={
            'id': '123456',
            'captured': True,
            'refunded': False,
            'paid': True,
            'status': 'foobar'
        })
        client.login_admin()
        ticket = client.get('/tickets/ALL/open').click('Annehmen').follow()
        assert "Bezahlt" in ticket
        payments = client.get('/payments')
        assert "FRM-" in payments
        assert "Stripe Connect" in payments
        assert "info@example.org" in payments
        # Net amount after the 0.59 CHF fee absorbed by the site.
        assert "9.41 CHF" in payments
        assert "0.59" in payments
def test_stripe_charge_fee_to_customer(client):
    """Same payment flow as test_stripe_form_payment, but with the
    'charge fee to customer' provider setting enabled."""
    collection = FormCollection(client.app.session())
    collection.definitions.add('Donate', definition=textwrap.dedent("""
        E-Mail *= @@@
        Donation *=
            (x) Small (10 CHF)
            ( ) Medium (100 CHF)
    """), type='custom', payment_method='free')
    providers = PaymentProviderCollection(client.app.session())
    providers.add(type='stripe_connect', default=True, meta={
        'publishable_key': '0xdeadbeef',
        'access_token': 'foobar',
        'user_id': 'foobar'
    })
    transaction.commit()
    client.login_admin()
    with requests_mock.Mocker() as m:
        # Account info shown on the provider settings page.
        m.get('https://api.stripe.com/v1/accounts/foobar', json={
            'business_name': 'Govikon',
            'email': 'info@example.org'
        })
        # Enable passing the credit-card fee on to the customer.
        page = client.get('/payment-provider').click("Einstellungen", index=1)
        assert 'Govikon / info@example.org' in page
        page.form['charge_fee_to_customer'] = True
        page.form.submit()
    page = client.get('/form/donate')
    page.form['e_mail'] = 'info@example.org'
    page = page.form.submit().follow()
    # The fee (0.61 CHF) is now added on top of the 10 CHF donation.
    assert "Totalbetrag" in page
    assert "10.00 CHF" in page
    assert "+ 0.61 CHF Kreditkarten-Gebühr" in page
    assert "Online zahlen und abschliessen" in page
    button = page.pyquery('.checkout-button')
    # 10.61 CHF expressed in Rappen.
    assert button.attr('data-stripe-amount') == '1061'
    with requests_mock.Mocker() as m:
        charge = {
            'id': '123456'
        }
        m.post('https://api.stripe.com/v1/charges', json=charge)
        m.get('https://api.stripe.com/v1/charges/123456', json=charge)
        m.post('https://api.stripe.com/v1/charges/123456/capture', json=charge)
        page.form['payment_token'] = 'foobar'
        page.form.submit().follow()
    with requests_mock.Mocker() as m:
        m.get('https://api.stripe.com/v1/charges/123456', json={
            'id': '123456',
            'captured': True,
            'refunded': False,
            'paid': True,
            'status': 'foobar'
        })
        client.login_admin()
        ticket = client.get('/tickets/ALL/open').click('Annehmen').follow()
        assert "Bezahlt" in ticket
        payments = client.get('/payments')
        assert "FRM-" in payments
        assert "Stripe Connect" in payments
        assert "info@example.org" in payments
        # The site keeps the full 10.00 CHF; the customer paid the fee.
        assert "10.00" in payments
        assert "0.61" in payments
|
# Example workflow: build a cubic pore network, run a drainage experiment
# (ordinary percolation), invasion percolation and Fickian diffusion, then
# export the results and report the elapsed wall-clock time.
import OpenPNM as op
import time
st = time.time()  # wall-clock start, reported at the end of the script
from OpenPNM.Geometry import models as gm
#==============================================================================
'''Build Topological Network'''
#==============================================================================
# 5x6x7 cubic lattice, 100 um pore spacing, with boundary pores on all faces.
pn = op.Network.Cubic(shape=[5,6,7],spacing=0.0001,name='net')
pn.add_boundaries()
#==============================================================================
'''Build Geometry'''
#==============================================================================
# Internal pores/throats get the Toray090 carbon-paper geometry.
Ps = pn.pores('boundary',mode='not')
Ts = pn.find_neighbor_throats(pores=Ps,mode='intersection',flatten=True)
geom = op.Geometry.Toray090(network=pn,pores=Ps,throats=Ts)
geom.models.add(propname='throat.length',model=gm.throat_length.straight)
# Boundary pores/throats get the zero-volume Boundary geometry.
Ps = pn.pores('boundary')
Ts = pn.find_neighbor_throats(pores=Ps,mode='not_intersection')
boun = op.Geometry.Boundary(network=pn,pores=Ps,throats=Ts)
#==============================================================================
'''Build Phases'''
#==============================================================================
air = op.Phases.Air(network=pn,name='air')
air['pore.Dac'] = 1e-7  # Add custom properties directly
water = op.Phases.Water(network=pn,name='water')
#==============================================================================
'''Build Physics'''
#==============================================================================
Ps = pn.pores()
Ts = pn.throats()
phys_water = op.Physics.Standard(network=pn,phase=water,pores=Ps,throats=Ts)
phys_air = op.Physics.Standard(network=pn,phase=air,pores=Ps,throats=Ts)
#Add some additional models to phys_air
# Diffusive conductance based on the custom diffusivity set above.
phys_air.models.add(model=op.Physics.models.diffusive_conductance.bulk_diffusion,
                    propname='throat.gdiff_ac',
                    pore_diffusivity='pore.Dac')
#==============================================================================
'''Begin Simulations'''
#==============================================================================
'''Perform a Drainage Experiment (OrdinaryPercolation)'''
#------------------------------------------------------------------------------
# Water invades from the bottom boundary; results returned at Pc = 7000 Pa.
OP_1 = op.Algorithms.OrdinaryPercolation(network=pn,invading_phase=water,defending_phase=air)
Ps = pn.pores(labels=['bottom_boundary'])
OP_1.run(inlets=Ps)
OP_1.return_results(Pc=7000)
#------------------------------------------------------------------------------
'''Perform Invasion Percolation'''
#------------------------------------------------------------------------------
inlets = pn.pores('bottom_boundary')
outlets = pn.pores('top_boundary')  # NOTE(review): outlets is computed but never passed to IP_1 — confirm whether run() should receive it
IP_1 = op.Algorithms.InvasionPercolation(network=pn,name='IP_1')
IP_1.run(phase=water,inlets=inlets)
IP_1.return_results()
#------------------------------------------------------------------------------
'''Perform Fickian Diffusion'''
#------------------------------------------------------------------------------
alg = op.Algorithms.FickianDiffusion(network=pn,phase=air)
# Assign Dirichlet boundary conditions to top and bottom surface pores
BC1_pores = pn.pores('right_boundary')
alg.set_boundary_conditions(bctype='Dirichlet', bcvalue=0.6, pores=BC1_pores)
BC2_pores = pn.pores('left_boundary')
alg.set_boundary_conditions(bctype='Dirichlet', bcvalue=0.4, pores=BC2_pores)
#Add new model to air's physics that accounts for water occupancy
phys_air.models.add(model=op.Physics.models.multiphase.conduit_conductance,
                    propname='throat.conduit_diffusive_conductance',
                    throat_conductance='throat.diffusive_conductance',
                    throat_occupancy='throat.occupancy',
                    pore_occupancy='pore.occupancy',
                    mode='strict',
                    factor=0)
#Use desired diffusive_conductance in the diffusion calculation (conductance for the dry network or water-filled network)
alg.run(conductance='throat.diffusive_conductance')
alg.return_results()
Deff = alg.calc_eff_diffusivity()
try:
    # this creates a time step x num_pores, which is what the animated object needs
    inv_seq = water['pore.IP_inv_seq'].squeeze()
    history = []
    for i in sorted(set(inv_seq)):
        history.append( (inv_seq != 0) & (inv_seq < i) )
except Exception as e:
    # NOTE(review): deliberately best-effort — failures (e.g. missing
    # 'pore.IP_inv_seq') are silently ignored; consider at least logging.
    pass
#------------------------------------------------------------------------------
'''Export to VTK'''
#------------------------------------------------------------------------------
# NOTE(review): op.export() is called with no arguments; many OpenPNM
# versions expect the network (e.g. op.export(network=pn)) — confirm
# against the installed version.
op.export()
print("sim time:" + str(time.time()-st))
# módulo destinado a construir el árbol del torneo
import funciones_utiles as fu
from mis_estructuras import Listirijilla
"""As seen at https://github.com/IIC2233/contenidos/blob/master/semana-03/
01-arboles%20y%20listas%20ligadas.ipynb"""
class Partido:
    """A single knockout match between two teams; node of the bracket tree.

    The class attribute ``_id`` is a decreasing counter (16 down to 1) used
    to number matches as they are instantiated; it wraps back to 16 so a
    fresh tournament restarts the numbering.
    """
    _id = 16

    def __init__(self, equipo1=None, equipo2=None, padre=None):
        self._id = Partido._id
        Partido._id -= 1
        if Partido._id == 0:
            Partido._id = 16
        self.equipo1 = equipo1
        self.equipo2 = equipo2
        self.padre = padre  # parent match in the bracket tree
        self.hijo_izquierdo = None
        self.hijo_derecho = None
        # Per-team card lists (player names) and foul counters.
        self.amarillas_eq1 = Listirijilla()
        self.amarillas_eq2 = Listirijilla()
        self.faltas_eq1 = 0
        self.rojas_eq1 = Listirijilla()
        self.rojas_eq2 = Listirijilla()
        self.faltas_eq2 = 0
        self.goles1 = 0
        self.goles2 = 0
        self.ganador = None  # winner's name, set by jugar_partido

    def _simular_incidencias(self, equipo, amarillas, rojas):
        """Simulate fouls and cards for one team.

        Low affinity (< 0.8) with each teammate raises the player's foul
        probability by 2 points over a 5% base. Yellow (20%) and red (5%)
        card draws are made for every player, independent of whether the
        foul occurred — this mirrors the original rules exactly, including
        the order of the random draws.

        :param equipo: team object with .jugadores and .matriz_afinidades
        :param amarillas: Listirijilla receiving yellow-carded player names
        :param rojas: Listirijilla receiving red-carded player names
        :return: number of fouls committed
        """
        faltas = 0
        position = 0
        for jugador in equipo.jugadores:
            prob = 5
            for afinidad in equipo.matriz_afinidades[position]:
                if afinidad < 0.8:
                    prob += 2
            position += 1
            if fu.probabilidad(prob):
                faltas += 1
            if fu.probabilidad(20):
                amarillas.append(jugador.nombre)
            if fu.probabilidad(5):
                rojas.append(jugador.nombre)
        return faltas

    def jugar_partido(self):
        """Play the match and return ``(ganador, perdedor)`` team objects.

        Fouls reduce each team's expected performance by 1% apiece; goals
        are derived from the adjusted expectation. Ties are resolved
        randomly, favouring the stronger side 80% of the time.
        """
        self.faltas_eq1 += self._simular_incidencias(
            self.equipo1, self.amarillas_eq1, self.rojas_eq1)
        self.faltas_eq2 += self._simular_incidencias(
            self.equipo2, self.amarillas_eq2, self.rojas_eq2)
        esperanza1 = self.equipo1.esperanza * (1-(self.faltas_eq1/100))
        esperanza2 = self.equipo2.esperanza * (1-(self.faltas_eq2/100))
        # Floor via //1 (kept from the original; yields a float like 2.0).
        self.goles1 = ((esperanza1/40)**2)//1
        self.goles2 = ((esperanza2/40)**2)//1
        if self.goles1 > self.goles2:
            ganador, perdedor = self.equipo1, self.equipo2
        elif self.goles1 < self.goles2:
            ganador, perdedor = self.equipo2, self.equipo1
        else:
            # Tie-break: team 1 wins with 80% probability when stronger,
            # 20% otherwise (single random draw, as in the original).
            prob_eq1 = 80 if esperanza1 > esperanza2 else 20
            if fu.probabilidad(prob_eq1):
                ganador, perdedor = self.equipo1, self.equipo2
            else:
                ganador, perdedor = self.equipo2, self.equipo1
        self.ganador = ganador.nombre
        return ganador, perdedor

    def __repr__(self):
        # The root match has no parent; show None instead of failing.
        try:
            padre = self.padre._id
        except AttributeError:
            padre = None
        return "ID: {}, Padre: {}, Equipo 1: {}, Equipo 2: {}"\
            .format(self._id, padre, self.equipo1, self.equipo2)
class Torneo:
    """Single-elimination tournament of 16 teams: a bracket tree of matches
    (final = id 16 at the root) plus a separate third-place match (id 15)."""

    def __init__(self, equipos):
        # Root of the bracket and the third/fourth-place playoff node.
        self.nodo_raiz, t_c = self.armar_torneo(equipos)
        self.podio = Listirijilla()  # filled by simular_torneo: 1st, 2nd, 3rd
        self.t_c = t_c # Tercero y cuarto

    def buscar_partido(self, _id):
        """Breadth-first search of the bracket for the match with ``_id``.

        The third-place match (id 15) is outside the tree, so it is
        special-cased. Valid ids (1-14, 16) are found before any None leaf
        child is dequeued.
        """
        if _id == 15:
            return self.t_c
        por_revisar = Listirijilla(self.nodo_raiz)
        while por_revisar:
            current = por_revisar.popleft()
            if current._id == _id:
                return current
            por_revisar.append(current.hijo_izquierdo)
            por_revisar.append(current.hijo_derecho)

    def info_equipo(self, name="Tu Equipo"):
        """Collect a team's tournament statistics by visiting every match.

        :param name: team name to look up
        :return: Listirijilla [best phase, eliminator, goals scored, goals
                 received, fouls, reds, yellows], or False if not found
        """
        por_revisar = Listirijilla(self.nodo_raiz)
        por_revisar.append(self.t_c)
        anotados = 0
        recibidos = 0
        faltas = Listirijilla()
        rojas = 0
        amarillas = 0
        eliminador = None
        mejor_fase = "octavos de final"
        existe_el_equipo = False
        while por_revisar:
            current = por_revisar.popleft()
            if current is None:
                continue
            por_revisar.append(current.hijo_derecho)
            por_revisar.append(current.hijo_izquierdo)
            # NOTE(review): equipo1/equipo2 hold team objects; comparing them
            # against a name string relies on the team class defining __eq__
            # against str — confirm.
            if current.equipo1 == name:
                anotados += current.goles1
                recibidos += current.goles2
                # NOTE(review): amarillas/rojas are ints while amarillas_eq1/
                # rojas_eq1 are Listirijilla card lists, and faltas_eq1 is an
                # int that is iterated below. These three operations look
                # broken (int += list, iterating an int) — probably len(...)
                # and a counter were intended. Verify against Listirijilla.
                amarillas += current.amarillas_eq1
                rojas += current.rojas_eq1
                for falta in current.faltas_eq1:
                    faltas.append(falta)
                # Track the furthest phase reached, based on match id ranges.
                if not current.ganador == name:
                    eliminador = current.ganador
                if current._id in range(9, 13):
                    mejor_fase = "cuartos de final"
                elif current._id in range(13, 16):
                    mejor_fase = "semi final"
                else:
                    mejor_fase = "final"
                existe_el_equipo = True
            elif current.equipo2 == name:
                anotados += current.goles2
                recibidos += current.goles1
                # NOTE(review): same int/Listirijilla mismatch as above.
                amarillas += current.amarillas_eq2
                rojas += current.rojas_eq2
                for falta in current.faltas_eq2:
                    faltas.append(falta)
                if not current.ganador == name:
                    eliminador = current.ganador
                if current._id in range(9, 13):
                    mejor_fase = "cuartos de final"
                elif current._id in range(13, 16):
                    mejor_fase = "semi final"
                else:
                    mejor_fase = "final"
                existe_el_equipo = True
        # The champion was never eliminated.
        if self.nodo_raiz.ganador == name:
            existe_el_equipo = True
            eliminador = "Nadie"
            mejor_fase = "final"
        if not existe_el_equipo:
            return False
        info = Listirijilla(mejor_fase)
        info.append(eliminador)
        info.append(anotados)
        info.append(recibidos)
        info.append(faltas)
        info.append(rojas)
        info.append(amarillas)
        return info

    @staticmethod
    def armar_torneo(equipos):
        """Build the bracket for 16 teams.

        :param equipos: sequence of 16 team objects
        :return: (root match, third-place match) — the third-place node is
                 linked to the semifinals only via child pointers, it is NOT
                 their parent.
        """
        raiz = Partido()
        tercero = Partido()
        semi = Listirijilla()
        # Semifinals (ids 14, 13) hang off the final.
        for i in range(2):
            nuevo = Partido(padre=raiz)
            semi.append(nuevo)
            if raiz.hijo_izquierdo is None:
                raiz.hijo_izquierdo = nuevo
                tercero.hijo_izquierdo = nuevo # no es el padre
            else:
                raiz.hijo_derecho = nuevo
                tercero.hijo_derecho = nuevo # no es el padre
        # Quarterfinals (ids 12-9), two per semifinal.
        cuartos = Listirijilla()
        for i in range(4):
            nuevo = Partido(padre=semi[i//2])
            cuartos.append(nuevo)
            padre = semi[i//2]
            if padre.hijo_izquierdo is None:
                padre.hijo_izquierdo = nuevo
            else:
                padre.hijo_derecho = nuevo
        # Round of 16 (ids 8-1), seeded directly with the teams.
        for i in range(8):
            nuevo = Partido(equipos[2*i], equipos[(2*i)+1], cuartos[i//2])
            padre = cuartos[i//2]
            if padre.hijo_izquierdo is None:
                padre.hijo_izquierdo = nuevo
            else:
                padre.hijo_derecho = nuevo
        return raiz, tercero

    def simular_torneo(self):
        """Play every match, filling parents with winners and the podium
        (1st, 2nd, 3rd) at the end."""
        # Round of 16 and quarterfinals: winners advance to the parent.
        for id_partido in range(1, 13):
            partido = self.buscar_partido(id_partido)
            ganador, perdedor = partido.jugar_partido()
            if partido.padre.equipo1 is None:
                partido.padre.equipo1 = ganador
            else:
                partido.padre.equipo2 = ganador
        # Semifinals: winners go to the final, losers to the 3rd-place match.
        for id_partido in range(13, 15):
            partido = self.buscar_partido(id_partido)
            ganador, perdedor = partido.jugar_partido()
            if partido.padre.equipo1 is None:
                partido.padre.equipo1 = ganador
                t_c = self.buscar_partido(15)
                t_c.equipo1 = perdedor
            else:
                partido.padre.equipo2 = ganador
                t_c = self.buscar_partido(15)
                t_c.equipo2 = perdedor
        t_c = self.buscar_partido(15) # tercero y cuarto
        p_s = self.buscar_partido(16) # primero y segundo
        tercero, cuarto = t_c.jugar_partido()
        primero, segundo = p_s.jugar_partido()
        self.podio.append(primero.nombre)
        self.podio.append(segundo.nombre)
        self.podio.append(tercero.nombre)

    def __repr__(self):
        return "Torneo_type"
|
import discord
from discord.ext import commands
from pymongo import MongoClient
import os
from dotenv import load_dotenv
import asyncio
from tools import _db, _json, tools, embeds, _c, wembeds
import random
from PIL import Image, ImageFont, ImageDraw, ImageFilter
from discord_components import DiscordComponents, Button, ButtonStyle, InteractionType
# Gateway intents: the privileged members intent is needed to resolve
# guild members for the profile command.
intents = discord.Intents.default()
intents.members = True
# NOTE(review): this module-level Client appears unused below (the cog uses
# the bot instance passed to setup()) — confirm before removing.
client = discord.Client(intents=intents)
load_dotenv('.env')
# MongoDB connection; connection string and database name come from .env.
dbclient = MongoClient(os.getenv('DBSTRING1'))
db = dbclient[os.getenv('DBSTRING2')]
class profile(commands.Cog):
    """Character-profile cog: one-time profile creation with interactive
    weapon selection (``createprofile``) and profile-card rendering as an
    image (``profile``)."""

    def __init__(self, client):
        # The bot instance this cog is attached to.
        self.client = client

    @commands.Cog.listener()
    async def on_ready(self):
        """Log that the cog finished loading."""
        print ('profile.py -> on_ready()')

    @commands.command(aliases=['cprofile', 'createp', 'cp'])
    async def createprofile(self, ctx):
        """Interactive, button-driven profile creation.

        Flow: confirm -> pick main weapon -> confirm -> pick secondary
        weapon -> confirm -> create inventory and randomized character
        attributes, then show the new profile. Any 15 s button timeout
        aborts via the asyncio.TimeoutError handler at the bottom.
        """
        try:
            ## CHECK IF THE USER ALREADY HAS A PROFILE
            collection = db["Profile"]
            check = collection.count_documents({"_id": ctx.message.author.id})
            if check != 0:
                profile = db["Profile"].find({"_id": ctx.message.author.id})
                for b in profile:
                    first_name=b["first_name"]
                    last_name=b["last_name"]
                    xp = b["xp"]
                em = discord.Embed(color=0xadcca6, description = f"**{ctx.author.name}#{ctx.author.discriminator}** You seem to already have a profile **({first_name} {last_name} - xp: `{xp}`)**.\nPlease join the [Support Server](https://discord.gg/2TCQtNs8kN) if you believe this is a mistake.")
                await ctx.send(embed=em)
                return
            ## --------------------------------------
            em = discord.Embed(color=0xadcca6, description = f"**{ctx.author.name}#{ctx.author.discriminator}** Are you sure you want to create your profile? **You can only do this once, and changing your profile is impossible.**")
            msg = await ctx.send(embed=em) # "msg" is the message that will be edited throughout the setup.
            ## DEFINE ALL BUTTONS
            # NOTE(review): the trailing commas below make buttons_1/buttons_2
            # 1-tuples containing the button list; list(buttons_X) then yields
            # [[Button, ...]], which matches discord_components' rows format —
            # presumably intentional, but worth confirming.
            buttons_1 = [
                Button(style=1, label="Yes"),
                Button(style=4, label="No")
            ],
            buttons_2 = [
                Button(style=1, emoji='◀️', custom_id="back"),
                Button(style=1, emoji='▶️', custom_id="next"),
                Button(style=3, label="Choose this weapon!"),
                Button(style=4, label=_c.deny()),
            ],
            await msg.edit(components=list(buttons_1))
            # Only accept clicks from the invoking user in this channel.
            def checkforR(res):
                return res.user.id == ctx.author.id and res.channel.id == ctx.channel.id
            res = await self.client.wait_for("button_click", check=checkforR, timeout=15)
            await res.respond(type=6)
            if res.component.label.startswith("N"):
                await _c.cancel(msg)
            elif res.component.label.startswith("Y"):
                await _c.clear(msg)
                # --- main weapon carousel ---
                weapon=['longsword', 'katana', 'dagger', 'greatsword', 'sledgehammer', 'mace']
                page = 0
                main_weapon = ""
                await msg.edit(embed=wembeds.w_page(weapon[page], ctx.author.avatar_url, self.client), components=list(buttons_2))
                while True:
                    res = await self.client.wait_for("button_click", check=checkforR, timeout=15)
                    await res.respond(type=6)
                    if res.component.label == "Choose this weapon!":
                        main_weapon = weapon[page]
                        break
                    elif res.component.custom_id == "back":
                        # Wrap around at both ends of the carousel.
                        if page == 0: page = (len(weapon)-1)
                        else: page -= 1
                        await msg.edit(embed=wembeds.w_page(weapon[page], ctx.author.avatar_url, self.client), components=list(buttons_2))
                    elif res.component.custom_id == "next":
                        if page == (len(weapon)-1): page = 0
                        else: page += 1
                        await msg.edit(embed=wembeds.w_page(weapon[page], ctx.author.avatar_url, self.client), components=list(buttons_2))
                    elif res.component.label == _c.deny():
                        break
                if main_weapon == "":
                    # User denied instead of choosing a weapon.
                    await _c.cancel(msg)
                    return
                await msg.edit(embed=discord.Embed(color=0xadcca6, description=f"**{ctx.author.name}#{ctx.author.discriminator}** Are you sure you want to pick **{main_weapon}** as your main weapon?"))
                await msg.edit(components=list(buttons_1))
                res = await self.client.wait_for('button_click', timeout=15, check=checkforR)
                await res.respond(type=6)
                if res.component.label.startswith("N"):
                    await _c.cancel(msg)
                    return
                # secondary weapon
                await _c.clear(msg)
                weapon=['bow', 'longbow']
                page = 0
                secondary_weapon = ""
                await msg.edit(embed=wembeds.w_page(weapon[page], ctx.author.avatar_url, self.client), components=list(buttons_2))
                while True:
                    res = await self.client.wait_for("button_click", check=checkforR, timeout=15)
                    await res.respond(type=6)
                    if res.component.label == "Choose this weapon!":
                        secondary_weapon = weapon[page]
                        break
                    elif res.component.custom_id == "back":
                        if page == 0: page = (len(weapon)-1)
                        else: page -= 1
                        await msg.edit(embed=wembeds.w_page(weapon[page], ctx.author.avatar_url, self.client), components=list(buttons_2))
                    elif res.component.custom_id == "next":
                        if page == (len(weapon)-1): page = 0
                        else: page += 1
                        await msg.edit(embed=wembeds.w_page(weapon[page], ctx.author.avatar_url, self.client), components=list(buttons_2))
                    elif res.component.label == _c.deny():
                        break
                if secondary_weapon == "":
                    await _c.cancel(msg)
                    return
                await msg.edit(embed=discord.Embed(color=0xadcca6, description=f"**{ctx.author.name}#{ctx.author.discriminator}** Are you sure you want to pick **{secondary_weapon}** as your secondary weapon?"))
                await msg.edit(components=list(buttons_1))
                res = await self.client.wait_for('button_click', timeout=15, check=checkforR)
                await res.respond(type=6)
                if res.component.label.startswith("N"):
                    await _c.cancel(msg)
                    return
                await _c.clear(msg)
                try:
                    _db.create_inventory(ctx.message.author.id, main_weapon, secondary_weapon)
                # NOTE(review): bare except hides the real failure cause;
                # consider narrowing and logging the exception.
                except:
                    await ctx.send(embed=embeds.error_1(ctx.author.name, ctx.author.discriminator))
                    return
                # Randomized character attributes from JSON-configured pools.
                MaleLooks = _json.get_profile()["MaleLooks"]
                FemaleLooks = _json.get_profile()["FemaleLooks"]
                CharacterLastName = _json.get_profile()["CharacterLastName"]
                CharacterFirstNameMale = _json.get_profile()["CharacterFirstNameMale"]
                CharacterFirstNameFemale = _json.get_profile()["CharacterFirstNameFemale"]
                maleFemaleRatio = [1, 2]
                female_height = ["4'7", "4'8", "4'9", "4'10", "4'11", "5'", "5'1", "5'2", "5'3", "5'4", "5'5", "5'6", "5'7", "5'8"]
                male_height = ["4'9", "4'10", "4'11", "5'", "5'11", "6'", "6'1", "5'4", "5'5", "5'6", "5'7", "5'8", "5'9", "5'10"]
                age = ['18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28']
                choice = random.choice(maleFemaleRatio)
                if choice == 1:
                    gender = "Male"
                    looks = random.choice(MaleLooks)
                    first_name = random.choice(CharacterFirstNameMale)
                    last_name = random.choice(CharacterLastName)
                    height = random.choice(male_height)
                    age = random.choice(age)
                else:
                    gender = "Female"
                    looks = random.choice(FemaleLooks)
                    first_name = random.choice(CharacterFirstNameFemale)
                    last_name = random.choice(CharacterLastName)
                    height = random.choice(female_height)
                    age = random.choice(age)
                user_name = f"{ctx.author.name}#{ctx.author.discriminator}"
                # upsert=True creates the profile document if none exists.
                collection.update_one({"_id": ctx.message.author.id}, {"$set":{"gender": gender, "looks": looks, "first_name": first_name, "last_name": last_name, "height": height, "world": "Heimur", "district": "Svart", "friend_id": user_name, "age": age, "xp": 0}}, upsert=True)
                await msg.delete()
                # Show the freshly created profile.
                await ctx.invoke(self.client.get_command('profile'), target=ctx.author)
                print()
                print("----")
                print(f"Created profile for user {user_name} - {ctx.message.author.id}")
                print(f"Created inventory for profile. ({first_name} {last_name})")
                print("----")
        except asyncio.TimeoutError:
            # Any button wait above timing out (15 s) lands here.
            await _c.timeout_button(msg)
            return

    @commands.command(aliases=['p'])
    async def profile(self, ctx, *, target: discord.Member=None):
        """Render the target member's profile card as an image.

        Defaults to the invoking user when no target is given.
        """
        # find who's profile to pull up.
        target = tools.get_target(target, ctx.message.author.id)
        check = db["Profile"].count_documents({"_id": target})
        if check == 0:
            em = discord.Embed(color=0xadcca6, description = f"**{ctx.author.name}#{ctx.author.discriminator}** I couldn't find any profile linked to your account. Do `ax createprofile` to create one. Please join the [Support Server](https://discord.gg/2TCQtNs8kN) if you believe this is a mistake.")
            await ctx.send(embed=em)
            return
        # NOTE(review): local 'profile' shadows both the class and command name.
        profile = db["Profile"].find({"_id": target})
        for b in profile:
            age = b["age"]
            district = b["district"]
            first_name=b["first_name"]
            friend_id=b["friend_id"]
            gender=b["gender"]
            height=b["height"]
            last_name=b["last_name"]
            looks=b["looks"]
            world=b["world"]
            xp = b["xp"]
        main_weapon = _db.get_weapons(target)[0]
        secondary_weapon = _db.get_weapons(target)[1]
        """ badges """
        badges_string = ""
        # Best-effort: users without badges simply get an empty string.
        try:
            badges = _db.get_badges(target)
            badges = _db.split_badges(badges)
            for i in range(0, len(badges)):
                badges_string += f"{self.client.get_emoji(_json.get_emote_id(badges[i]))} "
        except:
            pass
        # try:
        #     em.set_thumbnail(url=_json.get_art()[badges[0]])
        # except:
        #     em.set_thumbnail(url= looks)
        """ IMAGE """
        # Draw the profile card onto the scroll background with PIL.
        profile_image = Image.open("tools/art/scroll.png")
        profile_image_e = ImageDraw.Draw(profile_image)
        u_font = "tools/art/fonts/Charm/Charm-Bold.ttf"
        title_font = ImageFont.truetype(u_font, 40)
        l_title_font = ImageFont.truetype(u_font, 25)
        l_font = ImageFont.truetype(u_font, 15)
        RGB = [37, 39, 43]  # dark text color used for all card text
        # name + last name
        profile_image_e.text((70,35), f"{first_name} {last_name}", (RGB[0], RGB[1], RGB[2]), font=title_font)
        # info card
        profile_image_e.text((70,85), f"Info", (RGB[0], RGB[1], RGB[2]), font=l_title_font)
        profile_image_e.text((70,115), f"Gender: {gender}\nHeight: {height}\nAge: {age}\nFriend ID: {friend_id}", (RGB[0], RGB[1], RGB[2]), font=l_font)
        # region
        profile_image_e.text((70,205), f"Region", (RGB[0], RGB[1], RGB[2]), font=l_title_font)
        profile_image_e.text((70,235), f"World: {world}\nDistrict: {district}", (RGB[0], RGB[1], RGB[2]), font=l_font)
        # level & weapons info
        profile_image_e.text((70,275), f"Level", (RGB[0], RGB[1], RGB[2]), font=l_title_font)
        profile_image_e.text((70,305), f"Player Level: {xp}\nPrimary Weapon: {main_weapon}\nSecondary Weapon: {secondary_weapon}", (RGB[0], RGB[1], RGB[2]), font=l_font)
        if target in _json.get_config()["owners"]:
            """ Add Bot Admin Badge """
            badge_admin = Image.open('tools/art/badge_admin.png')
            badge_admin = badge_admin.resize((40,40))
            profile_image.paste(badge_admin, (260,45), badge_admin.convert('RGBA'))
        # NOTE(review): one file per target is written and never cleaned up.
        image = f"tools/art/result_profile_{target}.png"
        profile_image.save(image)
        await ctx.send(file=discord.File(image))

    @profile.error
    async def profile_error(self, ctx, error):
        """Friendly message when the target member can't be resolved."""
        if isinstance(error, commands.MemberNotFound):
            em = discord.Embed(color=0xadcca6, description = f"**{ctx.author.name}#{ctx.author.discriminator}** Couldn't find a Project Ax profile linked to that account.")
            await ctx.send(embed=em)
def setup(client):
    """Cog entry point invoked by discord.py's load_extension."""
    client.add_cog(profile(client))
|
"""Cluster Mass Richness proxy module
Define the Cluster Mass Richness proxy module and its arguments.
"""
from typing import List, Tuple, final
import numpy as np
from scipy import special
import sacc
from ..parameters import (
ParamsMap,
RequiredParameters,
DerivedParameterCollection,
)
from .cluster_mass import ClusterMass, ClusterMassArgument
from .. import parameters
class ClusterMassRich(ClusterMass):
    """Cluster Mass Richness proxy.

    Models the observed ln-mass proxy as linear in the offsets of ln-mass and
    ln(1+z) from a pivot, with separate updatable coefficient triples for the
    mean (mu_p0..2) and the scatter (sigma_p0..2).
    """
    def __init__(
        self, pivot_mass, pivot_redshift, logMl: float = 13.0, logMu: float = 16.0
    ):
        """Initialize the ClusterMassRich object.

        pivot_mass / pivot_redshift: pivot point of the linear proxy relation
        (pivot_mass given in log10 units).
        logMl / logMu: lower/upper bounds in log10 mass.
        """
        super().__init__()
        self.pivot_mass = pivot_mass
        self.pivot_redshift = pivot_redshift
        # Pivots converted to natural logs to match the ln-based relation below.
        self.log_pivot_mass = pivot_mass * np.log(10.0)
        self.log1p_pivot_redshift = np.log1p(self.pivot_redshift)
        self.logMl = logMl
        self.logMu = logMu
        # Updatable parameters
        self.mu_p0 = parameters.create()
        self.mu_p1 = parameters.create()
        self.mu_p2 = parameters.create()
        self.sigma_p0 = parameters.create()
        self.sigma_p1 = parameters.create()
        self.sigma_p2 = parameters.create()
        self.logM_obs_min = 0.0
        self.logM_obs_max = np.inf
    @final
    def _update_cluster_mass(self, params: ParamsMap):
        """Perform any updates necessary after the parameters have been updated.
        This implementation has nothing to do."""
    @final
    def _reset_cluster_mass(self) -> None:
        """Reset the ClusterMass object.
        This implementation has nothing to do."""
    @final
    def _required_parameters(self) -> RequiredParameters:
        # Parameters are exposed through the auto-created attributes above.
        return RequiredParameters([])
    @final
    def _get_derived_parameters(self) -> DerivedParameterCollection:
        # No derived parameters for this proxy.
        return DerivedParameterCollection([])
    def read(self, _: sacc.Sacc):
        """Method to read the data for this source from the SACC file."""
    @staticmethod
    def cluster_mass_parameters_function(
        log_pivot_mass, log1p_pivot_redshift, p: Tuple[float, float, float], logM, z
    ):
        """Return observed quantity corrected by redshift and mass.

        Evaluates p[0] + p[1]*(lnM - ln_pivot_mass) + p[2]*(ln(1+z) - ln(1+z_pivot)).
        """
        lnM = logM * np.log(10)
        Delta_lnM = lnM - log_pivot_mass
        Delta_z = np.log1p(z) - log1p_pivot_redshift
        return p[0] + p[1] * Delta_lnM + p[2] * Delta_z
    def cluster_mass_lnM_obs_mu_sigma(self, logM, z):
        """Return the mean and standard deviation of the observed mass."""
        return [
            ClusterMassRich.cluster_mass_parameters_function(
                self.log_pivot_mass,
                self.log1p_pivot_redshift,
                (self.mu_p0, self.mu_p1, self.mu_p2),
                logM,
                z,
            ),
            ClusterMassRich.cluster_mass_parameters_function(
                self.log_pivot_mass,
                self.log1p_pivot_redshift,
                (self.sigma_p0, self.sigma_p1, self.sigma_p2),
                logM,
                z,
            ),
        ]
    def gen_bins_by_array(self, logM_obs_bins: np.ndarray) -> List[ClusterMassArgument]:
        """Generate bins by an array of bin edges."""
        if len(logM_obs_bins) < 2:
            raise ValueError("logM_obs_bins must have at least two elements")
        # itertools.pairwise is only available in Python 3.10
        # using zip instead
        return [
            ClusterMassRichBinArgument(
                self, self.logMl, self.logMu, logM_obs_lower, logM_obs_upper
            )
            for logM_obs_lower, logM_obs_upper in zip(
                logM_obs_bins[:-1], logM_obs_bins[1:]
            )
        ]
    def point_arg(self, logM_obs: float) -> ClusterMassArgument:
        """Return the argument generator of the cluster mass function."""
        return ClusterMassRichPointArgument(self, self.logMl, self.logMu, logM_obs)
    def gen_bin_from_tracer(self, tracer: sacc.BaseTracer) -> ClusterMassArgument:
        """Return the argument for the given tracer."""
        if not isinstance(tracer, sacc.tracers.BinRichnessTracer):
            raise ValueError("Tracer must be a BinRichnessTracer")
        return ClusterMassRichBinArgument(
            self, self.logMl, self.logMu, tracer.lower, tracer.upper
        )
class ClusterMassRichPointArgument(ClusterMassArgument):
    """Cluster mass argument for a single (un-binned) richness observation."""
    def __init__(
        self,
        richness: ClusterMassRich,
        logMl: float,
        logMu: float,
        logM_obs: float,
    ):
        super().__init__(logMl, logMu)
        self.richness: ClusterMassRich = richness
        self.logM_obs: float = logM_obs
    @property
    def dim(self) -> int:
        """A point observation adds no extra integration dimensions."""
        return 0
    def get_logM_bounds(self) -> Tuple[float, float]:
        """Bounds of the cluster mass integration."""
        return (self.logMl, self.logMu)
    def get_proxy_bounds(self) -> List[Tuple[float, float]]:
        """No proxy integration is needed for a point observation."""
        return []
    def p(self, logM: float, z: float, *_) -> float:
        """Gaussian likelihood of the observed ln-mass at (logM, z)."""
        mean, scatter = self.richness.cluster_mass_lnM_obs_mu_sigma(logM, z)
        residual = self.logM_obs * np.log(10.0) - mean
        variance = scatter**2
        exponent = np.dot(residual, residual) / (2.0 * variance)
        gauss = np.exp(-exponent) / (np.sqrt(2.0 * np.pi * variance))
        # Jacobian factor ln(10) converts the density from lnM to log10 M.
        return gauss * np.log(10.0)
class ClusterMassRichBinArgument(ClusterMassArgument):
    """Cluster mass argument for a richness bin [logM_obs_lower, logM_obs_upper]."""
    def __init__(
        self,
        richness: ClusterMassRich,
        logMl: float,
        logMu: float,
        logM_obs_lower: float,
        logM_obs_upper: float,
    ):
        super().__init__(logMl, logMu)
        self.richness: ClusterMassRich = richness
        self.logM_obs_lower: float = logM_obs_lower
        self.logM_obs_upper: float = logM_obs_upper
        if logM_obs_lower >= logM_obs_upper:
            raise ValueError("logM_obs_lower must be less than logM_obs_upper")
    @property
    def dim(self) -> int:
        """A binned observation adds no extra integration dimensions."""
        return 0
    def get_logM_bounds(self) -> Tuple[float, float]:
        """Bounds of the cluster mass integration."""
        return (self.logMl, self.logMu)
    def get_proxy_bounds(self) -> List[Tuple[float, float]]:
        """No proxy integration is needed for a pre-binned observation."""
        return []
    def p(self, logM: float, z: float, *_) -> float:
        """Integrated Gaussian probability over the observed richness bin."""
        mean, scatter = self.richness.cluster_mass_lnM_obs_mu_sigma(logM, z)
        denom = np.sqrt(2.0) * scatter
        x_min = (mean - self.logM_obs_lower * np.log(10.0)) / denom
        x_max = (mean - self.logM_obs_upper * np.log(10.0)) / denom
        # Far in the tails, erf(x_min) - erf(x_max) loses precision; use the
        # complementary form there (mathematically the same quantity).
        if x_max > 3.0 or x_min < -3.0:
            # pylint: disable-next=no-member
            return (special.erfc(x_max) - special.erfc(x_min)) / 2.0
        # pylint: disable-next=no-member
        return (special.erf(x_min) - special.erf(x_max)) / 2.0
|
# Generated by Django 2.2.5 on 2020-05-21 16:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add related_name to the likes/reviews foreign keys on the listings app."""
    dependencies = [
        ('listings', '0017_auto_20200521_2058'),
    ]
    operations = [
        # Reverse accessors become comment.likes, phone.likes and phone.reviews.
        migrations.AlterField(
            model_name='commentlikes',
            name='comment_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='likes', to='listings.Comment'),
        ),
        migrations.AlterField(
            model_name='phonelikes',
            name='phone_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='likes', to='listings.MobilePhone'),
        ),
        migrations.AlterField(
            model_name='phonereviews',
            name='phone_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reviews', to='listings.MobilePhone'),
        ),
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2020-03-26 08:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create RequestInfo and convert User.department from a single FK to M2M."""
    dependencies = [
        ('user', '0017_auto_20200319_1057'),
    ]
    operations = [
        migrations.CreateModel(
            name='RequestInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
                ('department', models.CharField(blank=True, max_length=125, null=True, verbose_name='部门名称')),
                ('role_name', models.CharField(blank=True, max_length=125, null=True, verbose_name='角色名称')),
                ('status', models.IntegerField(blank=True, default=0, null=True, verbose_name='status')),
                # NOTE(review): this FK named 'user' points at user.Department,
                # not at the User model — confirm that is intentional.
                ('user', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='request_info', to='user.Department', verbose_name='user')),
            ],
            options={
                'verbose_name': '信息',
                'verbose_name_plural': '信息',
                'db_table': 'tb_Requestinfo',
            },
        ),
        # The FK is dropped and re-added as M2M; existing assignments are lost.
        migrations.RemoveField(
            model_name='user',
            name='department',
        ),
        migrations.AddField(
            model_name='user',
            name='department',
            # NOTE(review): null=True has no effect on ManyToManyField
            # (Django warning fields.W340) — confirm before cleaning up.
            field=models.ManyToManyField(blank=True, db_index=True, default=None, null=True, related_name='users', to='user.Department', verbose_name='部门'),
        ),
    ]
|
"""URL configuration for the MessageEmotion app."""
# Imports consolidated at the top of the module (they were previously
# scattered around the app_name assignment). Nothing was removed.
from django.conf.urls import url, include  # noqa: F401
from django.contrib import admin  # noqa: F401
from django.urls import path, re_path

from . import views

app_name = 'MessageEmotion'

urlpatterns = [
    # Main page view.
    path('', views.MessageEmotion, name="MessageEmotion"),
    # AJAX endpoint used by the page.
    re_path(r'^ajax/ajax_MessageEmotion/$', views.ajax_MessageEmotion, name="ajax_MessageEmotion"),
]
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from random import randint
maingame =Tk()
maingame.title("TIC TAC TOE")
maingame.configure(background = '#FA5858')
# 1 or 2: whose turn it is next.
activePlayer = 1
# Board cells (1-9) claimed by each participant.
listPlayer1 = []
listPlayer2 = []
computerList = []  # scratch list of free cells (rebuilt inside Computer())
comupterPlayed = []  # cells the computer has played (original spelling kept)
# 0 = not chosen yet, 1 = vs computer, 2 = two human players.
gamemode = 0
def setplayer(n):
    """Record the chosen game mode: 1 = vs computer, 2 = two players."""
    global gamemode
    if n in (1, 2):
        gamemode = n
# Start Select Player Section
choose1 = Button(maingame, text="One Player", foreground="#2E2E2E", background="#A9F5F2", font="Aerial 12")
choose1.config(command=lambda: setplayer(1))
choose1.grid(row=0, column=0, padx=(35, 35), pady=(30, 30), ipady=10, ipadx=10)
choose2 = Button(maingame, text="Two Players", foreground="#2E2E2E", background="#58FAF4", font="Aerial 12")
choose2.config(command=lambda: setplayer(2))
choose2.grid(row=0, column=2, padx=(35, 35), pady=(30, 30), ipady=10, ipadx=10)
# End Select Player Section
# Start Button Section
# The nine board cells are identical apart from index and grid slot, so they
# are built in a loop; the original but1..but9 names are kept for the rest
# of the program (changeButton indexes them directly).
_cells = []
for _idx in range(9):
    _btn = Button(maingame, text="     ", background="#FFFFFF", font="Times 16")
    _btn.config(command=lambda place=_idx + 1: makeChange(place))
    _btn.grid(row=1 + _idx // 3, column=_idx % 3, padx=(40, 40), pady=(30, 30), ipady=40, ipadx=40)
    _cells.append(_btn)
but1, but2, but3, but4, but5, but6, but7, but8, but9 = _cells
# End Button Section
# Start Select Computer or Players Section
def makeChange(place):
    """Apply a move at cell `place` for the active player and advance the turn.

    Mode 1 (vs computer): the human's move triggers Computer(), which
    re-enters this function with activePlayer == 2 to place the 'O'.
    Mode 2: the two human players simply alternate X and O.
    """
    global activePlayer
    if (gamemode==1):
        if (activePlayer==1):
            activePlayer=2
            maingame.title("TIC TAC TOY : Player 1")
            changeButton(place,'X')
            listPlayer1.append(place)
            # Computer replies immediately, then the mode-1 win check runs.
            Computer()
            winner_computer()
        elif (activePlayer==2):
            activePlayer=1
            # NOTE(review): title still says "Player 1" for the computer's
            # move — confirm the intended text.
            maingame.title("TIC TAC TOY : Player 1")
            changeButton(place,'O')
            listPlayer2.append(place)
    elif (gamemode==2):
        if (activePlayer==1):
            activePlayer=2
            maingame.title("TIC TAC TOY : Player 1")
            changeButton(place,'X')
            listPlayer1.append(place)
        elif (activePlayer==2):
            activePlayer=1
            maingame.title("TIC TAC TOY : Player 2")
            changeButton(place,'O')
            listPlayer2.append(place)
    # Two-player win/draw check runs after every move (in both modes).
    winner()
def Computer():
    """Play a random computer move on one of the free cells."""
    global listPlayer1
    global listPlayer2
    global comupterPlayed
    # Free cells = 1..9 minus everything already played by either side.
    computerList = [cell for cell in range(1, 10)
                    if cell not in listPlayer1 and cell not in listPlayer2]
    if len(computerList) > 1:
        choice = computerList[randint(0, len(computerList) - 1)]
        comupterPlayed.append(choice)
        makeChange(choice)
# End Select Comupter or Players Section
#Start Winner Section
def winner_computer():
    """Check for a win/draw in single-player mode and end the game if found.

    Shows a message box and destroys the window when the computer wins,
    the human (player 1) wins, or the board fills with no winner.
    Replaces the original hand-written if/elif ladder with a data-driven
    scan over every winning line (same lines, same precedence), and fixes
    the misspelled "Compuer" message.
    """
    global comupterPlayed
    global listPlayer1
    # All eight winning triples: rows, columns, diagonals.
    win_lines = [
        (1, 2, 3), (4, 5, 6), (7, 8, 9),
        (1, 4, 7), (2, 5, 8), (3, 6, 9),
        (1, 5, 9), (3, 5, 7),
    ]
    result = 0
    # Computer's lines are checked first, as in the original ladder.
    for line in win_lines:
        if all(cell in comupterPlayed for cell in line):
            result = 1
            break
    if result == 0:
        for line in win_lines:
            if all(cell in listPlayer1 for cell in line):
                result = 2
                break
    if result == 1:
        messagebox.showinfo(title="Winner", message="Computer")
        maingame.destroy()
    elif result == 2:
        messagebox.showinfo(title="Winner", message="Player 1")
        maingame.destroy()
    # Board full (5 human + 4 computer moves) with no winner: draw.
    if len(listPlayer1) == 5 and result == 0 and len(comupterPlayed) == 4:
        messagebox.showinfo(title="NO winner", message="Draw")
        maingame.destroy()
def winner():
    """Check for a win/draw in two-player mode and end the game if found.

    Bug fix: the original ladder omitted both diagonals (1-5-9 and 3-5-7)
    when checking player 2, so a diagonal win by player 2 went undetected.
    Both players are now checked against the same complete set of lines.
    """
    # All eight winning triples: rows, columns, diagonals.
    win_lines = [
        (1, 2, 3), (4, 5, 6), (7, 8, 9),
        (1, 4, 7), (2, 5, 8), (3, 6, 9),
        (1, 5, 9), (3, 5, 7),
    ]
    result = 0
    # Player 1 is checked first, as in the original ladder.
    for line in win_lines:
        if all(cell in listPlayer1 for cell in line):
            result = 1
            break
    if result == 0:
        for line in win_lines:
            if all(cell in listPlayer2 for cell in line):
                result = 2
                break
    if result == 1:
        messagebox.showinfo(title="Winner", message="Player 1")
        maingame.destroy()
    elif result == 2:
        messagebox.showinfo(title="Winner", message="Player 2")
        maingame.destroy()
    # Board full (5 + 4 moves) with no winner: draw.
    if len(listPlayer2) == 4 and result == 0 and len(listPlayer1) == 5:
        messagebox.showinfo(title="NO winner", message="Draw")
        maingame.destroy()
#End Winner Section
# Start Set Changed Played Section
def changeButton(place, text):
    """Stamp `text` ('X' or 'O') on board button `place` (1-9) and disable it."""
    if 1 <= place <= 9:
        board = (but1, but2, but3, but4, but5, but6, but7, but8, but9)
        board[place - 1].config(text=text, state=DISABLED)
# End Set Changed Played Section
# Hand control to Tk: blocks here until the window is destroyed.
maingame.mainloop()
|
#!/usr/local/bin/python
# encoding: utf-8
"""
fetch_ryanair_prices.py
Created by Jakub Konka on 2011-10-8.
Copyright (c) 2011 University of Strathclyde. All rights reserved.
"""
import sys
import os
import re
from mechanize import Browser
# NOTE(review): mechanize is a Python-2-era scraping library; the commented
# lines below appear to sketch the intended form-submission flow.
br = Browser()
br.open("http://www.ryanair.com/")
# Dump every form on the landing page so the right one can be identified.
for f in br.forms():
    print(f)
# br.select_form(nr=0)
# br.form['q'] = 'baggage'
# br.submit()
# print(br.response().read())
#!/usr/bin/env python3
import sys
import subprocess
import os
import json
from datetime import datetime, date
from pymongo import MongoClient
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from plotly.graph_objs import Pie, Figure, Layout, Heatmap, Scatter
def import_meta_data(directory):
    """Extract EXIF data for every image under `directory` via exiftool and
    store one document per image in the local metadata.exif Mongo collection."""
    def iter_images(root):
        # Yield absolute paths of every non-hidden file under root.
        for dirpath, _, filenames in os.walk(root):
            for name in filenames:
                if name.startswith("."):
                    continue
                yield os.path.abspath(os.path.join(dirpath, name))
    def read_meta(image, output_format='-json'):
        # exiftool prints metadata on stdout; -json yields a one-element array.
        raw = subprocess.check_output(['exiftool', output_format, image]).decode('utf-8')
        if output_format == '-json':
            return next(iter(json.loads(raw)))
        return raw
    collection = MongoClient('localhost', 27017).metadata.exif
    for image in iter_images(directory):
        collection.insert_one(read_meta(image))
        # Progress dot per imported image.
        print('.', end='', flush=True)
def focal_lengths_pie_chart():
    """Render a pie chart of focal-length usage from the EXIF collection."""
    def fetch_counts():
        exif = MongoClient('localhost', 27017).metadata.exif
        # Server-side map-reduce: one (focalLength, count) pair per value.
        map_js = """
    function focalLengthMap() {
        if (this.FocalLength == null || this.FocalLength == 'undef') return;
        emit(this.FocalLength, 1);
    }"""
        reduce_js = """
    function focalLengthReduce(key, values) {
        return Array.sum(values);
    }"""
        return exif.map_reduce(map_js, reduce_js, "focal_lenght_results").find()
    def render(results):
        # One slice per focal length: label = value, size = picture count.
        pairs = [(result['_id'], result['value']) for result in results]
        labels = [p[0] for p in pairs]
        values = [p[1] for p in pairs]
        plot([Pie(labels=labels, values=values)], filename='%s_focal_lengths.html' % date.today().strftime('%Y-%m-%d'))
    render(fetch_counts())
def time_of_day_heatmap():
    """Render a weekday x hour heatmap of when pictures were taken."""
    def gather_data():
        exif = MongoClient('localhost', 27017).metadata.exif
        # events[weekday][hour] -> picture count (Monday = row 0).
        events = [[0] * 24 for _ in range(7)]
        for result in exif.find():
            # .get() instead of [] so documents with no CreateDate field at
            # all (the original raised KeyError on them) are simply skipped.
            create_date = result.get('CreateDate')
            if create_date is not None:
                parsed_date = datetime.strptime(create_date, '%Y:%m:%d %H:%M:%S')
                events[parsed_date.weekday()][parsed_date.hour] += 1
        return events
    def create_heatmap(events):
        hours = ['%02d' % h for h in range(24)]
        days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
        heatmap = Heatmap(z=events, y=days, x=hours, colorscale='Viridis')
        layout = Layout(title='Pictures per weekday & time of day', xaxis={'tickmode': 'linear'})
        plot(Figure(data=[heatmap], layout=layout), filename='%s_pictures_per_weekday_and_time_of_day.html' % date.today().strftime('%Y-%m-%d'))
    create_heatmap(gather_data())
def aperture_and_shutter_speed_bubble_chart():
    """Render a bubble chart of (aperture, shutter speed) usage counts."""
    def fetch_counts():
        exif = MongoClient('localhost', 27017).metadata.exif
        # Server-side map-reduce keyed on the (aperture, shutterSpeed) pair.
        map_js = """
    function focalLengthMap() {
        if (!this.Aperture || this.Aperture === 'undef') return;
        if (!this.ShutterSpeed || this.ShutterSpeed === 'undef') return;
        emit({aperture: this.Aperture, shutterSpeed: this.ShutterSpeed}, 1);
    }"""
        reduce_js = """
    function focalLengthReduce(key, values) {
        return Array.sum(values);
    }"""
        return exif.map_reduce(map_js, reduce_js, "aperture_and_shutter_speeds").find()
    def render(events):
        shutter_speed = []
        aperture = []
        size = []
        text = []
        for event in events:
            key = event['_id']
            count = event['value']
            aperture.append(key['aperture'])
            shutter_speed.append(key['shutterSpeed'])
            size.append(count)
            text.append('Aperture: %s<br>Shutter Speed: %s<br>Count: %s' % (key['aperture'], key['shutterSpeed'], count))
        trace = Scatter(
            x=shutter_speed,
            y=aperture,
            mode='markers',
            marker={'size': size},
            text=text,
        )
        layout = Layout(
            title='Aperture and Shutter Speeds',
            hovermode='closest',
            xaxis={'title': 'Shutter Speed', 'ticklen': 5, 'zeroline': False, 'gridwidth': 2},
            yaxis={'range': [0, 25], 'title': 'Aperture', 'ticklen': 5, 'gridwidth': 2, 'zeroline': False},
            showlegend=False,
        )
        plot(Figure(data=[trace], layout=layout), filename='%s_aperture_and_shutter_speed.html' % date.today().strftime('%Y-%m-%d'))
    render(fetch_counts())
if __name__ == "__main__":
    # TODO: only import pictures we have not yet seen before
    # First CLI argument = directory of pictures to ingest.
    import_meta_data(next(iter(sys.argv[1:])))
    focal_lengths_pie_chart()
    time_of_day_heatmap()
    aperture_and_shutter_speed_bubble_chart()
|
import asyncio
from discord.ext import commands
@commands.group(brief="TBD")
async def channel(ctx: commands.Context):
    """Parent command group for channel management; prompts when no subcommand given."""
    if ctx.invoked_subcommand is None:
        await ctx.send("Need subcommand")
@channel.command(brief="TBD")
async def create(ctx: commands.Context, *, name: str):
    """Stub for channel creation: waits up to 60s for a follow-up message.

    NOTE(review): `name` is currently unused; the actual channel-creation
    logic is not implemented yet.
    """
    # text channel format : no space, use `-`
    # voice channel name : can include space
    channel = ctx.channel
    def check(m):
        # Only accept messages from the channel the command was issued in.
        return m.channel == channel
    try:
        await ctx.bot.wait_for("message", check=check, timeout=60.0)
        await ctx.send("subcommand test")
    except asyncio.TimeoutError:
        # No reply in time: remove the triggering command message.
        await ctx.message.delete()
def setup(bot: commands.Bot):
    """discord.py extension entry point: register the channel command group."""
    bot.add_command(channel)
|
from python_framework import Controller, ControllerMethod, HttpStatus
from dto import ContactDto
@Controller(url = '/contact', tag='Contact', description='Contact controller')
class ContactController:
    """HTTP controller for single-contact upsert requests at /contact."""
    @ControllerMethod(url = '/',
        requestClass = ContactDto.ContactRequestDto,
        responseClass = ContactDto.ContactResponseDto
    )
    def put(self, dto):
        # Create-or-update one contact ("safelly" spelling comes from the service API).
        return self.service.contact.safellyCreateOrUpdate(dto), HttpStatus.OK
@Controller(url = '/contact/batch', tag='Contact', description='Contact controller')
class ContactBatchController:
    """HTTP controller for list-based contact operations at /contact/batch."""
    @ControllerMethod(url = '/',
        responseClass = [[ContactDto.ContactResponseDto]]
    )
    def get(self) :
        # Return every stored contact.
        return self.service.contact.findAll(), HttpStatus.OK
    @ControllerMethod(url = '/',
        requestClass = [[ContactDto.ContactRequestDto]],
        responseClass = [[ContactDto.ContactResponseDto]]
    )
    def put(self, dtoList):
        # Create-or-update a whole list of contacts in one request.
        return self.service.contact.safellyCreateOrUpdateAll(dtoList), HttpStatus.OK
|
from jinja2 import Template
import os
import pdb
import sys
print(sys.argv)

# Read the template as a single string (was readlines + join; also renames
# the handle so it no longer shadows the `file` builtin name).
with open("./resources/envoy.yaml", "r") as template_file:
    file_string = template_file.read()

# Environment overrides with sensible defaults for local/docker use.
LISTENER_ADDRESS = os.getenv("LISTENER_ADDRESS", "0.0.0.0")
LISTENER_PORT = os.getenv("LISTENER_PORT", "80")
PROXY_ADDRESS = os.getenv("PROXY_ADDRESS", "host.docker.internal")
PROXY_PORT = os.getenv("PROXY_PORT", "8080")

# Render the Jinja2 template with the resolved values.
template = Template(file_string)
new_file_string = template.render(
    LISTENER_ADDRESS = LISTENER_ADDRESS,
    LISTENER_PORT = LISTENER_PORT,
    PROXY_ADDRESS = PROXY_ADDRESS,
    PROXY_PORT = PROXY_PORT
)

# Write the rendered config where envoy expects it.
with open("/etc/envoy.yaml", "w") as output_file:
    output_file.write(new_file_string)
|
from bs4 import BeautifulSoup
import urllib.request
import csv |
import pyglet
import math
from pyglet.gl import *
from pyglet.window import key
class Model:
    """One textured grass block pre-built into a pyglet vertex batch."""
    def get_texture(self, file):
        """Load an image file and wrap its texture in a TextureGroup."""
        tex = pyglet.image.load(file).texture
        # Nearest-neighbour filtering keeps the pixel-art look when scaled.
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        return pyglet.graphics.TextureGroup(tex)
    def __init__(self):
        self.top = self.get_texture('cube/resource/grass_top.png')
        self.side = self.get_texture('cube/resource/grass_side.png')
        self.bottom = self.get_texture('cube/resource/dirt.png')
        self.batch = pyglet.graphics.Batch()
        # Unit cube from (x, y, z) to (X, Y, Z); one quad per face below.
        x, y, z = 0, 0, -1
        X, Y, Z = x+1, y+1, z+1
        #colour = ('c3f', (1, 1, 1,) * 4)
        # Same full-texture UV coords are reused for every face.
        texture_coords = ('t2f', (x,y, X,y, X,Y, x,Y, ))
        self.batch.add(4, GL_QUADS, self.side, ('v3f', (x,y,Z, X,y,Z, X,Y,Z, x,Y,Z, )), texture_coords) # front
        self.batch.add(4, GL_QUADS, self.side, ('v3f', (X,y,z, x,y,z, x,Y,z, X,Y,z, )), texture_coords) # back
        self.batch.add(4, GL_QUADS, self.side, ('v3f', (x,y,z, x,y,Z, x,Y,Z, x,Y,z, )), texture_coords) # left
        self.batch.add(4, GL_QUADS, self.side, ('v3f', (X,y,Z, X,y,z, X,Y,z, X,Y,Z, )), texture_coords) # right
        self.batch.add(4, GL_QUADS, self.top, ('v3f', (x,Y,Z, X,Y,Z, X,Y,z, x,Y,z, )), texture_coords) # top
        self.batch.add(4, GL_QUADS, self.bottom, ('v3f', (x,y,z, X,y,z, X,y,Z, x,y,Z, )), texture_coords) # bottom
    def draw(self):
        """Draw the whole pre-built cube batch."""
        self.batch.draw()
class Player:
    """First-person camera state: position [x, y, z] and rotation [pitch, yaw]."""
    def __init__(self, pos=(0, 0, 0), rot=(0, 0)):
        self.pos = list(pos)
        self.rot = list(rot)
    def mouse_motion(self, dx, dy):
        """Turn the camera by a mouse delta; pitch is clamped to +/-90 degrees."""
        self.rot[0] += dy / 8
        self.rot[1] -= dx / 8
        self.rot[0] = max(-90, min(90, self.rot[0]))
    def update(self, dt, keys):
        """Move according to the held keys, with speed scaled by frame time."""
        step = dt * 10
        yaw = -self.rot[1] / 180 * math.pi
        dx = step * math.sin(yaw)
        dz = step * math.cos(yaw)
        if keys[key.W]:
            self.pos[0] += dx
            self.pos[2] -= dz
        if keys[key.S]:
            self.pos[0] -= dx
            self.pos[2] += dz
        if keys[key.A]:
            self.pos[0] -= dz
            self.pos[2] -= dx
        if keys[key.D]:
            self.pos[0] += dz
            self.pos[2] += dx
        if keys[key.SPACE]:
            self.pos[1] += step
        if keys[key.LSHIFT]:
            self.pos[1] -= step
class Window(pyglet.window.Window):
    """Game window: owns the player and model, and drives the GL camera."""
    def push(self, pos, rotation):
        """Push a matrix and apply the inverse camera transform (rotate, then translate)."""
        glPushMatrix()
        # player viewpoint - rotation
        glRotatef(-rotation[0], 1, 0, 0)
        glRotatef(-rotation[1], 0, 1, 0)
        # player viewpoint - position
        # x, y, z = pos
        glTranslatef(-pos[0], -pos[1], -pos[2], )
    def Projection(self):
        # Switch to (and reset) the projection matrix stack.
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
    def Model(self):
        # Switch to (and reset) the modelview matrix stack.
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
    def set2d(self):
        # Orthographic projection for HUD/2D drawing.
        self.Projection()
        gluOrtho2D(0, self.width, 0, self.height)
        self.Model()
    def set3d(self):
        # Perspective projection: 70 degree FOV, near 0.05, far 1000.
        self.Projection()
        gluPerspective(70, self.width/self.height, 0.05, 1000)
        self.Model()
    def setLock(self, state):
        # Also grab/release the OS cursor when toggling mouse-look.
        self.lock = state
        self.set_exclusive_mouse(state)
    lock = False
    # Assigning self.mouse_lock routes through setLock above.
    mouse_lock = property(lambda self:self.lock, setLock)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.set_minimum_size(300, 200)
        self.keys = key.KeyStateHandler()
        self.push_handlers(self.keys)
        # Drive Player.update every frame.
        pyglet.clock.schedule(self.update)
        self.model = Model()
        self.player = Player(pos=(0.5, 1.5, 1.5), rot=(-30, 30))
    def on_mouse_motion(self, x, y, dx, dy):
        # Mouse only steers the camera while mouse-look is enabled.
        if self.mouse_lock:
            self.player.mouse_motion(dx, dy)
    def on_key_press(self, symbol, modifiers):
        if symbol == key.ESCAPE:
            self.close()
        elif symbol == key.E:
            # E toggles mouse-look mode.
            self.mouse_lock = not self.mouse_lock
    def update(self, dt):
        self.player.update(dt, self.keys)
    def on_draw(self):
        self.clear()
        self.set3d()
        # push() leaves a matrix on the stack; popped after drawing.
        self.push(self.player.pos, self.player.rot)
        self.model.draw()
        glPopMatrix()
def main():
    """Create the window, set global GL state, and enter the pyglet main loop."""
    window = Window(width=400, height=300, caption='New Jericho', resizable=True)
    # Sky-blue clear colour.
    glClearColor(0.5, 0.7, 1, 1)
    glEnable(GL_DEPTH_TEST)
    #glEnable(GL_CULL_FACE) # Not draw faces from behind. Good way to test if you defined faces well.
    pyglet.app.run()
if __name__ == '__main__':
    main()
|
'''
This package demonstrates how to:
1) View the total number of accesses of a page on a site across all users
2) View the total number of accesses between a start date and an end date for a user
3) View the top N clients that access the site the most
We implement the requirements in two ways: using a hash map and an ordered set.
'''
import numpy as np
from src.Module.module import Module
class ReLU(Module):
    """Rectified Linear Unit activation layer."""
    def forward(self, X):
        """Element-wise ReLU: zero out negative entries, pass the rest through."""
        negative = X < 0
        return np.where(negative, 0, X)
    def backward_delta(self, input, delta):
        """Backprop: gate the incoming delta by the ReLU derivative (1 where input >= 0)."""
        gate = np.where(input < 0, 0, 1)
        return gate * delta
|
def my_function():
    """IR key R0: reset state and switch the camera to CLASSIFICATION mode (flag c)."""
    global c, b, a, d, 次数
    # Mode flags: exactly one of a/b/c/d is 1; c selects classification driving.
    c = 1
    b = 0
    a = 0
    d = 0
    microbot.microbot_init()
    microbot.clear_light()
    WonderCam.wondercam_init()
    WonderCam.change_func(WonderCam.Functions.CLASSIFICATION)
    # Consecutive-detection counter used by the classification branch.
    次数 = 0
microbot.onremote_ir_pressed(microbot.IRKEY.R0, my_function)
def my_function2():
    """IR key A: stop everything — clear all mode flags, lights, and motors."""
    global a, b, c, d
    microbot.microbot_init()
    WonderCam.wondercam_init()
    WonderCam.turn_on_or_off_led(WonderCam.LED_STATE.OFF)
    WonderCam.change_func(WonderCam.Functions.NO_FUNCTION)
    a = 0
    b = 0
    c = 0
    d = 0
    microbot.clear_light()
    microbot.belt_clearLight()
    microbot.set_motor_speed(0, 0)
microbot.onremote_ir_pressed(microbot.IRKEY.A, my_function2)
def my_function3():
    """IR key R2: switch to face-tracking mode (flag b) and set the tracking set-point."""
    global b, c, a, d, 目标Y, 目标X
    b = 1
    c = 0
    a = 0
    d = 0
    microbot.microbot_init()
    WonderCam.wondercam_init()
    WonderCam.change_func(WonderCam.Functions.FACE_DETECT)
    WonderCam.turn_on_or_off_led(WonderCam.LED_STATE.ON)
    WonderCam.set_led_brightness(100)
    microbot.set_brightness(50)
    microbot.set_pixel_rgb(microbot.Lights.LIGHT6, RGBColors.INDIGO)
    microbot.set_pixel_rgb(microbot.Lights.LIGHT5, RGBColors.INDIGO)
    microbot.show_light()
    # Target pixel position the control loop steers the face centre towards.
    目标Y = 150
    目标X = 160
microbot.onremote_ir_pressed(microbot.IRKEY.R2, my_function3)
def my_function4():
    """IR key R3: switch to line-following mode (flag d)."""
    global d, a, b, c
    d = 1
    a = 0
    b = 0
    c = 0
    microbot.microbot_init()
    WonderCam.wondercam_init()
    WonderCam.change_func(WonderCam.Functions.LINE_FOLLOWING)
    WonderCam.turn_on_or_off_led(WonderCam.LED_STATE.ON)
    WonderCam.set_led_brightness(100)
    microbot.set_brightness(50)
    microbot.set_pixel_rgb(microbot.Lights.LIGHT5, RGBColors.WHITE)
    microbot.set_pixel_rgb(microbot.Lights.LIGHT6, RGBColors.WHITE)
    microbot.belt_setPixelRGB(microbot.LightsBelt.LIGHT1, RGBColors.WHITE)
    microbot.show_light()
microbot.onremote_ir_pressed(microbot.IRKEY.R3, my_function4)
def my_function5():
    """IR key R1: switch to colour-tracking mode (flag a) and set the tracking set-point."""
    global a, b, c, d, 目标Y, 目标X
    a = 1
    b = 0
    c = 0
    d = 0
    microbot.microbot_init()
    WonderCam.wondercam_init()
    WonderCam.change_func(WonderCam.Functions.COLOR_DETECT)
    WonderCam.turn_on_or_off_led(WonderCam.LED_STATE.ON)
    WonderCam.set_led_brightness(100)
    microbot.set_brightness(50)
    microbot.set_pixel_rgb(microbot.Lights.LIGHT6, RGBColors.WHITE)
    microbot.set_pixel_rgb(microbot.Lights.LIGHT5, RGBColors.WHITE)
    microbot.show_light()
    # Target pixel position the control loop steers the colour blob towards.
    目标Y = 150
    目标X = 160
microbot.onremote_ir_pressed(microbot.IRKEY.R1, my_function5)
# Shared state for the control loops (original identifier names kept).
上次的偏移 = 0  # previous line offset (for the derivative term)
转向速度1 = 0  # line-following steering output
偏移 = 0  # current line offset (rho)
夹角 = 0  # current line angle (theta)
结果 = 0  # last classification result id
电机2速度 = 0  # motor 2 speed
电机1速度 = 0  # motor 1 speed
转向速度 = 0  # turn speed
前进速度 = 0  # forward speed
中心Y = 0  # detected target centre Y
中心X = 0  # detected target centre X
目标X = 0  # target X set-point
目标Y = 0  # target Y set-point
次数 = 0  # consecutive-detection counter
d = 0
c = 0
b = 0
a = 0
microbot.microbot_init()
# Mode flags: a = colour-track, b = face-track, c = classification, d = line-follow.
a = 0
b = 0
c = 0
d = 0
def on_forever():
    """Main control loop: colour tracking (a), face tracking (b), or classification driving (c)."""
    global 中心X, 中心Y, 前进速度, 转向速度, 电机1速度, 电机2速度, 次数, 结果
    # --- Colour tracking: steer so the detected blob sits at (目标X, 目标Y). ---
    if a == 1:
        WonderCam.update_result()
        if WonderCam.is_detected_color_id(1):
            中心X = WonderCam.xof_color_id(WonderCam.Options.POS_X, 1)
            中心Y = WonderCam.xof_color_id(WonderCam.Options.POS_Y, 1) + WonderCam.xof_color_id(WonderCam.Options.HEIGHT, 1) / 2
            # Proportional forward speed with a 20 px dead band.
            if abs(目标Y - 中心Y) > 20:
                if 目标Y > 中心Y:
                    前进速度 = (目标Y - 中心Y) * 0.12
                else:
                    前进速度 = (目标Y - 中心Y) * 0.12
            else:
                前进速度 = 0
            前进速度 = Math.constrain(前进速度, -80, 80)
            # Proportional turn speed with a 40 px dead band.
            if abs(目标X - 中心X) > 40:
                转向速度 = Math.constrain((目标X - 中心X) * 0.07, -30, 30)
            else:
                转向速度 = 0
            电机1速度 = Math.constrain(前进速度 - 转向速度, -100, 100)
            电机2速度 = Math.constrain(前进速度 + 转向速度, -100, 100)
            # Re-map away from the motors' dead zone (|speed| in 10..90).
            if 电机2速度 > 0:
                电机2速度 = Math.map(电机2速度, 0, 100, 10, 90)
            elif 电机2速度 < 0:
                电机2速度 = Math.map(电机2速度, 0, -100, -10, -90)
            else:
                电机2速度 = 0
            if 电机1速度 > 0:
                电机1速度 = Math.map(电机1速度, 0, 100, 10, 90)
            elif 电机1速度 < 0:
                电机1速度 = Math.map(电机1速度, 0, -100, -10, -90)
            else:
                电机1速度 = 0
            microbot.set_motor_speed(电机1速度, 电机2速度)
        else:
            # Nothing detected: stop.
            电机1速度 = 0
            电机2速度 = 0
            microbot.set_motor_speed(电机1速度, 电机2速度)
    # --- Face tracking: same control scheme, keyed on face detection. ---
    if b == 1:
        WonderCam.update_result()
        if WonderCam.is_detected_face(1):
            music.start_melody(music.built_in_melody(Melodies.RINGTONE), MelodyOptions.ONCE)
            中心X = WonderCam.getlearned_face_y(WonderCam.Options.POS_X, 1)
            中心Y = WonderCam.getlearned_face_y(WonderCam.Options.POS_Y, 1) + WonderCam.getlearned_face_y(WonderCam.Options.HEIGHT, 1) / 2
            if abs(目标Y - 中心Y) > 20:
                if 目标Y > 中心Y:
                    前进速度 = (目标Y - 中心Y) * 0.12
                else:
                    前进速度 = (目标Y - 中心Y) * 0.12
            else:
                前进速度 = 0
            前进速度 = Math.constrain(前进速度, -80, 80)
            if abs(目标X - 中心X) > 40:
                转向速度 = Math.constrain((目标X - 中心X) * 0.07, -30, 30)
            else:
                转向速度 = 0
            电机1速度 = Math.constrain(前进速度 - 转向速度, -100, 100)
            电机2速度 = Math.constrain(前进速度 + 转向速度, -100, 100)
            if 电机2速度 > 0:
                电机2速度 = Math.map(电机2速度, 0, 100, 10, 90)
            elif 电机2速度 < 0:
                电机2速度 = Math.map(电机2速度, 0, -100, -10, -90)
            else:
                电机2速度 = 0
            if 电机1速度 > 0:
                电机1速度 = Math.map(电机1速度, 0, 100, 10, 90)
            elif 电机1速度 < 0:
                电机1速度 = Math.map(电机1速度, 0, -100, -10, -90)
            else:
                电机1速度 = 0
            microbot.set_motor_speed(电机1速度, 电机2速度)
        else:
            电机1速度 = 0
            电机2速度 = 0
            microbot.set_motor_speed(电机1速度, 电机2速度)
    # --- Classification driving: act on a label seen >3 frames in a row. ---
    if c == 1:
        WonderCam.update_result()
        if WonderCam.max_confidence_id() == 结果 and WonderCam.max_confidence_id() != 1:
            次数 += 1
        else:
            次数 = 0
            microbot.set_motor_speed(15, 15)
        结果 = WonderCam.max_confidence_id()
        if 次数 > 3:
            microbot.set_motor_speed(0, 0)
            # Label 4: turn left, 5: turn right, 6: pause, 7: U-turn.
            if 结果 == 4:
                microbot.set_motor_speed(-15, 15)
                basic.pause(500)
                microbot.set_motor_speed(0, 0)
                microbot.set_motor_speed(15, 15)
            if 结果 == 5:
                microbot.set_motor_speed(15, -15)
                basic.pause(500)
                microbot.set_motor_speed(0, 0)
                microbot.set_motor_speed(15, 15)
            if 结果 == 6:
                microbot.set_motor_speed(0, 0)
                basic.pause(2000)
            if 结果 == 7:
                microbot.set_motor_speed(15, -15)
                basic.pause(1000)
                microbot.set_motor_speed(0, 0)
                microbot.set_motor_speed(15, 15)
    # No active mode (line-following runs in its own loop): keep motors stopped.
    if a == 0 and (b == 0 and c == 0):
        microbot.set_motor_speed(0, 0)
basic.forever(on_forever)
def on_forever2():
    # Line-following loop (MakeCode micro:bit + WonderCam vision module).
    # Globals (Chinese names): 夹角 = line angle, 偏移 = lateral offset,
    # 转向速度1 = steering speed, 上次的偏移 = previous offset.
    global 夹角, 偏移, 转向速度1, 上次的偏移
    if d == 1:
        WonderCam.update_result()
        if WonderCam.is_detected_line_id(1):
            # Indicate "line detected" on the LED belt.
            microbot.belt_setPixelRGB(microbot.LightsBelt.LIGHT1, RGBColors.BLUE)
            microbot.belt_showLight()
            夹角 = WonderCam.start_xof_line_id(WonderCam.Line_Options.THETA, 1)
            偏移 = WonderCam.start_xof_line_id(WonderCam.Line_Options.RHO, 1)
            # PD-style steering: proportional on offset, derivative on its
            # change, plus the line angle as a feed-forward term.
            转向速度1 = 偏移*0.02+(偏移-上次的偏移)*0.008+夹角+0.01
            上次的偏移 = 偏移
basic.forever(on_forever2)
|
import random
from rsa import is_probably_prime
from rsa import get_inverse
import hashlib
class DSA:
    """DSA domain-parameter generation and signing (FIPS 186-style).

    `is_probably_prime` and `get_inverse` come from the local `rsa` module.
    """

    def __init__(self, hash_algorithm, seed_length):
        """
        hash_algorithm -- any name accepted by hashlib.new (e.g. 'sha1').
        seed_length    -- bit length of the domain-parameter seed.
        """
        self._hashalg = hash_algorithm
        # Hash output length in bits (outlen in FIPS 186 notation).
        self._outlen = hashlib.new(self._hashalg).digest_size * 8
        print('HASH LENGTH =', self._outlen)
        self._seedlen = seed_length
        self._domain_parameter_seed = 0
        self._p = 0
        self._q = 0

    def Hash(self, x):
        """Hash the integer *x*; return the digest as an integer.

        Fix: a fresh hash object is created per call. The original reused
        one shared object and update()d it cumulatively, so hashing the
        same value twice gave different digests.
        """
        # Enough bytes for any seedlen-bit value (was hard-coded to 20).
        nbytes = max((self._seedlen + 7) // 8, (int(x).bit_length() + 7) // 8, 1)
        h = hashlib.new(self._hashalg)
        h.update(int(x).to_bytes(nbytes, 'big'))
        return int(h.hexdigest(), 16)

    def generate_p_q(self, L, N):
        """Generate primes p (L bits) and q (N bits) with q | p - 1.

        Follows the FIPS 186 Appendix A.1.1.2 construction: q is derived
        from a random seed, then L-bit candidates p are assembled from
        chained hashes until one is prime and congruent to 1 mod 2q.
        """
        n = int(L / self._outlen) - 1
        b = L - 1 - (n * self._outlen)
        flag = False
        while not flag:
            # Draw seeds until Hash(seed) yields a probable prime q.
            while True:
                self._domain_parameter_seed = random.getrandbits(self._seedlen)
                print('SEED =', self._domain_parameter_seed)
                tmp1 = pow(2, N - 1)
                U = self.Hash(self._domain_parameter_seed) % tmp1
                # Force q odd and into the N-bit range.
                q = tmp1 + U + 1 - (U % 2)
                if is_probably_prime(q, 38):
                    break
            offset = 1
            for counter in range(0, 4 * L):
                print(counter)
                # Assemble an L-bit candidate X from n+1 chained hash blocks.
                V = [self.Hash((self._domain_parameter_seed + offset + j) % pow(2, self._seedlen))
                     for j in range(n + 1)]
                W = V[0] + sum([V[j] * pow(2, j * self._outlen) for j in range(1, n)]) \
                    + (V[n] % pow(2, b)) * pow(2, n * self._outlen)
                tmp2 = pow(2, L - 1)
                X = W + tmp2
                # Round X down so that p = 1 (mod 2q); then q divides p - 1.
                c = X % (2 * q)
                p = X - (c - 1)
                if (p >= tmp2) and (is_probably_prime(p, 38)):
                    flag = True
                    self._p = p
                    self._q = q
                    print('counter =', counter)
                    print('p =', p)
                    print('q =', q)
                    break
                offset = offset + n + 1

    def get_sign(self, M):
        """Sign message *M* (str or bytes); return the (r, s) pair.

        Fixes over the original:
        * generator exponent is (p-1)//q -- the original used
          (p-1)*get_inverse(q, p), a multiple of p-1, so g was always 1
          and the rejection loop never terminated;
        * r = (g^k mod p) mod q -- the original built a tuple and left the
          astronomically large g**k unreduced;
        * the message digest is a real hash call -- the original invoked
          the non-callable hash object self._hash(M);
        * k and x are drawn from [1, q-1]; 0 is not a valid value.
        """
        # g = h^((p-1)/q) mod p; q divides p - 1 by construction above.
        e = (self._p - 1) // self._q
        g = 0
        while True:
            h = random.randint(2, self._p - 2)
            g = pow(h, e, self._p)
            if g > 1:
                break
        k = random.randint(1, self._q - 1)
        r = pow(g, k, self._p) % self._q
        print('r =', r)
        # Ad-hoc private key x, as in the original design.
        x = random.randint(1, self._q - 1)
        data = M if isinstance(M, bytes) else str(M).encode()
        z = int(hashlib.new(self._hashalg, data).hexdigest(), 16)
        s = (get_inverse(k, self._q) * (z + x * r)) % self._q
        print('s =', s)
        return r, s
if __name__ == "__main__":
    # Demo: 1024-bit p / 160-bit q with SHA-1, then sign a sample message.
    # Guarded so importing this module does not trigger the (slow)
    # parameter generation as a side effect.
    a = DSA('sha1', 160)
    a.generate_p_q(1024, 160)
    a.get_sign('lol')
|
import sqlite3
def banco():
    """Create an in-memory table, insert three rows, and return the count row.

    Prints each row of the SELECT count(*) result and returns the last one
    (a 1-tuple such as (3,)).
    """
    conn = sqlite3.connect(':memory:')
    c = conn.cursor()
    c.execute('''CREATE TABLE teste (id integer, nome text not null)''')
    statements = (
        "INSERT INTO teste VALUES (1, 'felipe')",
        "INSERT INTO teste VALUES (2, 'dias')",
        "INSERT INTO teste VALUES (3, 'Impacta')",
    )
    for stmt in statements:
        c.execute(stmt)
    c.execute('''SELECT count(*) FROM teste''')
    result = None
    for row in c.fetchall():
        result = row
        print(row)
    conn.commit()
    conn.close()
    return result
def test_banco():
    """banco() must report exactly three inserted rows."""
    expected = (3,)
    assert banco() == expected, 'Deveria ser 3'
|
from setuptools import setup, find_namespace_packages
# Packaging script for json2rst (JSON -> reStructuredText list-tables).
# The PyPI long description is the README verbatim.
with open("README.rst", "r", encoding="utf-8") as fh:
    long_description = fh.read()
setup(
    name="json2rst", # Replace with your own username
    version="0.0.1-dev",
    author="zed tan",
    author_email="zed@shootbird.work",
    description="Inelegant tool for converting JSON to rST list-tables",
    long_description=long_description,
    long_description_content_type="text/x-rst",
    url="https://github.com/zeddee/bad-json-to-rst-tables",
    project_urls={
        "Bug Tracker": "https://github.com/zeddee/bad-json-to-rst-tables/issues",
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
    # Packages live at the repository root.
    package_dir={"": "."},
    packages=find_namespace_packages(),
    python_requires=">=3.6",
)
|
import numpy as np
import string, glob, sys
from collections import Counter
alphabet = string.ascii_lowercase
# Confusion-count tables for a noisy-channel spelling corrector
# (CSV files with a header row/column that is stripped below).
deletion_table_file = '/home/yao/Code/ConfusionTables/deletiontable.csv'
insertion_table_file = '/home/yao/Code/ConfusionTables/insertionstable.csv'
substitution_table_file = '/home/yao/Code/ConfusionTables/substitutionstable.csv'
reversal_table_file = '/home/yao/Code/ConfusionTables/transpositionstable.csv'
deletion_table = np.genfromtxt(deletion_table_file, delimiter=',')
insertion_table = np.genfromtxt(insertion_table_file, delimiter=',')
substitution_table = np.genfromtxt(substitution_table_file, delimiter=',')
reversal_table = np.genfromtxt(reversal_table_file, delimiter=',')
# Drop the first row and column (labels from the CSV header).
deletion_table = np.delete(np.delete(deletion_table, 0, axis=0), 0, axis=1)
insertion_table = np.delete(np.delete(insertion_table, 0, axis=0), 0, axis=1)
substitution_table = np.delete(np.delete(substitution_table, 0, axis=0), 0, axis=1)
reversal_table = np.delete(np.delete(reversal_table, 0, axis=0), 0, axis=1)
# 'a'..'z' -> 0..25; the empty string (word boundary) gets index 26.
char2idx = dict((char, ord(char) - ord('a')) for char in alphabet)
char2idx[''] = 26
def Del(x, y):
    # Deletion confusion count for the pair (x, y) -- presumably "xy typed
    # as x" per the Kernighan-Church-Gale model; confirm against table source.
    return deletion_table[char2idx[x],char2idx[y]]
def Add(x, y):
    # Insertion confusion count for (x, y) -- presumably "x typed as xy".
    return insertion_table[char2idx[x],char2idx[y]]
def Sub(x, y):
    # Substitution confusion count for (x, y) -- presumably "y typed as x".
    return substitution_table[char2idx[x],char2idx[y]]
def Rev(x, y):
    # Transposition confusion count for (x, y) -- presumably "xy typed as yx".
    return reversal_table[char2idx[x],char2idx[y]]
# Dictionary: union of the four wordlist files; entries look like
# "word/flags", so everything after the first '/' is stripped.
all_words_file = ['/home/yao/Downloads/NLP/spell_correction/english.0',
                  '/home/yao/Downloads/NLP/spell_correction/english.1',
                  '/home/yao/Downloads/NLP/spell_correction/english.2',
                  '/home/yao/Downloads/NLP/spell_correction/english.3']
all_words = []
for file in all_words_file:
    with open(file) as f:
        words = f.read().split()
        all_words += ([w.split('/')[0] for w in words])
all_words = set(all_words)
all_words_join = ' '.join(all_words)
# Character and bigram frequencies over the dictionary text; these are the
# denominators used by Prob().
chars_x = dict((x, all_words_join.count(x)) for x in alphabet)
chars_xy = dict((x+y, all_words_join.count(x+y)) for x in alphabet for y in alphabet)
# Python 2: dict.items() returns lists, so '+' concatenates them.
chars = dict(chars_x.items() + chars_xy.items())
# Word frequencies from the AP-8802 news corpus (the language-model prior).
freq = Counter()
for file in glob.glob("/home/yao/Downloads/NLP/spell_correction/ap8802*"):
    with open(file) as f:
        freq += Counter(f.read().split())
def Prob(word, x, y, trans):
    """Noisy-channel score: P(word) * P(observed typo | intended word).

    (freq[word]+0.5)/sum(freq.values()) is the add-half-smoothed word
    prior; the second factor normalizes the confusion count by the
    matching character/bigram frequency. Returns None for an unknown
    *trans* value.
    """
    if trans == 'deletion':
        return (freq[word]+0.5)/sum(freq.values())*(Del(x, y)/chars[x+y])
    elif trans == 'insertion':
        return (freq[word]+0.5)/sum(freq.values())*(Add(x, y)/chars[x])
    elif trans == 'substitution':
        return (freq[word]+0.5)/sum(freq.values())*(Sub(x, y)/chars[y])
    elif trans == 'reversal':
        return (freq[word]+0.5)/sum(freq.values())*(Rev(x, y)/chars[x+y])
def Deletions(word):
    """Dictionary words obtainable by deleting one character of *word*.

    Returns (candidate, probability) pairs.

    Fixes: the membership test used `a+b[i:]` where `i` was no longer the
    split position (in Python 2 it leaked from the comprehension above and
    pointed at the last split) instead of the deletion candidate a+b[1:];
    and a[-1] raised IndexError for a deletion at the word start, although
    the confusion tables reserve the '' entry (index 26) for that case.
    """
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes = [(a+b[1:], Prob(a+b[1:], (a[-1] if a else ''), b[0], 'deletion'))
               for a, b in splits if b and a+b[1:] in all_words]
    return deletes
def Insertions(word):
    """Dictionary words obtainable by inserting one letter into *word*.

    Returns (candidate, probability) pairs.

    Fix: a[-1] raised IndexError for an insertion before the first
    character (empty prefix); the confusion tables reserve the '' entry
    (index 26) for exactly that case.
    """
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    inserts = [(a+c+b, Prob(a+c+b, (a[-1] if a else ''), c, 'insertion'))
               for a, b in splits for c in alphabet if a+c+b in all_words]
    return inserts
def Substitutions(word):
    """Dictionary words obtainable by replacing one letter of *word*."""
    pairs = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    candidates = []
    for prefix, rest in pairs:
        if not rest:
            continue
        for letter in alphabet:
            candidate = prefix + letter + rest[1:]
            if candidate in all_words:
                candidates.append(
                    (candidate, Prob(candidate, rest[0], letter, 'substitution')))
    return candidates
def Reversals(word):
    """Dictionary words obtainable by transposing two adjacent letters."""
    pairs = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    candidates = []
    for prefix, rest in pairs:
        # Need at least two characters after the split point to swap.
        if len(rest) <= 1:
            continue
        swapped = prefix + rest[1] + rest[0] + rest[2:]
        if swapped in all_words:
            candidates.append((swapped, Prob(swapped, rest[0], rest[1], 'reversal')))
    return candidates
def Correct(word):
    """Rank all single-edit dictionary candidates for *word*, best first."""
    candidates = []
    for generate in (Deletions, Insertions, Substitutions, Reversals):
        candidates += generate(word)
    candidates.sort(key=lambda pair: pair[1], reverse=True)
    return candidates
if __name__ == "__main__":
    # Correct each word given on the command line; print the candidates,
    # best first, on one line (Python 2 print statement).
    words = sys.argv[1:]
    for word in words:
        corr = Correct(word)
        for c in corr:
            print c[0],
|
#!/usr/bin/env python
# coding: utf-8
# Notebook export: three ways of styling line plots in matplotlib.
# In[2]:
import matplotlib.pyplot as plt
import numpy as np #install using pip ==> pip install numpy
# ## Line and setp methods
# ### Recap of the line plot
# In[4]:
x = np.arange(10)
y1 = [1, 9, 7, 10, 3, 16, 2, 20, 5, 22]
y2 = [11, 22, 7, 1, 14, 6, 8, 2, 15, 3]
# Per-call styling: dashed red / dashed blue with different widths.
plt.plot(x, y1, '--r', linewidth = 4)
plt.plot(x, y2, '--b', linewidth = 3)
plt.show()
# ### Assign plots to lines variable and apply individual settings
# In[5]:
x = np.arange(10)
y1 = [1, 9, 7, 10, 3, 16, 2, 20, 5, 22]
y2 = [11, 22, 7, 1, 14, 6, 8, 2, 15, 3]
# plot() with two x/y pairs returns a list of Line2D objects.
lines = plt.plot(x, y1, '--r', x, y2, '-b')
plt.show()
# ### Use setp method to apply bulk settings using single statement
# In[7]:
x = np.arange(10)
y1 = [1, 9, 7, 10, 3, 16, 2, 20, 5, 22]
y2 = [11, 22, 7, 1, 14, 6, 8, 2, 15, 3]
lines = plt.plot(x, y1, x, y2)
# setp applies the same properties to every line in the list at once.
plt.setp(lines, color='g', linewidth=5)
plt.show()
|
from collections import Counter
from itertools import product
import numpy as np
from sklearn.metrics import accuracy_score
def get_unique_proba(labels):
    """Map each distinct label to its relative frequency in *labels*."""
    n = len(labels)
    counts = Counter(labels)
    return {label: count / n for label, count in counts.items()}
def gini_impurity(labels):
    """Gini impurity of a label multiset: 1 - sum_i p_i**2."""
    probs = np.fromiter(get_unique_proba(labels).values(), dtype=float)
    return 1 - np.sum(probs ** 2)
def partition(data, q):
    """Split row indices of *data* by the boolean answer to question *q*."""
    answers = q.check(data)
    true_idx = np.flatnonzero(answers)
    false_idx = np.flatnonzero(~answers)
    return true_idx, false_idx
def weighted_gini_impurity(true, false):
    """Impurity of a split: branch impurities weighted by branch size."""
    total = len(true) + len(false)
    weight_true = len(true) / total
    impurity_true = gini_impurity(true)
    impurity_false = gini_impurity(false)
    return weight_true * impurity_true + (1 - weight_true) * impurity_false
class Question():
    """A threshold test on one column: asks whether x[:, column] < value."""

    def __init__(self, column, value):
        self.column = column
        self.value = value

    def check(self, x):
        """Return a boolean mask over the rows of *x*."""
        threshold = self.value
        return x[:, self.column] < threshold

    def __repr__(self):
        return f"Q: x[{self.column}] < {self.value}"
def generate_questions(data, num_features):
    """Yield candidate threshold Questions for a random subset of columns."""
    total_cols = data.shape[-1]
    chosen = np.random.choice(range(total_cols), num_features, replace=False)
    for col in chosen:
        values = np.unique(data[:, col])
        # A column with a single distinct value cannot split the data.
        if len(values) >= 2:
            for threshold in values:
                yield Question(col, threshold)
class DecisionNode():
    """Internal tree node: routes rows by a Question to two subtrees."""

    def __init__(self, true, false, q):
        self.true = true
        self.false = false
        self.q = q

    def make(self, data):
        """Predict labels for *data* by dispatching rows to each branch."""
        true_idx, false_idx = partition(data, self.q)
        predictions = np.zeros(data.shape[0])
        predictions[true_idx] = self.true.make(data[true_idx])
        predictions[false_idx] = self.false.make(data[false_idx])
        return predictions

    def __get_str__(self, level=1):
        indent = ' ' * level
        parts = [str(self.q),
                 indent + 'T: ' + self.true.__get_str__(level + 1),
                 indent + 'F: ' + self.false.__get_str__(level + 1)]
        return '\n'.join(parts)

    def __repr__(self):
        return self.__get_str__()
class LeafNode():
    """Terminal node: always predicts the majority class."""

    def __init__(self, probas):
        # Keep only the most probable label from the class distribution.
        best_label, _ = max(probas.items(), key=lambda item: item[1])
        self.result = best_label

    def make(self, data):
        # Same prediction for every row of data.
        return np.array([self.result] * data.shape[0])

    def __get_str__(self, level):
        return str(self)

    def __repr__(self):
        return str(self.result)
class Tree():
    """CART-style decision-tree classifier built from Questions scored by
    gini-impurity information gain."""

    def __init__(self):
        # Root node (DecisionNode or LeafNode) and expected input width.
        self.tree = None
        self.num_features = None

    def preprocess_and_assert(func):
        """Decorator (defined inside the class body): coerce inputs to
        numpy arrays and validate their shapes before calling *func*."""
        def call(self, data, labels = None, *kargs, **kwargs):
            # preprocess: accept lists/tuples as well as arrays
            data = np.array(data)
            if labels is not None:
                labels = np.array(labels)
            # assert shapes are consistent before touching the model
            self.assert_data_format(data, labels)
            # "not labels is not None" means labels is None (predict path)
            if not labels is not None:
                return func(self, data)
            return func(self, data, labels, *kargs, **kwargs)
        return call

    def assert_data_format(self, data, labels=None):
        """Check data is 2-D (matching the fitted width) and labels 1-D."""
        assert len(data.shape) == 2
        if self.num_features:
            assert data.shape[1] == self.num_features
        if labels is not None:
            assert len(labels.shape) == 1
            assert labels.shape[0] == data.shape[0]

    @preprocess_and_assert
    def predict(self, data):
        """Predict one label per row by routing rows through the tree."""
        return self.tree.make(data)

    def __max_num_features__(self, mf):
        """Resolve the max_num_features option (int / float fraction /
        'sqrt' / 'log2' / anything else = all features)."""
        if isinstance(mf, int):
            return mf
        if isinstance(mf, float):
            return int(np.round(self.num_features * mf))
        if mf == 'sqrt':
            return int(np.round(np.sqrt(self.num_features)))
        if mf == 'log2':
            return int(np.round(np.log2(self.num_features)))
        return self.num_features

    @preprocess_and_assert
    def fit(self, data, labels, max_depth=10, min_samples=1, \
            max_num_features='sqrt'):
        """Grow the tree recursively from (data, labels)."""
        self.num_features = data.shape[1]
        self.max_depth = max_depth
        self.min_samples = min_samples
        self.max_num_features = self.__max_num_features__(max_num_features)
        self.tree = self.__build_node__(data, labels)
        # print(self.tree)

    @preprocess_and_assert
    def evaluate(self, data, labels):
        """Accuracy of predictions on (data, labels)."""
        predictions = self.predict(data)
        return accuracy_score(labels, predictions)

    @staticmethod
    def __evaluate_question__(data, labels, initial_impurity, q):
        """Score question *q* by its information gain over this subset."""
        # Partitonate indexes by question
        true_idx, false_idx = partition(data, q)
        # Compute weighted sum of gini impurity for both subsets
        weighted_partition_impurity = \
            weighted_gini_impurity(labels[true_idx], labels[false_idx])
        # Information gain after answering question
        information_gain = initial_impurity - weighted_partition_impurity
        return (information_gain, q)

    def __build_node__(self, data, labels, depth=0):
        """Recursively grow a node; returns DecisionNode or LeafNode."""
        # Compute initial gini impurity
        impurity = gini_impurity(labels)
        information_gain, question = None, None
        # If labels subset has impurity
        if impurity > 0 and depth < self.max_depth and data.shape[0] > self.min_samples:
            # Evaluate every posible question checking information gain on data subset
            questions_evaluation = \
                [Tree.__evaluate_question__(data, labels, impurity, q)
                 for q in generate_questions(data, self.max_num_features)]
            # There may be no question to evaluate (Unseparable data -> Leaf Node)
            if questions_evaluation:
                # Save question which gives max information gain
                information_gain, question = \
                    max(questions_evaluation, key=lambda x:x[0])
            # There may be no information gain on every question (Unseparable data -> Leaf Node)
            if information_gain:
                # Decision Node build on top of new evaluation on new data subsets
                true_idx, false_idx = partition(data, question)
                true = self.__build_node__(data[true_idx], labels[true_idx], depth+1)
                false = self.__build_node__(data[false_idx], labels[false_idx], depth+1)
                return DecisionNode(true, false, question)
        # if data.shape[0] <= self.min_samples and impurity != 0:
        #     print("Failed on min_samples", data.shape[0], "depth", depth+1)
        # Leaf Node :labels subset has no imprity, no questions to ask or 0 information gain)
        return LeafNode(get_unique_proba(labels))
class RandomForest():
    """Bagging ensemble of Tree classifiers with per-row majority voting."""

    def __sample_size__(self, ss, total):
        """Resolve the per-tree bootstrap sample size (int / float fraction
        / anything else = auto), with a floor of 10 samples."""
        if isinstance(ss, int):
            return ss
        if isinstance(ss, float):
            return np.max([10, int(np.round(ss * total))])
        return np.max([10, int(np.round(total / self.n_estimators * 2))])

    def fit(self, data, labels, n_estimators=10, max_depth=5, min_samples=1, max_num_features='sqrt', \
            sample_size='auto'):
        """Fit n_estimators trees, each on its own bootstrap sample."""
        self.n_estimators = n_estimators
        self.max_depth = max_depth
        self.max_num_features = max_num_features
        self.min_samples = min_samples
        self.sample_size = self.__sample_size__(sample_size, data.shape[0])
        self.forest = self.__build_forest__(data, labels)

    def predict(self, data):
        """Majority vote across the per-tree predictions (integer labels)."""
        predictions = np.zeros((data.shape[0], self.n_estimators))
        for i in range(self.n_estimators):
            predictions[:, i] = self.forest[i].predict(data)
        # bincount requires integer labels; argmax picks the modal class.
        predictions = predictions.astype(int)
        return np.apply_along_axis(np.bincount, 1, predictions).argmax(axis=1)

    def evaluate(self, data, labels):
        """Accuracy on (data, labels)."""
        predictions = self.predict(data)
        return accuracy_score(labels, predictions)

    def __bagging__(self, data, labels):
        """Yield n_estimators bootstrap samples (drawn with replacement)."""
        for i in range(self.n_estimators):
            bag_idx = np.random.choice(range(data.shape[0]), self.sample_size, replace=True)
            yield data[bag_idx], labels[bag_idx]

    def __build_forest__(self, data, labels):
        """Fit one Tree per bootstrap sample and collect them."""
        forest = []
        for b_data, b_labels in self.__bagging__(data, labels):
            t = Tree()
            t.fit(b_data, b_labels, max_depth=self.max_depth, min_samples=self.min_samples, \
                  max_num_features=self.max_num_features)
            forest.append(t)
        return forest
# data = np.array([[1,1,1,2,2,2,2],[1,3,2,3,2,3,1]]).T
# labels = np.array([0,1,1,0,0,1,1])
# t = Tree()
# t.fit(data, labels)
# test = list(product([1,2], [1,2,3]))
# print(test)
# t.predict(test)
# t.evaluate(test, [0, 1, 1, 1, 0, 0])
import pandas as pd

# CSV with the class label ('M' = positive) in the last column.
data = pd.read_csv('data.csv').values
labels = np.array([_ == 'M' for _ in data[:, -1]]).astype(int)
data = data[:, :-1]
# Fix: sklearn.cross_validation was removed in scikit-learn 0.20; the same
# function has lived in sklearn.model_selection since 0.18.
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = \
    train_test_split(data, labels, test_size=0.2)

# Sweep the bagging sample size for the random forest.
t = RandomForest()
for ss in [None, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.3, 0.5]:
    t.fit(x_train, y_train, n_estimators=100, max_depth=7, min_samples=5, sample_size=ss)
    print(ss, t.evaluate(x_train, y_train), t.evaluate(x_test, y_test))

# Sweep depth / minimum leaf size for a single tree.
t = Tree()
for md in [1, 3, 5, 7, 10]:
    for ms in [1, 15, 30, 50, 70, 85, 110]:
        t.fit(x_train, y_train, max_depth=md, min_samples=ms, \
              max_num_features='sqrt')
        print(md, ms, t.evaluate(x_train, y_train), t.evaluate(x_test, y_test))
|
from dataclasses import dataclass, field
from abc import abstractmethod
from typing import List
import copy
class PersistenceProvider:
    """Storage-backend interface for dialog state.

    Implementations persist a state dict under a hierarchical path
    (a list of string segments).
    """

    @abstractmethod
    def save_state(self, path: List[str], state: dict):
        """Persist *state* under *path*."""
        pass

    @abstractmethod
    def get_state(self, path: List[str]) -> dict:
        """Return the state stored under *path* (falsy when absent).

        Consistency fix: @abstractmethod was missing here although the
        method is as mandatory as save_state. The decorator is advisory
        either way (the class does not use ABCMeta), so instantiation
        behavior is unchanged.
        """
        pass
# Template for a freshly created dialog node. Always deep-copy it before
# storing or mutating -- this module-level dict must stay pristine.
EMPTY_STATE = {
    "local": "__empty__",
    "subflows": {},
    "is_done": False,
    "return_value": None,
}
@dataclass(frozen=True)
class DialogState:
    """View onto one node of a persisted dialog-state tree.

    Wraps a PersistenceProvider plus a path; child flows are nested under
    the "subflows" key of the full state dict.
    """

    persistence: PersistenceProvider
    path: List[str] = field(default_factory=list)

    def set_default_state(self, state):
        """Initialize the local state only if it is still the empty marker."""
        if self.get_state() == "__empty__":
            self.save_state(state)

    def save_state(self, state):
        """Replace only the "local" part of the full state dict."""
        previous_state = self._get_full_state()
        new_state = {**previous_state, "local": state}
        self.persistence.save_state(self.path, new_state)

    def get_state(self):
        """Return the "local" part of the state."""
        return self._get_full_state()["local"]

    def _get_full_state(self):
        """Fetch the full state dict, lazily creating an empty one.

        Bug fix: the original persisted a deep copy of EMPTY_STATE but
        then returned the module-level template itself; callers such as
        set_return_value mutated that shared template, corrupting every
        subsequently created state.
        """
        state = self.persistence.get_state(self.path)
        if not state:
            state = copy.deepcopy(EMPTY_STATE)
            self.persistence.save_state(self.path, state)
        return state

    def subflow(self, subflow_id: str):
        """Return a DialogState scoped to the named child flow."""
        return DialogState(
            persistence=self.persistence, path=[*self.path, "subflows", subflow_id]
        )

    def set_return_value(self, return_value):
        """Record the dialog's return value and mark it done (write-once)."""
        state = self._get_full_state()
        if state["is_done"]:
            raise Exception("Dialog is done, cannot set return value")
        state["return_value"] = return_value
        state["is_done"] = True
        self.persistence.save_state(self.path, state)

    def get_return_value(self) -> object:
        """Return the recorded value; raises StopIteration until done.

        Uses _get_full_state (like is_done) so a missing state raises
        StopIteration instead of TypeError on None.
        """
        state = self._get_full_state()
        if not state["is_done"]:
            raise StopIteration("Dialog not done yet")
        return state["return_value"]

    def is_done(self) -> bool:
        """Whether a return value has been recorded for this dialog."""
        return self._get_full_state()["is_done"]
|
# this app is an instagram crawler that uses your username and password to log In and then it goes to the target username that you give to the program.
# Then it scrolls down and prints the links of all images.
# The target username is sepehr.akbarzadeh by default but you can change it.
# Developer : Shahriar Hashemi
# LinkedIn page : https://www.linkedin.com/in/shahriar-hashemi/
# Email : shriar.ha@gmail.com
from bs4 import BeautifulSoup
from selenium import webdriver
from time import sleep
class App:
    """Instagram crawler: logs in, opens the target profile, scrolls to
    load posts, then prints the image URLs found in the page source.

    NOTE(review): uses Selenium 3 find_element_by_* methods, which were
    removed in Selenium 4 -- pin selenium<4 or port to
    driver.find_element(By.XPATH, ...).
    """
    def __init__(self,username="Enter your username here",password="Enter your password here",target_username="sepehr.akbarzadeh"):
        self.username = username
        self.password = password
        self.target_username = target_username
        self.driver = webdriver.Chrome("/Users/Shahriar/Desktop/Selenium and BS projects/chromedriver.exe") #This is the path to webdriver in my PC ,you should change it and give the path of where your webdriver is located.
        self.main_url = "https://www.instagram.com"
        self.driver.get(self.main_url)
        # Fixed sleeps give the pages time to render before scraping.
        sleep(5)
        self.log_in()
        self.close_notification()
        self.go_to_target_profile()
        sleep(3)
        self.scroll_down()
        sleep(15)
        self.get_images_links()
    def close_notification(self):
        # Dismiss the "Turn on Notifications" popup if it appears.
        try:
            sleep(3)
            close_noti_btn = self.driver.find_element_by_xpath("//button[contains(text(),'Not Now')]")
            close_noti_btn.click()
            sleep(2)
        except:
            # Best-effort: the popup is not always shown.
            pass
    def get_images_links(self): #only gives 40 links because of instagram limits
        # Print the src attribute of every <img> currently in the DOM.
        soup = BeautifulSoup(self.driver.page_source, "lxml")
        all_images = soup.find_all("img")
        for image in all_images:
            print(image["src"])
    def go_to_target_profile(self):
        # Navigate the browser to the target user's profile page.
        target_profile_url = self.main_url + "/" + self.target_username + "/"
        self.driver.get(target_profile_url)
    def scroll_down(self):
        # Read the post count from the profile header, then scroll enough
        # times for the infinite-scroll feed (12 posts per chunk) to load.
        number_of_posts = self.driver.find_element_by_xpath("//span[@class='g47SY ']")
        number_of_posts = str(number_of_posts.text).replace(",","")
        number_of_posts = int(number_of_posts)
        if number_of_posts > 12:
            number_of_scrolls = (number_of_posts / 12)+3
            for i in range(int(number_of_scrolls)):
                self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                sleep(2)
    def log_in(self):
        # Open the login form and submit the stored credentials.
        login_button = self.driver.find_element_by_xpath("//a[@href='/accounts/login/?source=auth_switcher']")
        login_button.click()
        sleep(5)
        username_input = self.driver.find_element_by_xpath("//input[@name='username']")
        username_input.send_keys(self.username)
        password_input = self.driver.find_element_by_xpath("//input[@name='password']")
        password_input.send_keys(self.password)
        password_input.submit()
if __name__ == "__main__":
app = App() |
from flask import Flask, render_template, request
app = Flask(__name__)
# Expected answers for the two riddle pages below.
ans = {"ans1": "hummingbird moth", "ans2": "mimic octopus"}
def checkans(guess, ans):
    """Return True when the submitted guess exactly matches the answer
    (case-sensitive string comparison)."""
    matches = (guess == ans)
    return matches
@app.route("/")
@app.route("/home")
def home():
    """Landing page (served at both / and /home)."""
    return render_template("home.html")
@app.route("/about")
def about():
    """Static about page."""
    return render_template("about.html")
@app.route("/guess", methods = ["GET", "POST"])
@app.route("/guess/", methods = ["GET", "POST"])
def guess():
    """First riddle page: show the form on GET, check the answer on POST.

    Fix: the success snippet opened <h1> but closed </h2>; the tags now
    match.
    """
    if request.method == "GET":
        return render_template("guess.html")
    else:
        answer = request.form['answer']
        if checkans(answer, ans["ans1"]):
            return "<h1>Correct! <a href = '/guess2'>You may proceed</a></h1>"
        else:
            error = "Try again"
            return render_template("guess.html", error = error)
@app.route("/guess2", methods = ["GET", "POST"])
@app.route("/guess2/", methods = ["GET", "POST"])
def guess2():
    """Second riddle page: show the form on GET, check the answer on POST.

    Fix: the success snippet opened <h1> but closed </h2>; the tags now
    match.
    """
    if request.method == "GET":
        return render_template("guess2.html")
    else:
        answer = request.form['answer']
        if checkans(answer, ans["ans2"]):
            return "<h1>Wonderful! <a href = '/home'>Return</a></h1>"
        else:
            error = "Try again"
            return render_template("guess2.html", error = error)
if __name__ == "__main__":
    # NOTE(review): debug=True together with binding to 0.0.0.0 exposes the
    # Werkzeug debugger to the network -- development use only.
    app.debug = True
    app.run(host = '0.0.0.0', port = 8000)
|
# Generated by Django 2.0.2 on 2019-09-26 20:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make UserProfile.person an optional
    CharField(max_length=20). Generated migrations should not be
    hand-edited beyond comments."""

    dependencies = [
        ('app', '0002_auto_20190927_0044'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='person',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
    ]
|
from settings import PATH_TO_NUMPY_DATA_FOLDER
from np_datasets_wizard.utils import np_to_json
import easygui
import json
import numpy as np
def get_lead_signal(ecg, lead_name):
    """Return the raw signal sequence for one named lead of an ECG record."""
    lead = ecg['Leads'][lead_name]
    return lead['Signal']
def cut_from_signal(ecg, start_point, leads_names, patch_len):
    """Cut a patch_len-sample window from each named lead of *ecg*.

    Returns a list of slices (one per lead), or None as soon as any lead's
    window would fall outside the signal.
    """
    end_point = start_point + patch_len
    patches = []
    for name in leads_names:
        signal = ecg['Leads'][name]['Signal']
        if start_point < 0 or end_point > len(signal):
            return None
        patches.append(signal[start_point:end_point])
    return patches
def get_numpy_from_json(json_data, patch_len, leads_names):
    """Stack one fixed-length patch per patient into a single numpy array.

    Patients whose leads are shorter than patch_len are skipped
    (cut_from_signal returns None for them).
    """
    start_point = 0
    patches = []
    for patient in json_data.values():
        cutted = cut_from_signal(patient, start_point, leads_names, patch_len)
        if cutted is None:
            continue
        patches.append(cutted)
    return np.array(patches)
def select_and_load_json():
    """Ask the user to pick a JSON file via a GUI dialog and parse it."""
    file_path = easygui.fileopenbox("Select json with data")
    with open(file_path, 'r') as f:
        return json.load(f)
def make_and_save_dataset(patch_len, name, leads_names):
    """Build a numpy dataset from a user-selected JSON file and save it.

    patch_len   -- number of samples to cut from the start of each lead.
    name        -- output file name; saved as <name>.json even though the
                   payload is numpy data serialized by np_to_json.
    leads_names -- list of lead keys to extract per patient.
    """
    json_data = select_and_load_json()
    numpy_data = get_numpy_from_json(json_data, patch_len, leads_names)
    # NOTE(review): Windows-style backslash join; pathlib would be portable.
    file_path = PATH_TO_NUMPY_DATA_FOLDER + "\\" + name + ".json"
    np_to_json(numpy_data, file_path)
    print("np dataset saved to " + str(file_path))
    print("shape: " + str(numpy_data.shape))
if __name__ == "__main__":
    make_and_save_dataset(4998, "t_without_qrs_visualis", ["i"])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.