#!/usr/bin/env python
#
# Copyright (c) 2011 Polytechnic Institute of New York University
# Author: Adrian Sai-wah Tam <adrian.sw.tam@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of New York University.
#
# Traffic matrix generator
# Read in a set of nodes and give uniform random [0:1] load to every pair of
# distinct nodes. The output is suitable for use in kpath.py, ecmp.py and
# other related scripts.
#
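# For illustration only (not part of the original script): with a topology file
# containing nodes N1, N2 and N3, the output is one "<src> <dst> <load>" line per
# ordered pair of distinct nodes, e.g.
#   N1 N2 0.537284
#   N1 N3 0.082441
#   N2 N1 0.914032
#   ...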
import sys,getopt,random
###########################################################
# Global parameters
topofile = 'topology.txt' # default topology file
fullload = False # if true, the load is constantly 1
#random.seed(1) # Debug use: Uncomment this line for repeatable random numbers
optlist, userlist = getopt.getopt(sys.argv[1:], 't:1h')
for opt, optarg in optlist:
if opt == '-t':
topofile = optarg
elif opt == '-1':
fullload = True
else:
# getopt will fault for other options
print "Available options"
print " -t file : The topology file in Rocketfuel format, default is topology.txt"
print " -1 : Generate constant load of 1 for each pair of nodes"
print " -h : This help message"
sys.exit(1)
###########################################################
# Helper functions
def ReadNodes(f):
"""
Read in a Rocketfuel format topology file for the list of nodes
"""
topoFile = open(f, "r") # Topology file
nodes = [] # names of nodes
for line in topoFile:
token = line.split()
if (len(token) < 2): continue
if token[0] == "N": # specifying a node by its name
nodes.append(token[1])
topoFile.close()
return nodes
###########################################################
# Main program
# Read in nodes, for each pair of distinct nodes, create a random value
nodes = ReadNodes(topofile)
for s in nodes:
for t in nodes:
if t == s: continue
value = 1 if fullload else random.random()
print "%s %s %f" % (s, t, value)
|
def df1(n):
n1 = 1
n2 = 1
n3 = 1
L = []
if n <= 2:
for i in range(1,n+1):
L.append(1)
else:
L.extend([1,1])
while n > 2:
n3 = n1 + n2
n1 = n2
n2 = n3
n = n - 1
L.append(n3)
return L
ipt = input('Enter a positive integer: ')
L = df1(int(ipt))
print(L)
|
# Generated by Django 3.1.3 on 2020-12-05 21:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rest_api', '0020_auto_20201206_0227'),
]
operations = [
migrations.AlterField(
model_name='tag',
name='publication',
            field=models.ManyToManyField(blank=True, related_name='tags', to='rest_api.Publication', verbose_name='Publication'),
),
]
|
from playwright.sync_api import Page
class TestCases:
def __init__(self, page: Page):
self.page = page
def check_test_exists(self, test_name: str):
return self.page.query_selector(f'css=tr >> text=\"{test_name}\"') is not None
def delete_test_by_name(self, test_name: str):
row = self.page.query_selector(f'*css=tr >> text=\"{test_name}\"')
row.query_selector('.deleteBtn').click()
|
import matplotlib.pyplot as plt
import numpy as np
import uncertainties.unumpy as unp
import scipy.constants as con
from scipy.optimize import curve_fit
from scipy import stats
from uncertainties import ufloat
############################ LARGE SOLENOID ####################################
xG, BG = np.genfromtxt('data/solenoid_gross.txt', unpack=True)
# def B_ls(x,I,N,L,R): # theory curve for the long solenoid, wrong formula
# return (N * I * con.mu_0 * 6) / 2 * ((x + L / 2) / (np.sqrt(R**2 + (x + L / 2)**2)) - (x-L/2)/(np.sqrt(R**2+(x-L/2)**2)))
def Bls(x, I, N, L, R): # long solenoid
return (con.mu_0 * I * N) / (2 * L) * (x / (np.sqrt(R**2 + x**2)) + (L - x) / (np.sqrt(R**2 + (L - x)**2)))
# def Bls2(x, I, N, L, R):
# return (con.mu_0 * I * N) * (R**2 / (np.sqrt(x**2 - R * x + (5/4) * R**2)))
# paramsG, covG = curve_fit(B_ls, xG, BG) # Regression
# print('\nGrosse Spule:', '\nParameter: ', paramsG, '\nFehler: ', np.sqrt(np.diag(covG)))
# paramsG, covG = curve_fit(Bls, xG, BG) # Regression
# print('\nGrosse Spule:', '\nParameter: ', paramsG, '\nFehler: ', np.sqrt(np.diag(covG)))
I1 = 1.4
N1 = 300
L1 = 0.16
R1 = 0.0205
xls =np.linspace(8.8, 28.8, 1000)
print('\nB_Theo_Max (lange Spule): ', np.amax(Bls(xls/100, I1, N1, L1, R1)))
plt.plot(xG, BG, 'kx', label='Messwerte')
#plt.plot(xls, Bls(xls, *paramsG), label='Fit')
#plt.plot(xG, B_ls(xG, *paramsG), label='Test-Fit')
#plt.plot(x, B_ls(x/100, I1, N1, L1, R1)*10**(3), label='Theoriekurve') # x/100 because m->cm; B*1000 because T->mT
plt.plot(xls-8.8, Bls(xls/100, I1, N1, L1, R1)*10**(3), label='Theoriekurve')
#plt.plot(x, Bls2(x/100, I1, N1, L1, R1)*10**(3), label='Theorie-Test 2')
plt.xlabel(r'x \:/\: \si{\centi\meter}')
plt.ylabel(r'B \:/\: \si{\milli\tesla}')
plt.legend(loc='best')
plt.grid(True)
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('plots/solenoid_gross.pdf')
plt.close()
############################ SMALL SOLENOID ####################################
xK, BK = np.genfromtxt('data/solenoid_klein.txt', unpack=True)
I2 = 1.4
N2 = 100
L2 = 0.055
R2 = 0.0205
xks =np.linspace(-2, 18.5, 1000)
print('\nB_Theo_Max (kurze Spule): ', np.amax(Bls(xks/100, I2, N2, L2, R2)))
print('\nB_Theo_Max (kurze Spule): ', np.amax(Bls(3.5, I2, N2, L2, R2)))
plt.plot(xK, BK, 'kx', label='Messwerte')
plt.plot(xks+1.5, Bls(xks/100, I2, N2, L2, R2)*10**(3), label='Theoriekurve')
plt.xlabel(r'x \:/\: \si{\centi\meter}')
plt.ylabel(r'B \:/\: \si{\milli\tesla}')
plt.legend(loc='best')
plt.grid(True)
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('plots/solenoid_klein.pdf')
plt.close()
############################ HELMHOLTZ COIL PAIR ###############################
# def B_h(x,I,N,d,R,b): # wrong
# return (N * I * con.mu_0/2) * ((d / 2 + b / 2) / np.sqrt(R**2 + (d / 2 + b / 2)**2) - (d / 2 - b / 2) / np.sqrt(R**2 + (d / 2 - b / 2)**2) + ((-d) / 2 + b / 2) / np.sqrt(R**2 + ((-d) / 2 + b / 2)**2) - ((-d) / 2 - b / 2) / np.sqrt(R**2 + ((-d) / 2 - b / 2)**2))
def Bh(x, I, R, N, d):
return (con.mu_0 / 2) * ((N * I) / R) * (1 / (1 + ((x - (d / 2)) / R)**2)**(3/2) + 1 / (1 + ((x + d / 2) / R)**2)**(3/2))
def Bh1(x, I, R, N, d):
return (con.mu_0 / 2) * ((N * I) / R) * (1 / (1 + ((x - (d / 2)) / R)**2)**(3/2))
def Bh2(x, I, R, N, d):
return (con.mu_0 / 2) * ((N * I) / R) * (1 / (1 + ((x + d / 2) / R)**2)**(3/2))
Ih1 = 2.5 # A
Ih2 = 5.0 # A
# b = 3.3 * 10**(-2) # cm to m
N = 100
R = 6.25*10**(-2)
d1 = 6.25*10**(-2)
d2 = 12.5*10**(-2)
xf = np.linspace(0, 25, 1000)
####### 1 ####### d = 6.25 cm, I = 2.5 A
x1, B1 = np.genfromtxt('data/spulenpaar_6-25_2-5.txt', unpack=True)
paramsh1, covh1 = curve_fit(Bh, x1, B1) # Regression
# print('\nHelmholtz (d=6.25cm, I=2.5V):', '\nParameter: ', paramsh1, '\nFehler: ', np.sqrt(np.diag(covh1)))
xh1 = np.linspace(-4, 21, 1000)
print('\nB_Theo_Max (HELMHOLTZ 1): ', np.amax(Bh(xh1/100, Ih1, R, N, d1)))
plt.plot(x1, B1, 'kx', label='Messwerte')
plt.plot(xh1+4, Bh(xh1/100, Ih1, R, N, d1)*10**3, label='Theoriekurve')
# plt.plot(xh1+4, Bh1(xh1/100, Ih1, R, N, d1)*10**3, label='Einzelspule 1')
# plt.plot(xh1+4, Bh2(xh1/100, Ih1, R, N, d1)*10**3, label='Einzelspule 2')
# plt.plot(xh1+4, Bh(xh1, *paramsh1), label='Fit')
#plt.plot(xf, Bh(xf, *paramsh1), color='red', label='Fit')
#plt.plot(x1, B_h(x1/100, Ih1, N, d1, R, b)*10**3)
#plt.plot(x_hh,B_hh(x_hh/100,5,100,0.06,0.125,0.033)*1000,label='Theoriekurve') # values to be checked; dimension error, B_hh() no longer seems to return a vector, for whatever reason
plt.xlabel(r'x \:/\: \si{\centi\meter}')
plt.ylabel(r'B \:/\: \si{\milli\tesla}')
plt.xlim(0, 25.5)
plt.legend(loc='best')
plt.grid(True)
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('plots/spulenpaar1.pdf')
plt.close()
####### 2 ####### d = 6.25 cm, I = 5.0 A
x2, B2 = np.genfromtxt('data/spulenpaar_6-25_5-0.txt', unpack=True)
# paramsh2, covh2 = curve_fit(Bh, x2, B2) # Regression
# print('\nHelmholtz (d=6.25cm, I=5.0V):', '\nParameter: ', paramsh2, '\nFehler: ', np.sqrt(np.diag(covh2)))
xh2 = np.linspace(-3, 22, 1000)
print('\nB_Theo_Max (HELMHOLTZ 2): ', np.amax(Bh(xh2/100, Ih2, R, N, d1)))
plt.plot(x2, B2, 'kx', label='Messwerte')
plt.plot(xh2+3, Bh(xh2/100, Ih2, R, N, d1)*10**3, label='Theoriekurve')
# plt.plot(xh2+3, Bh1(xh2/100, Ih1, R, N, d1)*10**3, label='Einzelspule 1')
# plt.plot(xh2+3, Bh2(xh2/100, Ih1, R, N, d1)*10**3, label='Einzelspule 2')
#plt.plot(xf, Bh(xf, *paramsh2), color='red', label='Fit')
plt.xlabel(r'x \:/\: \si{\centi\meter}')
plt.ylabel(r'B \:/\: \si{\milli\tesla}')
plt.xlim(0, 25.5)
plt.legend(loc='best')
plt.grid(True)
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('plots/spulenpaar2.pdf')
plt.close()
####### 3 ####### d = 12.5 cm, I = 5.0 A
x3, B3 = np.genfromtxt('data/spulenpaar_12-5_5-0.txt', unpack=True)
#paramsh3, covh3 = curve_fit(Bh, x3, B3) # Regression
#print('\nHelmholtz (d=12.5cm, I=5.0V):', '\nParameter: ', paramsh3, '\nFehler: ', np.sqrt(np.diag(covh3)))
xh3 = np.linspace(-6, 19, 1000)
xh3b = np.linspace(0, 9, 1000)
print('\nB_Theo_Max (HELMHOLTZ 3): ', np.amin(Bh(xh3b/100, Ih2, R, N, d2)))
print('\nB_Exp_Min (HELMHOLTZ 3): ', np.amin(B3[0:33]))
plt.plot(x3, B3, 'kx', label='Messwerte')
plt.plot(xh3+6, Bh(xh3/100, Ih2, R, N, d2)*10**3, label='Theoriekurve')
# plt.plot(xh3+6, Bh1(xh3/100, Ih1, R, N, d1)*10**3, label='Einzelspule 1')
# plt.plot(xh3+6, Bh2(xh3/100, Ih1, R, N, d1)*10**3, label='Einzelspule 2')
#plt.plot(xf, Bh(xf, *paramsh3), color='red', label='Fit')
plt.xlabel(r'x \:/\: \si{\centi\meter}')
plt.ylabel(r'B \:/\: \si{\milli\tesla}')
plt.legend(loc='best')
plt.xlim(0, 25.3)
plt.grid(True)
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('plots/spulenpaar3.pdf')
plt.close()
############################## HYSTERESIS CURVE ################################
def Bt(I, mu_r, N, R):
return (con.mu_0 * mu_r * N * I) / (2 * np.pi * R)
Nt = 595
Rt = 0.13 # m
mu_r_nominal = 300  # renamed so it is not shadowed by the mu_r() function defined below
def mu_r(H, B):
return (B / H) / con.mu_0
# Initial magnetisation curve (Neukurve)
I1, Bt1 = np.genfromtxt('data/toroidalspule_neukurve.txt', unpack=True)
# Curve A
I2, Bt2 = np.genfromtxt('data/toroidalspuleA.txt', unpack=True)
# Curve B
I3, Bt3 = np.genfromtxt('data/toroidalspuleB.txt', unpack=True)
H1 = (I1 * Nt) / (2 * np.pi * Rt)
H2 = (I2 * Nt) / (2 * np.pi * Rt)
H3 = (I3 * Nt) / (2 * np.pi * Rt)
print('\nH1: ', H1, '\nH2: ', H2, '\nH3: ', H3,)
#paramsTn, covTn = curve_fit(mu_r, H1, Bt1)
#paramsTA, covTA = curve_fit(mu_r, H2, Bt2)
#paramsTB, covTB = curve_fit(mu_r, H3, Bt3)
#print('\nToroidspule:', '\nParameter: ', paramsTn, '\nFehler: ', np.sqrt(np.diag(covTn)))
xt = np.linspace(-10, 10, 1000)
#plt.plot(H1, Bt1*10**(-3), 'kx', label='Messwerte')
plt.plot(H1, Bt1*10**(-3), 'g-', label='Neukurve')
#plt.plot(H2, Bt2*10**(-3), 'kx')
plt.plot(H2, Bt2*10**(-3), 'b-', label='Hysteresekurve')
#plt.plot(H3, Bt3*10**(-3), 'kx')
plt.plot(H3, Bt3*10**(-3), 'b-')
#plt.xlabel(r'I \:/\: \si{\ampere}')
plt.xlabel(r'H \:/\: \si[per-mode=reciprocal]{\ampere\per\meter}')
plt.ylabel(r'B \:/\: \si{\tesla}')
plt.vlines(0, ymin=-800, ymax=800, linewidth=.5)
plt.hlines(0, xmin=-8000, xmax=8000, linewidth=.5)
plt.hlines(0.690, xmin=-8000, xmax=8000, linestyle='dotted', color='red', label='magnetische Sättigung')
plt.hlines(-0.690, xmin=-8000, xmax=8000, linestyle='dotted', color='red')
plt.xlim(-8000, 8000)
plt.ylim(-0.8, 0.8)
plt.yticks([-0.8, -0.690, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.690, 0.8],
[-0.8, -0.69, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.69, 0.8])
plt.legend(loc='best')
plt.grid(True)
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('plots/hysterese.pdf')
plt.close()
#### ZOOMED-IN VIEW (VERGRÖSSERUNG)
Hv1 = H1[0:3]
Btv1 = Bt1[0:3]
Hv2 = H2[9:12]
Btv2 = Bt2[9:12]
Hv3 = H3[9:12]
Btv3 = Bt3[9:12]
plt.plot(Hv1, Btv1*10**(-3), 'g-', label='Neukurve')
plt.plot(Hv2, Btv2*10**(-3), 'b-', label='Hysteresekurve')
plt.plot(Hv3, Btv3*10**(-3), 'b-')
plt.xlabel(r'H \:/\: \si[per-mode=reciprocal]{\ampere\per\meter}')
plt.ylabel(r'B \:/\: \si{\tesla}')
plt.vlines(0, ymin=-0.35, ymax=0.35, linewidth=.7)
plt.hlines(0, xmin=-728.5, xmax=728.5, linewidth=.7)
plt.vlines(-448.5503372, ymin=-0.35, ymax=0.35, linestyle='dotted', color='red', label=r'Koerzitivkraft $\pm H_{\text{C}}$')
plt.vlines(448.5503372, ymin=-0.35, ymax=0.35, linestyle='dotted', color='red')
plt.hlines(0.1234, xmin=-728.5, xmax=728.5, linestyle='dotted', color='darkorange', label=r'Remanenz $\pm B_{\text{R}}$')
plt.hlines(-0.1234, xmin=-728.5, xmax=728.5, linestyle='dotted', color='darkorange')
plt.xlim(-728.5, 728.5)
plt.ylim(-0.35, 0.35)
plt.xticks([-600, -448.5503372, -200, 0, 200, 448.5503372, 600],
[-600, -448.6, -200, 0, 200, 448.6, 600])
plt.yticks([-0.3, -0.2, -0.1234, 0, 0.1234, 0.2, 0.3],
[-0.3, -0.2, -0.12, 0, 0.12, 0.2, 0.3])
plt.legend(loc='best')
plt.grid(True)
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('plots/hysterese_gross.pdf')
plt.close()
|
from . mab import Bandit, SimpleBandit, GradientBandit
from . callback import Callback
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 12 14:00:59 2017
Code for degrading images - DO NOT RUN ON PPPXEE
@author: ppxee
"""
### Import required libraries ###
import matplotlib.pyplot as plt #for plotting
from astropy.io import fits #for handling fits
import numpy as np #for handling arrays
from astropy.convolution import Gaussian2DKernel
from astropy.convolution import convolve
plt.close('all') #close any open plots
#from scipy import ndimage
#import math
#from astropy.stats import median_absolute_deviation
def FWHM2sigma(FWHM, const):
    ''' Function to convert the FWHM of a distribution into a sigma for that
    distribution. It assumes the distribution is Gaussian.
    Input:
        FWHM = full width at half maximum of a distribution (in my case usually
               of an object from SExtractor)
        const = unit-conversion constant that the FWHM is divided by first
                (here the pixel scale taken from the image header)
    Output:
        sigma = standard deviation of a Gaussian distribution with the given
                FWHM. This roughly equates to the PSF of the object. '''
FWHM /= const
return FWHM/np.sqrt(8*np.log(2))
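# Quick sanity check (illustrative numbers, not from the data): for a Gaussian,
# FWHM = 2*sqrt(2*ln 2)*sigma ≈ 2.3548*sigma, so FWHM2sigma(2.3548, 1.0) ≈ 1.0.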
def fluxrad2sigma(fluxrad):
return fluxrad/np.sqrt(8*np.log(2))
sem05B = fits.open('SE_outputs_yearstacks/05B_output.fits')[1].data
sem07B = fits.open('SE_outputs_yearstacks/07B_output.fits')[1].data
sem08B = fits.open('SE_outputs_yearstacks/08B_output.fits')[1].data
sem09B = fits.open('SE_outputs_yearstacks/09B_output.fits')[1].data
sem10B = fits.open('SE_outputs_yearstacks/10B_output.fits')[1].data
sem11B = fits.open('SE_outputs_yearstacks/11B_output.fits')[1].data
sem12B = fits.open('SE_outputs_yearstacks/12B_output.fits')[1].data
hdr08B = fits.getheader('UDS_08B_K.fits') # random year (same in all)
const = -hdr08B['CD1_1'] # constant that defines unit conversion for FWHM
colname = 'FLUX_RADIUS'#'FWHM_WORLD'
#data = sem05B[colname][:,1]
#Extract the flux radii and remove negative values
fluxrad05 = sem05B[colname][:,1]
fluxrad05 = fluxrad05[fluxrad05>0]
fluxrad07 = sem07B[colname][:,1]
fluxrad07 = fluxrad07[fluxrad07>0]
fluxrad08 = sem08B[colname][:,1]
fluxrad08 = fluxrad08[fluxrad08>0]
fluxrad09 = sem09B[colname][:,1]
fluxrad09 = fluxrad09[fluxrad09>0]
fluxrad10 = sem10B[colname][:,1]
fluxrad10 = fluxrad10[fluxrad10>0]
fluxrad11 = sem11B[colname][:,1]
fluxrad11 = fluxrad11[fluxrad11>0]
fluxrad12 = sem12B[colname][:,1]
fluxrad12 = fluxrad12[fluxrad12>0]
#Put the median flux radius of each semester into an array
avgFR = np.array([np.median(fluxrad05), np.median(fluxrad07),
np.median(fluxrad08), np.median(fluxrad09),
np.median(fluxrad10), np.median(fluxrad11),
np.median(fluxrad12)])
avgFWHM = avgFR*2
### Find maximum FWHM as this is what all the others will become ###
aimind = np.argmax(avgFWHM)
#
### Convert FWHM into a sigma ###
sigmaold = np.array([fluxrad2sigma(fwhm) for fwhm in avgFWHM])
sigmabroad = sigmaold[aimind]
### Find required sigma ###
# sig_kernel^2 = sig_broad^2 - sig_narrow^2
sigmakernel = np.array([np.sqrt(sigmabroad**2 - sigma**2) for sigma in sigmaold])
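# Why the quadrature difference above works (sketch with made-up numbers): convolving a
# Gaussian PSF of width sigma_narrow with a Gaussian kernel of width sigma_kernel gives a
# Gaussian of width sqrt(sigma_narrow**2 + sigma_kernel**2), because variances add under
# convolution. E.g. to broaden sigma_narrow = 3.0 up to sigma_broad = 5.0 you need
# sigma_kernel = np.sqrt(5.0**2 - 3.0**2) = 4.0.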
def convolve_image(filename, sigmakernel):
### Open image ###
im05Bfull = fits.open(filename, memmap=True)
im05B = im05Bfull[0].data
hdr5 = im05Bfull[0].header
### Convolve Image ###
print('Convolving', filename)
kernel = Gaussian2DKernel(sigmakernel)
plt.figure()
plt.imshow(kernel)
# newim05B = convolve(im05B, kernel, normalize_kernel=True)
# ### Save the file ###
# hdu = fits.PrimaryHDU(newim05B, header=hdr5)
# hdulist = fits.HDUList([hdu])
# newfilename = 'newFR_' + filename
# hdulist.writeto(newfilename, overwrite=True)
# im05Bfull.close()
# del im05Bfull[0].data
convolve_image('UDS_05B_K.fits', sigmakernel[0])
convolve_image('UDS_08B_K.fits', sigmakernel[2])
convolve_image('UDS_09B_K.fits', sigmakernel[3])
convolve_image('UDS_10B_K.fits', sigmakernel[4])
convolve_image('UDS_11B_K.fits', sigmakernel[5])
convolve_image('UDS_12B_K.fits', sigmakernel[6])
|
# This is a sample script that operates on a txt file.
class TxtFileOperate(object):
    def __init__(self, fileName, mode='w+'):
        self.fileName = fileName
        self.fp = open(fileName, mode)
    def OpenFile(self, fileName, mode='w+'):
        self.fileName = fileName
        self.fp = open(fileName, mode)
    def Close(self):
        self.fp.close()
    def Write(self, text):
        self.fp.write(text)
    def Read(self, size):
        return self.fp.read(size)
test = TxtFileOperate("test.txt")
test.Write("Test")
test.Close()
# Reopen in read mode so the file is not truncated before reading it back.
test = TxtFileOperate("test.txt", 'r')
buf = test.Read(100)
print(buf)
test.Close()
|
# The # sign is used to write comments in your code, to help you keep track of what it does
# The most common Python versions are 2 and 3
# Getting started with Python is very easy
# The first lesson: the arithmetic operators [+, -, /, *]
# Writing a program in a file is a little different from the Python shell:
# to print any word or number, use print()
#Addition:
print(2+2)
print(545446+545454)
print(25+(254+54))
#Subtraction:
print(5-5)
print(55-464)
print(9898-(5454-121))
#Dividing
print(5/5)
print(898/(5757+8742-447))
#Multiplication
print(8*8)
print(545/(55*555))
#String repetition (not exponentiation or concatenation)
# * normally means multiplication; when one operand is in quotes it repeats a string instead
print(4* '5')
print(55* "5") #it doesn't matter which quote mark you use
#this prints the character '5' four times (and then 55 times), not four times five; when you
#see quote marks the value is a string, so * means repetition rather than multiplication
#the arithmetic operators work on any numbers, including ones with decimal points
#if you want to use text in Python, use strings
print("Example: when a sentence is too long we can use \n to break it into two lines")
print("you can't use the \n break on numbers; it will cause an error, especially in calculations")
#Input
name = input("Enter a name: ")   # don't assign to the name `input`, or the built-in function is shadowed
print(name)
#you can use input() to read whatever the user types
input1 = input("Enter a name: ")
input2 = input("Enter another number: ")
print("This is what you entered" + " " + input1 + " " + input2)
#to understand this better, let's look at another example
#to get a deeper understanding, we have integers, floats and strings
#strings are words
#int() and float() can be used to turn strings into numbers
print(int("4") + int("5"))
#this works because each string contains only a number; now let's take the numbers from user input:
print(float(input("Enter a number: ")) + float(input("Enter another number: ")))
#this can be used to feed any information into the code
#another way to do it is:
x = input("Enter a number: ")
print(x)
y = input("Enter another number: ")
print(y)
result = float(x) + float(y)
print(result)
#variables
# variables play a big role in Python, as in any other programming language; here are some examples
var = 5
print(var)
del var #this deletes the variable you declared before
var = 12
print(var) #now it will print the newly assigned variable
#you can use this for any value you want to store
|
class Tool(object):
    # Define a class attribute with an assignment statement; it records how many Tool objects exist
count = 0
def __init__(self, name):
self.name = name
        # Increment the class attribute by 1
Tool.count += 1
# Create tool objects
tool1 = Tool("刀")
tool2 = Tool("剑")
tool3 = Tool("钉")
# Print the total number of tool objects: ClassName.class_attribute
# print(Tool.count)
# Not recommended: accessing the class attribute through an instance (instance.class_attribute)
tool3.count = 99
print("Total number of tool objects: %d" % tool3.count)
print("---> %d" % Tool.count)
|
import json
import sys
args = sys.argv
with open(args[1], "r") as f:
contents = json.load(f)
with open(args[2], "r") as f:
second = json.load(f)
contents["tests"].extend(second["tests"])
cleaned = {}
for m in contents['tests']:
l = cleaned.get(m['id'], [])
l.append(m)
cleaned[m['id']] = l
for k in cleaned.keys():
if len(cleaned[k]) > 1:
if cleaned[k][0].get('data', {}).get('status') == cleaned[k][1].get('data', {}).get('status') and cleaned[k][0].get('data', {}).get('execution_problem') == cleaned[k][1].get('data', {}).get('execution_problem'):
cleaned[k] = [cleaned[k][1]]
elif 'unreachable' in cleaned[k][0].get('data', {}).get('execution_problem', '') or 'aborted' in cleaned[k][0].get('data', {}).get('execution_problem', ''):
cleaned[k] = [cleaned[k][1]]
elif 'unreachable' in cleaned[k][1].get('data', {}).get('execution_problem', '') or 'aborted' in cleaned[k][1].get('data', {}).get('execution_problem', ''):
cleaned[k] = [cleaned[k][0]]
elif cleaned[k][0].get('data', {}).get('status') == 'success':
cleaned[k] = [cleaned[k][0]]
elif cleaned[k][1].get('data', {}).get('status') == 'success':
cleaned[k] = [cleaned[k][1]]
elif cleaned[k][0].get('data', {}).get('status') == 'error' and cleaned[k][1].get('data', {}).get('status') != 'error':
cleaned[k] = [cleaned[k][1]]
elif cleaned[k][1].get('data', {}).get('status') == 'error' and cleaned[k][0].get('data', {}).get('status') != 'error':
cleaned[k] = [cleaned[k][0]]
elif cleaned[k][1].get('data', {}).get('status') == 'error' and cleaned[k][0].get('data', {}).get('status') == 'error':
if len(cleaned[k][0].get('data', {}).get('execution_problem')) > len(cleaned[k][1].get('data', {}).get('execution_problem')):
cleaned[k] = [cleaned[k][0]]
else:
cleaned[k] = [cleaned[k][1]]
else:
print("unhandled case")
print(str(k))
print(cleaned[k][0].get('data', {}).get('status'))
print(cleaned[k][1].get('data', {}).get('status'))
print(cleaned[k][0].get('data', {}).get('execution_problem'))
print(cleaned[k][1].get('data', {}).get('execution_problem'))
end = []
for k in cleaned.keys():
if len(cleaned[k]) == 1:
end.append(cleaned[k][0])
else:
print(k)
print(cleaned[k])
contents['tests'] = end
with open(args[3], 'w') as f:
json.dump(contents, f)
|
import graphics
from random import randint
STARTING_VELOCITY = 11
SHIELD_REGEN = 2.5
BASE_THRUST = 1
HEALTH_MODIFIER = 3
class Ship:
def __init__(self, h, a, s, p, t, r, n):
self.health = h * HEALTH_MODIFIER
self.max_health = self.health
self.armor = a
self.shields = s * HEALTH_MODIFIER
self.power = p
self.thrust = t * BASE_THRUST
self.radius = r
self.name = n
if s == 0:
self.max_shields = 1
else:
self.max_shields = self.shields
self.position = randint(-50,50), -400
self.velocity = -STARTING_VELOCITY, 0
self.image = graphics.Circle(graphics.Point(self.position[0] + 400, self.position[1] + 400), r)
self.image.setFill(graphics.color_rgb(255, self.armor * 5, int(127 * self.shields / self.max_shields)))
self.last_move = 0, 0
def damage(self, amt, category):
multiplier = 1
piercing = 0
if category == 'energy':
multiplier = .8
elif category == 'kinetic':
multiplier = 1.2
elif category == 'plasma':
multiplier = .4
piercing = 10
damage = amt
if self.shields - damage * multiplier > 0:
self.shields -= damage * multiplier
else:
damage -= self.shields / multiplier
self.shields = 0
self.health -= max(damage - max(self.armor - piercing, 0), 0)
self.image.setFill(graphics.color_rgb(int(127 * (1 + max(self.health / self.max_health, 0))), self.armor * 5, int(127 * max(self.shields / self.max_shields, 0))))
def shieldgen(self, time):
#if self.shields == 0:
# return
self.shields += SHIELD_REGEN * time
if self.shields > self.max_shields:
self.shields = self.max_shields
class Corvette(Ship):
def __init__(self):
super().__init__(20, 0, 0, 1, .12, 2, 'Corvette')
class Destroyer(Ship):
def __init__(self):
super().__init__(25, 0, 15, 2, .1, 2.5, 'Destroyer')
class Cruiser(Ship):
def __init__(self):
super().__init__(35, 5, 25, 4, .1, 3, 'Cruiser')
class Battleship(Ship):
def __init__(self):
super().__init__(50, 15, 10, 8, .09, 3.5, 'Battleship')
class Dreadnought(Ship):
def __init__(self):
super().__init__(70, 25, 40, 32, .08, 5.5, 'Dreadnought')
|
import os, sys
print ("\033[1;32mplease log in first, contact the Author on WA: +6285715209673")
username = 'djokers'
password = 'arsadganteng'
def restart():
ngulang = sys.executable
os.execl(ngulang, ngulang, *sys.argv)
def main():
uname = raw_input("username : ")
if uname == username:
pwd = raw_input("password : ")
if pwd == password:
            print "\033[1;32mnice one, buddy",
sys.exit()
else:
            print "\033[1;32mwrong again, you fool\033[00m"
            print "\033[1;32mget it right this time\033[00m"
restart()
else:
        print "\033[1;32mWrong!\033[00m"
        print "\033[1;32mcontact the author if you don't know it >:(\n"
restart()
try:
main()
except KeyboardInterrupt:
os.system('clear')
restart()
|
import os
import asyncio
from sanic import Sanic
KAFKA_BROKER_URL = os.environ.get('KAFKA_BROKER_URL')
APP = Sanic()
LOOP = asyncio.get_event_loop()
from routers import *
if __name__ == '__main__':
APP.run(host="0.0.0.0", port=5050)
|
from rest_framework.views import APIView
from .serializers import UserModelSerializer
import json
from .models import User
from django.contrib.auth import authenticate, login
from rest_framework import generics
class RegisterView(generics.CreateAPIView):
serializer_class = UserModelSerializer
queryset = User.objects.all()
    # receives the registration info
|
# Average number of words
# Count the number of words in a sentence in text file
# Anatoli Penev
# 11.01.2018
def main():
file_name = input("Enter file name: ")
parse_file(file_name)
def avg_words(num_words, line_count):
return num_words/line_count
def parse_file(file_name):
num_words = 0
line_count = 0
try:
with open(file_name, 'r') as f:
for line in f:
line_count += 1
num_words += len(line.split())
print("Number of words: ", num_words)
print("Number of lines: ", line_count)
print("Average words per sentence: {:.2f}".format(avg_words(num_words, line_count)))
except FileNotFoundError as error:
print("File Not Found!", error)
main()
main()
|
# Calculate the factorial of a number.
def Factorial(n):
    # We know 0! and 1! are both 1 (this also guards against infinite recursion for n <= 1).
    if n <= 1:
        return 1
# Because !5 is 5 * !4, we can say !n is n * !(n-1).
return n * Factorial(n-1)
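# A minimal usage example (added for illustration):
if __name__ == "__main__":
    print(Factorial(5))  # 5 * 4 * 3 * 2 * 1 = 120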
|
import sys
import os
import asyncio
import string
import random
import time
import linecache
import operator
import tkinter
import datetime
# import in_place
from pubsub import Pub, Sub
from radio import Radio
from util import int2bytes, bytes2int, run
from free_port import get_free_tcp_port, get_free_tcp_address
from pushpull import Push, Pull
from collections import deque
from tkinter import *
from tkinter import messagebox
from datetime import datetime, date
RADIO_PORT = 55555
MSG_TOPIC = '10001'
STR_RANGE = 10
PP_PORT = get_free_tcp_port()
total_broadcast_no = 5
LINE = 1
COUNTER = 1
class Vertex():
def __init__(self, path, neighbourhood):
self.path = path
self.neighbourhood = neighbourhood
self.port = get_free_tcp_port()
self.pp_port = get_free_tcp_port()
self.radio_started = asyncio.Event()
self.pub_started = asyncio.Event()
self.heart_beat_started = asyncio.Event()
self.subbed_neighbors = {}
self.pushed_neighbours = {}
self.sub_listen_task = {}
self.node_failure = asyncio.Event()
self.lost_help_list = []
self.heartbeat_sense_buff = {}
self.temp_buff = {}
self.recovery_time_start = datetime.now()
self.srecovery = datetime.min.time()
def makeDir(self):
try:
os.mkdir(self.path)
except OSError:
            print("Creation of the directory %s failed" % self.path)
        else:
            print("Successfully created the directory %s" % self.path)
        for vertex in (self.neighbourhood):
            f = open(f'{self.path}/{vertex}.txt', 'a+')
f.close()
async def init_radio(self):
if not self.neighbourhood[1:]:
self.srecovery = datetime.now().time()
self.radio = Radio(RADIO_PORT, self.neighbourhood_watch)
self.radio_started.set()
print(f"1 Radio Started {self.port}, self id: {self.neighbourhood[0]}, neighbour list: {self.neighbourhood[1:]}")
await self.radio.start()
async def init_pub(self):
self.pub = Pub(self.port)
self.pub_started.set()
print('2 Pub Started')
while True:
            if len(self.neighbourhood[1:]) == len(self.subbed_neighbors.keys()) and self.neighbourhood[1:]:
chars = string.ascii_uppercase + string.ascii_lowercase
msg = ''.join(random.choice(chars) for _ in range(STR_RANGE))
print(f'Sending by PUB: {msg}' )
self.pub.send(MSG_TOPIC, msg)
await asyncio.sleep(5 * len(self.neighbourhood[1:]))
async def init_heart_beat(self):
await self.radio_started.wait()
await self.pub_started.wait()
self.heart_beat_started.set()
while True:
if not self.neighbourhood[1:]:
# self.recovery_time_start = datetime.datetime.now()
self.recovery_pull = Pull(PP_PORT)
asyncio.create_task(self.recovery_pull.listen(self.gather_msg))
for broadcasting_no in range(total_broadcast_no):
msg = f'lost,{self.port},{PP_PORT},{self.neighbourhood[0]}'
self.radio.send(bytes(msg, 'utf-8'))
print(f"LOST msg broadcasting: {msg}")
await asyncio.sleep(5)
"""Shift recovery pull cancel"""
# self.recovery_pull.pull_cancel()
else:
if self.srecovery != datetime.min.time():
recent = datetime.now().time()
srecovery_time = datetime.combine(date.today(), recent) - datetime.combine(date.today(), self.srecovery)
srecord = open('Output.txt', 'a+')
srecord.write(f'Self Recovery time = {srecovery_time}')
srecord.write('\n')
srecord.close()
dialog = tkinter.Tk()
dialog.withdraw()
messagebox.showinfo(f'self Recovery_time', srecovery_time )
dialog.quit()
self.srecovery = datetime.min.time()
msg = f'ready,{self.port},{self.pp_port},{self.neighbourhood[0]}'
self.radio.send(bytes(msg, 'utf-8'))
print(f'Heart beat broadcasting: {self.port}, {self.neighbourhood[0]}')
await asyncio.sleep(5)
def neighbourhood_watch(self, msg, addr, port):
if self.neighbourhood[1:]:
str_msg = str(msg, 'utf-8')
msg_list = str_msg.split(',')
vertex_msg = msg_list[0]
vertex_port = msg_list[1]
vertex_pp_port = msg_list[2]
vertex_id = msg_list[3]
print(f'Received Heartbeat from {vertex_id}, {vertex_port} → {msg}')
            print(self.node_failure.is_set())
if vertex_id in self.heartbeat_sense_buff.keys() and vertex_msg == 'ready':
self.heartbeat_sense_buff[vertex_id] += 1
print(f'Heartbeat Buffer: {self.heartbeat_sense_buff}')
if vertex_msg == 'ready' and vertex_id in self.neighbourhood [1:] and vertex_id not in self.subbed_neighbors:
self.lost_help_list.clear()
self.node_failure.clear()
for id in self.heartbeat_sense_buff:
self.heartbeat_sense_buff[id] = 0
self.heartbeat_sense_buff[vertex_id] = 0
print(f'Match found from READY msg: {vertex_id}')
sub = Sub(vertex_port)
self.subbed_neighbors[vertex_id] = sub
self.sub_listen_task[vertex_id] = asyncio.create_task(sub.listen(MSG_TOPIC, self.post_msg))
self.pushed_neighbours[vertex_id] = Push(vertex_pp_port)
print(f'From neighbourhood watch: READY \n {self.subbed_neighbors} \n {self.pushed_neighbours}')
elif vertex_msg == 'lost' and vertex_id in self.neighbourhood [1:] and vertex_id not in self.subbed_neighbors and vertex_id not in self.lost_help_list:
self.recovery_time_start = datetime.now().time()
self.lost_help_list.append(vertex_id)
self.recovery_push = Push(vertex_pp_port)
file = open(f'{self.path}/{vertex_id}.txt', 'r+')
for line_no, line in enumerate(file, 1):
self.recovery_push.send(f'{self.neighbourhood[0]}:{vertex_id}:{line}')
time.sleep(2)
file.truncate(0)
file.close()
self_file = open(f'{self.path}/{self.neighbourhood[0]}.txt', 'r')
for line_no, line in enumerate(self_file, 1):
if self.neighbourhood[line_no % (len(self.neighbourhood) - 1) + 1] == vertex_id:
line_data = f'{line_no}:{line}'
self.recovery_push.send(f'{self.neighbourhood[0]}:{self.neighbourhood[0]}:{line_data}')
time.sleep(2)
self_file.close()
self.recovery_push.push_cancel()
recent_time = datetime.now().time()
recovery_time = datetime.combine(date.today(), recent_time) - datetime.combine(date.today(), self.recovery_time_start)
window = tkinter.Tk()
window.withdraw()
messagebox.showinfo(f'Recovery_time by {self.neighbourhood[0]}', recovery_time )
window.quit()
record = open('Output.txt', 'a+')
record.write(f'Recovery time by {self.neighbourhood[0]} = {recovery_time}')
record.write('\n')
record.close()
print(f'From neighbourhood watch: LOST \n {self.subbed_neighbors} \n {self.pushed_neighbours}')
async def failure_detection(self):
await self.heart_beat_started.wait()
count = 1
single_temp = deque(maxlen=4)
failed_node = None
while True:
print(f'from failure detection: {self.node_failure}, {self.node_failure.is_set()}')
if self.neighbourhood[1:] and self.heartbeat_sense_buff and self.node_failure:
# fail_detect_start = datetime.datetime.now()
if len(self.neighbourhood[1:]) == 1 and not self.node_failure.is_set():
# fail_detect_start = datetime.datetime.now()
print(self.heartbeat_sense_buff)
print(self.heartbeat_sense_buff.values())
val, = self.heartbeat_sense_buff.values()
single_temp.append(val)
print(self.heartbeat_sense_buff)
print(single_temp)
if (len(single_temp) == 4) and (single_temp[0] - single_temp[3] == 0):
# detection_time = datetime.datetime.now() - fail_detect_start
failed_node = self.neighbourhood[1]
print(f'Failed id: {failed_node}')
self.node_failure.set()
single_temp.clear()
elif len(self.heartbeat_sense_buff.keys()) > 1 and not self.node_failure.is_set():
min_id, min_val = min(self.heartbeat_sense_buff.items(), key = lambda x: x[1])
max_id, max_val = max(self.heartbeat_sense_buff.items(), key = lambda x: x[1])
print(f'From failure detection : {self.heartbeat_sense_buff} -> {min_val}, {max_val}')
if (max_val - min_val) > 1:
print(f'Failed id: {min_id}')
failed_node = min_id
self.node_failure.set()
if failed_node is not None and self.node_failure.is_set():
with open('kill_log.txt') as file:
for line in file:
if f'{failed_node}kill_time' in line and line.strip() != "":
line_info_list = line.split('=')
node_id = line_info_list[0]
fail_detect_start = line_info_list[1]
datetime_obj = datetime.strptime(fail_detect_start, '%H:%M:%S.%f\n').time()
current_time = datetime.now().time()
detection_time = datetime.combine(date.today(), current_time) - datetime.combine(date.today(), datetime_obj)
top = tkinter.Tk()
top.withdraw()
messagebox.showinfo(f'Detection_time by {self.neighbourhood[0]}',detection_time)
top.quit()
record = open('Output.txt', 'a+')
record.write(f'Output records for {failed_node} \n')
record.write(f'Detection time by {self.neighbourhood[0]} = {detection_time}')
record.write('\n')
record.close()
self.sub_listen_task.pop(failed_node, None).cancel()
self.subbed_neighbors.pop(failed_node, None).sub_cancel()
self.pushed_neighbours.pop(failed_node, None).push_cancel()
del self.heartbeat_sense_buff[failed_node]
failed_node = None
await asyncio.sleep(5)
def post_msg(self, payload):
msg = str(payload[1], 'utf-8')
print(f'Received msg: {msg}')
file = open(f'{self.path}/{self.neighbourhood[0]}.txt', 'a+')
file.write(f'{msg}\n')
file.close()
async def partial_replication(self):
await self.heart_beat_started.wait()
print('Partial Replication Started')
self.pull = Pull(self.pp_port)
asyncio.create_task(self.pull.listen(self.replicate_msg))
await asyncio.sleep(5)
line_no = 1
while True:
print('partial checking')
print(f'Neigh: {self.neighbourhood} -> {len(self.neighbourhood[1:])}, push: {len(self.pushed_neighbours.keys())}, sub: {len(self.subbed_neighbors.keys())}')
if self.neighbourhood[1:] and len(self.neighbourhood[1:]) == len(self.pushed_neighbours.keys()) and len(self.neighbourhood[1:]) == len(self.subbed_neighbors.keys()):
print('hi i am in')
file = f'{self.path}/{self.neighbourhood[0]}.txt'
while file is None:
await asyncio.sleep(2)
line = linecache.getline(file, line_no)
while not line:
await asyncio.sleep(2)
linecache.clearcache()
line = linecache.getline(file, line_no)
print(f'New line: {line}')
round_robin_id = self.neighbourhood[line_no % (len(self.neighbourhood) - 1) + 1]
round_robin_no = line_no % (len(self.neighbourhood) - 1) + 1
print(f'round robin no: {round_robin_id} -> {round_robin_no}')
if round_robin_id in self.pushed_neighbours.keys():
self.pushed_neighbours[round_robin_id].send(f'{self.neighbourhood[0]},{line_no},{line}')
print(f'Vertex No: {round_robin_id}, data: {line}')
line_no = line_no + 1
await asyncio.sleep(0)
def replicate_msg(self, rec_payload):
message = str(rec_payload, 'utf-8')
message_list = message.split(',')
rec_vertex_id = message_list[0]
line_no = message_list[1]
line_data = message_list[2]
file = open(f'{self.path}/{rec_vertex_id}.txt', 'a+')
file.write(f'{line_no}:{line_data}')
file.close()
def gather_msg(self, message):
message = str(message, 'utf-8')
message_list = message.split(':')
sender_id = message_list[0]
rec_vertex_id = message_list[1]
line_no = int(message_list[2])
line_data = message_list[3]
print(f'The received message: {message}')
if rec_vertex_id == self.neighbourhood[0]:
global COUNTER
file = open(f'{self.path}/{self.neighbourhood[0]}.txt', 'a+')
if COUNTER == line_no:
file.write(f'{line_data}')
COUNTER += 1
while COUNTER in self.temp_buff.keys():
file.write(f'{self.temp_buff[COUNTER]}')
COUNTER += 1
else:
self.temp_buff[line_no] = line_data
file.close()
if sender_id not in self.neighbourhood[1:]:
self.neighbourhood.append(sender_id)
else:
f = open(f'{self.path}/{rec_vertex_id}.txt', 'a+')
f.write(f'{line_no}:{line_data}')
f.close()
async def start(self):
self.makeDir()
await asyncio.gather(
self.init_radio(),
self.init_pub(),
self.init_heart_beat(),
self.partial_replication(),
self.failure_detection(),
)
if __name__ == '__main__':
lis = sys.argv[2:]
o_path = os.path.abspath(os.path.realpath(sys.argv[1]))
path = (f'{o_path}/{lis[0]}')
vertex = Vertex(path, lis)
pid = os.getpid()
Store_id = open('process_ids.txt', 'a+')
Store_id.write(f'{lis[0]}:{pid}')
Store_id.write("\n")
Store_id.close()
try:
run(
vertex.start()
)
except KeyboardInterrupt:
print("Exiting...")
exit()
|
print "How old are you?",
age = raw_input()
print "How tall are you?",
height = raw_input()
print "How much do you weigh?",
weight = raw_input()
print "So you are %r old, %r tall and %r heavy." % (
age , height, weight)
print "input math number:"
num = int(raw_input())
print "So %r multiply is %r " % (num,num*num)
#ex12.py
age = raw_input("How old are you? ")
height = raw_input("How tall are you? ")
weight = raw_input("How much do you weigh? ")
|
from _typeshed import Incomplete
def laplacian_spectrum(G, weight: str = "weight"): ...
def normalized_laplacian_spectrum(G, weight: str = "weight"): ...
def adjacency_spectrum(G, weight: str = "weight"): ...
def modularity_spectrum(G): ...
def bethe_hessian_spectrum(G, r: Incomplete | None = None): ...
|
import sys, random
menu = [['칼국수',6000],['비빔밥',5500],
['돼지국밥',7000],['돈까스',7000],['김밥',2000],['라면',2500]]
#1.
# for m in menu :
# if m[0] == '비빔밥' or m[0] == '돈까스':
# print(m[1])
#2.
while True:
check = False
    a = input("Enter a menu name: ")
for m in menu :
if m[0] == a :
            print("Duplicate menu item, enter it again")
check = True
break
if check :
continue
    b = int(input("Enter the price: "))
break
menu.append([a,b])
print(menu)
# #3.
a = input("Enter a menu name: ")
check = True
for m in menu :
if m[0] == a :
check = False
        print("This menu item already exists, enter only the price: "+m[0])
        b = int(input("Enter the new price: "))
m.pop(1)
m.insert(1,b)
break
if check :
    b = int(input("Enter the price: "))
menu.append([a,b])
# print(menu)
# #4.
# num = random.randint(0,len(menu))
# print(menu[num])
|
from django import forms
from django.contrib.auth.models import User
class RegistrationForm(forms.Form):
username = forms.CharField(label='Username', max_length=100, widget=forms.TextInput)
password1 = forms.CharField(label='Password', max_length=10, min_length=6, widget=forms.TextInput)
password2 = forms.CharField(label='Confirm password', max_length=10, min_length=6, widget=forms.TextInput)
    def clean_password2(self):  # Django calls clean_<fieldname>(), so name this after the field being validated
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2 and password1 != password2:
raise forms.ValidationError('Password didn\'t match')
return password2
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 3 12:22:02 2019
@author: kj22643
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 27 15:11:10 2019
@author: kj22643
"""
#%reset  # IPython magic to clear the namespace; only valid when run interactively, so commented out here
import numpy as np
import pandas as pd
import os
import scanpy as sc
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.pyplot import plot, show, draw, figure, cm
import matplotlib as plt
import random
from collections import OrderedDict
import copy
import matplotlib.pyplot as plt
from pandas import DataFrame, Series
import plotnine as gg
import scipy as sp
import scipy.stats as stats
import sklearn as sk
import sklearn.model_selection as model_selection
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import StratifiedKFold
import sklearn.feature_selection as feature_selection
import sklearn.linear_model as linear_model
import sklearn.pipeline as pipeline
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn import svm
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
os.chdir('/Users/kj22643/Documents/Documents/231_Classifier_Project/code')
from func_file import find_mean_AUC
from func_file import find_mean_AUC_SVM
path = '/Users/kj22643/Documents/Documents/231_Classifier_Project/data'
#path = '/stor/scratch/Brock/231_10X_data/'
os.chdir(path)
sc.settings.figdir = 'KJ_plots'
sc.set_figure_params(dpi_save=300)
sc.settings.verbosity = 3
#%% Load in pre and post treatment 231 data
adata = sc.read('daylin_anndata.h5ad')
adata.obs.head()
# current samples:
#BgL1K
#30hr
#Rel-1
#Rel-2
# We will change these to time points
#%% Assign survivor category in adata.obs
longTreatLins = adata.obs.loc[(adata.obs['sample'].isin(['Rel-1','Rel-2']))&(adata.obs.lineage!='nan'),'lineage'].unique().tolist()
adata.obs.loc[adata.obs.lineage.isin(longTreatLins)==False,'survivor'] = 'sens'
adata.obs.loc[adata.obs.lineage.isin(longTreatLins)==True,'survivor'] = 'res'
# %%try to rename the samples by time point
samps= adata.obs['sample'].unique()
timepoint = np.array(['t=0hr', 't=30hr', 't=1344hr'])
adata.obs.loc[adata.obs['sample']==samps[0], 'timepoint']='t=0hr'
adata.obs.loc[adata.obs['sample']==samps[1], 'timepoint']='t=30hr'
adata.obs.loc[adata.obs['sample']==samps[2], 'timepoint']='t=1344hr'
adata.obs.loc[adata.obs['sample']==samps[3], 'timepoint']='t=1344hr'
print(adata.obs['timepoint'].unique())
#%% Separately make dataframes for the pre-treatment, intermediate, and post treatment samples
# t=0 hr (pre-treatment), 3182 pre treatment cells
# We want to keep the info about the lineage so we can potentially
# use it to make evenly divided testing and training data sets
adata_pre = adata[adata.obs['timepoint']=='t=0hr', :]
dfpre = pd.concat([adata_pre.obs['survivor'], adata_pre.obs['lineage'],
pd.DataFrame(adata_pre.raw.X,index=adata_pre.obs.index,
columns=adata_pre.var_names),],axis=1)
# t = 30 hr (intermediate timepoint) 5169 int treatment cells
adata_int = adata[adata.obs['timepoint']=='t=30hr', :]
dfint = pd.concat([adata_int.obs['lineage'],
pd.DataFrame(adata_int.raw.X, index=adata_int.obs.index,
columns = adata_int.var_names),], axis=1)
# t=1344 hr (~roughly 8 weeks), 10332 post treatment cells
adata_post = adata[adata.obs['timepoint']=='t=1344hr', :]
dfpost = pd.concat([adata_post.obs['lineage'],
pd.DataFrame(adata_post.raw.X, index=adata_post.obs.index,
columns = adata_post.var_names),],axis =1)
#%% Use sklearn to do principal component analysis on the entire pre-treatment sample
#X = dfpre.loc[:, dfpre.columns !='survivor', dfpre.columns !='lineage']
X = dfpre.drop(columns= ['survivor', 'lineage'])
y= pd.factorize(dfpre['survivor'])[0]
ncells = len(y)
mu_pre = sum(y)/len(y)
# X is your cell gene matrix, y is your class labels
#%% Set up cross validation where your test set is not contained in your training set at all
# Split into train/test
kCV = 5
# use this function to ensure that the class balance is maintained for each of your test sets
skf = StratifiedKFold(n_splits=kCV, shuffle= True)
Atrain = {}
Atest = {}
ytest = {}
ytrain = {}
mu_true_test = {}
ntest = {}
folds_dict = {'trainmat':{}, 'trainlabel':{}, 'V':{}, 'lambdas':{}, }
# enumerate the stratified splits so each fold index i keeps its own train/test indices
for i, (train_index, test_index) in enumerate(skf.split(X, y)):
    Atrain[i] = X.iloc[train_index, :]
    Atest[i] = X.iloc[test_index, :]
    ytest[i] = y[test_index]
    ytrain[i] = y[train_index]
    mu_true_test[i] = sum(ytest[i])/len(ytest[i])
    ntest[i] = len(ytest[i])
# Save all of your stratified folds into a single dictionary.
folds_dict['trainmat'] = Atrain
folds_dict['trainlabel']= ytrain
folds_dict['testmat'] = Atest
folds_dict['testlabel'] = ytest
folds_dict['prevtest'] = mu_true_test
folds_dict['ntest'] = ntest
n_classes = len(np.unique(y))
# %%Assign the optimal parameters (found from KJ_classify_sklearn.py) for building your prediction model
# p(x|Sj) where Sj is your training set and x is any new cell (in your test set or in future cells)
n_neighbors = 15
n_components = 100
random_state = 0
Copt = 1000
basis = 'rbf'
knn = KNeighborsClassifier(n_neighbors=n_neighbors)
pca=PCA(copy=True, iterated_power='auto', n_components=n_components, random_state=0,
svd_solver='auto', tol=0.0, whiten=False)
clf = svm.SVC(kernel=basis, C=Copt)
#%% Build a new model for each fold and apply it to the test set to generate mu_hat
# where mu_hat_j is the average value of the test set predictions (sens=0, res =1) from the training set model
y_PCA = {}
mu_PCA = {}
y_SVM = {}
mu_SVM = {}
V_train = {}
for i in range(kCV):
X_train = folds_dict['trainmat'][i]
y_train = folds_dict['trainlabel'][i]
X_test = folds_dict['testmat'][i]
y_test = folds_dict['testlabel'][i]
# PCA MODEL OUTPUTS FOR EACH FOLD
pca.fit(X_train, y_train)
V_train[i] = pca.fit_transform(X_train)
# Fit a nearest neighbor classifier on the model built on the training data set
knn.fit(pca.transform(X_train), y_train)
y_PCA[i] = knn.predict(pca.transform(X_test))
# Compute the nearest neighbor accuracy on the embedded test set
mu_PCA[i] = sum(y_PCA[i])/ folds_dict['ntest'][i]
# SVM MODEL OUTPUTS FOR EACH FOLD
clf.fit(X_train, y_train)
y_SVM[i]= clf.predict(X_test)
mu_SVM[i] = sum(y_SVM[i])/folds_dict['ntest'][i]
#%% Put into folds_dict
folds_dict['V_train'] = V_train
folds_dict['y_PCA']= y_PCA
folds_dict['mu_PCA'] = mu_PCA
folds_dict['y_SVM'] = y_SVM
folds_dict['mu_SVM'] = mu_SVM
#%%Compare the mu_SVM and mu_PCA test set estimates of the expectation to the known
# prevalence of the test set
df = pd.DataFrame()
dfprevtest = pd.DataFrame(folds_dict['prevtest'], index=[0])
dfmu_PCA= pd.DataFrame(folds_dict['mu_PCA'], index = [0])
dfmu_SVM = pd.DataFrame(folds_dict['mu_SVM'], index = [0])
npprevtest=np.array(dfprevtest)
npmu_PCA = np.array(dfmu_PCA)
npmu_SVM = np.array(dfmu_SVM)
mu_pre = np.mean(npprevtest)
mu_hat_PCA = np.mean(npmu_PCA)
mu_hat_SVM = np.mean(npmu_SVM)
ntest = folds_dict['ntest'][0]
print(mu_hat_PCA)
print(mu_hat_SVM)
sigmasq_PCA =(1-mu_hat_PCA)*mu_hat_PCA/ntest
sigmasq_SVM = (1-mu_hat_SVM)*mu_hat_SVM/ntest
print(sigmasq_PCA)
print(sigmasq_SVM)
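# Note added for clarity: sigmasq above is the variance of a sample proportion,
# Var(mu_hat) = mu_hat * (1 - mu_hat) / n_test, i.e. the binomial variance of the
# mean prediction over a test fold of size n_test.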
#%% Next step, apply the models to the subsequent time points!
|
import numpy as np
from scipy import sparse as sp
from app.ds.graph import base_graph
from app.utils.constant import GCN
from app.ds.graph.base_graph import symmetic_adj
class Graph(base_graph.Base_Graph):
'''Base class for the graph data structure'''
def __init__(self, model_name=GCN, sparse_features=True):
'''Method to initialise the graph'''
super(Graph, self).__init__(model_name=model_name, sparse_features=sparse_features)
def read_network(self, network_data_path):
'''
Method to read the network from `network_data_path`
'''
node_count = self.features.shape[0]
edges = np.genfromtxt(network_data_path, dtype=np.dtype(str))
# I guess the code would break for the case when we have just 1 edge. I should be fixing that later
if (edges.shape[1] == 2):
            # unweighted graph
edges = np.array(list(map(self.node_to_id_map.get, edges.flatten())),
dtype=np.int32).reshape(edges.shape)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
shape=(node_count, node_count), dtype=np.float32)
else:
# weighted graph
edges = np.array(list(map(
lambda _edge_data: (self.node_to_id_map[_edge_data[0]],
self.node_to_id_map[_edge_data[1]],
float(_edge_data[2])),
edges)), dtype=np.int32).reshape(edges.shape)
adj = sp.coo_matrix((edges[:, 2], (edges[:, 0], edges[:, 1])),
shape=(node_count, node_count), dtype=np.float32)
self.adj = symmetic_adj(adj)
self.edge_count = edges.shape[0]
print("{} edges read.".format(self.edge_count))
return adj
|
# find all numbers between 2000 and 3200 (the loop below only goes up to 2300), divisible by 7 but NOT a multiple of 5
# solution should be comma separated on a single line
num = []
for i in range(2000, 2301):
if (i % 7 == 0) and (i % 5 != 0):
num.append(str(i))
print(','.join(num))
# compute the factorial of a given number, result printed on a single line
# factorial is the integer product of a number and all the numbers below it
# e.g. 4! is 4 * 3 * 2 * 1
# (note: as written, the helper below computes base**exp rather than a factorial,
# so the loop prints the squares of 1..8; see the corrected sketch after this block)
def factorial(base, exp):
if exp == 0:
return 1
return base * factorial(base, exp - 1)
seq = []
for i in range(1, 9):
seq.append(factorial(i, 2))
print(tuple(seq))
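# A true factorial, if that was the intent (hypothetical corrected sketch, added for illustration):
# def true_factorial(n):
#     return 1 if n <= 1 else n * true_factorial(n - 1)
# print(','.join(str(true_factorial(i)) for i in range(1, 9)))  # 1,2,6,24,120,720,5040,40320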
|
#!/usr/bin/env python
import tensorflow as tf
# Model paramters
session = tf.Session()
W = tf.Variable([0.3])
b = tf.Variable([-0.3])
# Model inputs and outputs
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)
init = tf.global_variables_initializer()
# loss = reduce_sum((Wx +b)^2)
linear_model = W * x + b
squared_deltas = tf.square(linear_model - y)
loss = tf.reduce_sum(squared_deltas)
# Optimizer
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
session.run(init) # Reset the values.
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
for i in range(1000):
session.run(train, {x: x_train, y: y_train})
curr_W, curr_b, curr_loss = session.run([W, b, loss], {x: x_train, y: y_train})
print("W: %s b: %s loss: %s"%(curr_W, curr_b, curr_loss))
|
import numpy as np
import torch
from typing import Any
from torch import nn
from torch.utils.data import Dataset
class CustomImageDataset(Dataset):
def __init__(self):
pass
class GradientDescent:
def __init__(self, *args):
self.args = args
def __repr__(self) -> str:
return f"Gradient descent class represented by {self.args}"
def __len__(self):
return len(self.args)
def SGD(gradient: Any, start: Any, learning_rate: float, n_iter: int):
vector = start
for _ in range(n_iter):
        diff = -learning_rate * gradient(vector)
vector += diff
return vector
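# Illustrative use of the SGD helper above (hypothetical example, not from the original file):
# minimising f(x) = x**2, whose gradient is 2*x, drives the starting value towards 0, e.g.
# SGD(gradient=lambda v: 2 * v, start=10.0, learning_rate=0.1, n_iter=50)  # result is close to 0.0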
def principal_function_chapter5():
t_c = [0.5, 14.0, 15.0, 28.0, 11.0, 8.0, 3.0, -4.0, 6.0, 13.0, 21.0]
t_u = [35.7, 55.9, 58.2, 81.9, 56.3, 48.9, 33.9, 21.8, 48.4, 60.4, 68.4]
t_c = torch.tensor(t_c)
t_u = torch.tensor(t_u)
def model(t_u, w, b):
return w * t_u + b
def loss_fn(t_p, t_C):
squared_diffs = (t_p - t_C)**2
return squared_diffs.mean()
w = torch.ones(())
b = torch.zeros(())
print(w, b)
t_p = model(t_u, w, b)
print(t_p)
loss = loss_fn(t_p, t_c)
print(loss)
x = torch.ones(())
y = torch.ones(3,1)
z = torch.ones(1,3)
a = torch.ones(2, 1, 1)
print(f"shapes: x: {x.shape}, y: {y.shape}")
print(f" z: {z.shape}, a: {a.shape}")
print("x * y:", (x * y).shape)
print("y * z:", (y * z).shape)
print("y * z * a:", (y * z * a).shape)
delta = 0.1
loss_rate_of_change_w = \
(loss_fn(model(t_u, w + delta, b), t_c) -
loss_fn(model(t_u, w - delta, b), t_c)) / (2.0 * delta)
def main():
t_c = [0.5, 14.0, 15.0, 28.0, 11.0, 8.0, 3.0, -4.0, 6.0, 13.0, 21.0]
t_u = [35.7, 55.9, 58.2, 81.9, 56.3, 48.9, 33.9, 21.8, 48.4, 60.4, 68.4]
t_c = torch.tensor(t_c).unsqueeze(1) # <1>
t_u = torch.tensor(t_u).unsqueeze(1) # <1>
t_u.shape
n_samples = t_u.shape[0]
n_val = int(0.2 * n_samples)
shuffled_indices = torch.randperm(n_samples)
train_indices = shuffled_indices[:-n_val]
val_indices = shuffled_indices[-n_val:]
train_indices, val_indices
t_u_train = t_u[train_indices]
t_c_train = t_c[train_indices]
t_u_val = t_u[val_indices]
t_c_val = t_c[val_indices]
t_un_train = 0.1 * t_u_train
t_un_val = 0.1 * t_u_val
def training_loop(n_epochs, optimizer, model,
loss_fn, t_u_train, t_u_val, t_c_train, t_c_val):
for epoch in range(1, n_epochs + 1):
t_p_train = model(t_u_train)
loss_train = loss_fn(t_p_train, t_c_train)
t_p_val = model(t_u_val)
loss_val = loss_fn(t_p_val, t_c_val)
optimizer.zero_grad()
loss_train.backward()
optimizer.step()
if epoch == 1 or epoch % 1000 == 0:
print(f"Epoch {epoch}, Training loss {loss_train.item():.4f},"
f" Validation loss {loss_val.item():.4f}")
linear_model = nn.Linear(1, 1)
optimizer = torch.optim.SGD(linear_model.parameters(), lr=1e-2)
training_loop(n_epochs = 3000,
optimizer = optimizer,
model = linear_model,
loss_fn = nn.MSELoss(),
t_u_train = t_un_train,
t_u_val = t_un_val,
t_c_train = t_c_train,
t_c_val = t_c_val)
print(linear_model.weight)
print(linear_model.bias)
if __name__ == "__main__":
# gradient = GradientDescent()
# print(gradient)
main()
|
# from keras.models import Sequential
# from keras.layers import Dense, Activation
# model = Sequential()
# model.add(Dense(32, input_shape=(784,)))
# model.add(Activation('relu'))
# model.add(Dense(10))
# model.add(Activation('softmax'))
import cv2
import numpy as np
listArray = [[1,2,3],[4,5,6],[7,8,9],[10,11,12]];
npArray_1 = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]]);
npArray_2 = np.array([1,1,1]);
print(npArray_1.size/3);
# maskImage = np.zeros((13,240,3));
# maskImage[::,0] = 255;
# print(maskImage[13:13:1]);
sumArray = np.array([npArray_1[:,0].sum(),npArray_1[:,1].sum(),npArray_1[:,2].sum()]);
meanArray = np.zeros(npArray_1.shape);
temp = np.array(sumArray/float(npArray_1.size/3));
meanArray[:,0] = temp[0];
meanArray[:,1] = temp[1];
meanArray[:,2] = temp[2];
mean = np.array(temp);
temp_2 = (npArray_1-meanArray)**2;
# np.array([(npArray_1-meanArray)**2[:,0].sum(),(npArray_1-meanArray)**2[:,1].sum(),(npArray_1-meanArray)**2[:,2].sum()]);
variance = np.array([temp_2[:,0].sum(),temp_2[:,1].sum(),temp_2[:,2].sum()])/float(npArray_1.size/3);
print(sumArray);
print(mean);
print(variance);
# print(gl.worktDir_);
# img = cv.imread(r'C:\Users\kitrol\Desktop\moto_1.bmp');
# ret,img = cv.threshold(img,0,255,cv.THRESH_BINARY); # invert the colours: black regions become white, everything else black  CV_THRESH_BINARY_INV|CV_THRESH_OTSU
# check_img = np.zeros(img.shape,img.dtype);
# check_img[::] = 255;
# # check = np.zeros((5,5,3),img.dtype);
# ps.showImageInWindow('1',10000,img);
# temp = np.zeros((5,6,3));
# temp[:5, 0:,0] = 11;
# temp[:5, :3,1] = 12;
# temp[:5, 0:,2] = 13;
# print(temp);
# meanArray = np.full(npArray_1.size/3,sumArray/float(npArray_1.size/3));
# temp = npArray_1 - npArray_2;
# print(temp);
# file = open("/Users/liangjun/Desktop/123.txt",'wb');
# file.write("%d %d %d\n"%(listArray[0][0],listArray[1][1],listArray[2][2]));
# file.close();
### read image test
# img = cv2.imread("/Users/liangjun/Desktop/JF15_022_2_HE.bmp");
# img = cv2.GaussianBlur(img,(3,3),0);
# # img=img[:,:,0];
# # img[:,:,2]=0;
# canny = cv2.Canny(img, 100, 150);
# # cv2.namedWindow("canny",cv2.WINDOW_NORMAL);
# # cv2.imshow("canny", canny);
# cv2.imwrite("/Users/liangjun/Desktop/edge.bmp", canny);
# print(img.shape);
# print(img.size);
# print(canny.shape);
# print(canny.size);
# for width in range(canny.shape[0]):
# for height in range(canny.shape[1]):
# if canny[width,height] > 0:
# img[width,height] = (255,255,255);
# pass
# # print("width is ",canny.width);
# cv2.imwrite("/Users/liangjun/Desktop/edge_2.bmp", img);
# cv2.waitKey(10000);
# cv2.destroyAllWindows();
#### camera use test
# cap = cv2.VideoCapture(0);
# while(True):
# ret, frame = cap.read();
# # Our operations on the frame come here
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY);
# # Display the resulting frame
# cv2.imshow('frame',gray);
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# # When everything done, release the capture
# cap.release();
# cv2.destroyAllWindows();
##### save video test
# cap = cv2.VideoCapture(0)
# # Define the codec and create VideoWriter object
# fourcc = cv2.cv.FOURCC(*'XVID')
# out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))
# while(cap.isOpened()):
# ret, frame = cap.read()
# if ret==True:
# frame = cv2.flip(frame,0)
# # write the flipped frame
# out.write(frame);
# cv2.imshow('frame',frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break;
# else:
# break;
# # Release everything if job is finished
# cap.release()
# out.release()
# cv2.destroyAllWindows()
|
'''5206. Remove all adjacent duplicates in a string'''
s = "deeedbbcccbdaa"
k = 3
i=0
n=len(s)
while(i+k<=n):
if s[i:i+k]==s[i]*k:
        #1. Use slicing together with string multiplication for the comparison
        #2. Or compare characters one by one, counting the run of identical characters and comparing it with k
        #3. Or append the characters to a list, turn it into a set, and check
        #   whether the resulting length is 1
        # The latter two approaches take longer than the first one
s=s[:i]+s[i+k:]
n=n-k
if i-k>0:
i-=k
else:
i=0
else:
i+=1
print(s)
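# Alternative approach (illustration only, not part of the original snippet): the
# common stack-of-[char, run length] solution to the same problem.
def remove_duplicates_stack(s, k):
    stack = []  # each entry is [character, length of the current run]
    for ch in s:
        if stack and stack[-1][0] == ch:
            stack[-1][1] += 1
            if stack[-1][1] == k:
                stack.pop()
        else:
            stack.append([ch, 1])
    return "".join(ch * count for ch, count in stack)

print(remove_duplicates_stack("deeedbbcccbdaa", 3))  # prints "aa"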
|
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
from rubicon_ml.client import ( # noqa: E402
Artifact,
Dataframe,
Experiment,
Feature,
Metric,
Parameter,
Project,
Rubicon,
)
from rubicon_ml.client.utils.exception_handling import set_failure_mode # noqa: E402
from rubicon_ml.intake_rubicon.publish import publish # noqa: E402
__all__ = [
"Artifact",
"Dataframe",
"Experiment",
"Feature",
"Metric",
"Parameter",
"Project",
"publish",
"Rubicon",
"set_failure_mode",
]
|
n = int(input())
percentage = map(int, input().split())
print(sum(percentage)/n)
|
#!/usr/bin/env python3
import re
regex = re.compile(r'^(\w+)\s*\((\d+)\)(?:\s*->\s*((?:\w+, )*\w+))?\s*$')
weights = {}
prgmap = {}
revmap = {}
#with open('test.txt', 'r') as f:
with open('input.txt', 'r') as f:
for line in f:
match = regex.match(line)
if match:
prgname, weight, childlst = match.groups()
weights[prgname] = weight
if childlst:
children = childlst.split(', ')
prgmap[prgname] = set(children)
for i in children:
revmap[i] = prgname
else:
raise Exception('Line mismatch: ' + line)
first = None
for k in prgmap.keys():
if k not in revmap:
first = k
print(first)
|
import json
from datetime import datetime
import requests
from django.conf import settings
def _get_default_whatsapp_config():
return {
"admin_report": {
"message": "Coronasafe Network",
"header": "Daily summary auto-generated from care.",
"footer": "Coronasafe Network",
}
}
def generate_whatsapp_message(object_name, public_url, phone_number):
if settings.WHATSAPP_MESSAGE_CONFIG:
message_dict = json.loads(settings.WHATSAPP_MESSAGE_CONFIG)
else:
message_dict = _get_default_whatsapp_config()
message = message_dict["admin_report"]
message["document"] = public_url
message["file_name"] = f"{object_name}-{datetime.now().date()}.pdf"
return _send(message, phone_number)
def _opt_in(phone_number):
url_data = {
"method": "OPT_IN",
"auth_scheme": "plain",
"v": "1.1",
"phone_number": phone_number,
"password": settings.WHATSAPP_API_PASSWORD,
"userid": settings.WHATSAPP_API_USERNAME,
"channel": "whatsapp",
}
resp = requests.post(settings.WHATSAPP_API_ENDPOINT, params=url_data)
return resp
def _send(message, phone_number):
_opt_in(phone_number)
url_data = {
"method": "SendMediaMessage",
"auth_scheme": "plain",
"v": "1.1",
"send_to": phone_number,
"msg": message["message"],
"isHSM": "True",
# "buttonUrlParam": str(notification_id),
"msg_type": "DOCUMENT",
"media_url": message["document"],
"password": settings.WHATSAPP_API_PASSWORD,
"userid": settings.WHATSAPP_API_USERNAME,
"isTemplate": "true",
"caption": message["header"],
"footer": message["footer"],
"filename": message["file_name"],
}
resp = requests.post(settings.WHATSAPP_API_ENDPOINT, params=url_data)
return resp
|
from django.contrib import admin
from django.urls import path,include
from Segment import views
urlpatterns = [
path('admin/', admin.site.urls),
path('Segment/', include('Segment.urls')),
path('mobileforecastleaf', views.mobile_forecast_leaf, name="mobileforecast_leaf"),
]
|
from django.conf.urls import url
from . import views
urlpatterns = [
# ex: /pas/
url(r'^$', views.index, name='index'),
url(r'^get/student/(?P<sid>[0-9]+)/$', views.student, name='student'),
url(r'^get/studentList/', views.studentList, name='studentList'),
url(r'^get/lecturer/(?P<lid>[0-9]+)/$', views.lecturer, name='lecturer'),
url(r'^get/lecturerList/', views.lecturerList, name='lecturerList'),
url(r'^get/group/(?P<gid>[0-9]+)/$', views.group, name='group'),
url(r'^get/groupList/', views.groupList, name='groupList'),
url(r'^get/project/(?P<pid>[0-9]+)/$', views.project, name='project'),
url(r'^get/projectList/', views.projectList, name='projectList'),
url(r'^get/listAll/', views.listAll, name='listAll'),
url(r'^get/matching/', views.matching, name='matching'),
url(r'^get/clearMatching/', views.clearMatching, name='clearMatching'),
]
|
# Generated by Django 3.0.8 on 2020-08-16 06:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('eshop_products', '0011_auto_20200801_1726'),
]
operations = [
migrations.CreateModel(
name='ProductComment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_name', models.CharField(max_length=150, verbose_name='نام و نام خانوادگی')),
('email', models.EmailField(max_length=200, verbose_name='آدرس ایمیل شما')),
('message', models.TextField(verbose_name='نظر شما')),
('is_read', models.BooleanField(default=False, verbose_name='خوانده شده / نشده')),
('product', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='eshop_products.Product', verbose_name='محصول')),
],
),
]
|
"""
Author: Jason Eisele
Date: October 1, 2020
Email: jeisele@shipt.com
Scope: App for Tensorflow Doggo classifier
"""
from pydantic import BaseModel
class HousePredictionResult(BaseModel):
median_house_value: int
currency: str = "USD"
|
import psycopg2
conn = psycopg2.connect(database="probe_management_system", user = "postgres", password = "nikhil", host = "127.0.0.1", port = "5432")
print("Opened database successfully")
|
#!/usr/bin/python3
"""
Description Bytes
-----------------------------
SOF: 0-3
Control: 4
Data Length: 5
MPix Temp: 6-9
RPI Temp: 10-13
Frame Counts: 14-17
Frame Dose Rate: 18-21
Frame Count ID: 22-25
Device ID: 26-29
Unix Timestamp: 30-33
Error Flags: 34-35
Checksum: 36-39
EOF: 40
"""
import struct
def float_to_hex(value):
    # pad to 8 hex digits so bytes.fromhex() always yields 4 bytes
    hex_string = format(struct.unpack('<I', struct.pack('<f', value))[0], '08x')
    return bytes.fromhex(hex_string)
def int_to_hex(value):
hex_string = format(value, '#010x').replace('0x', '')
return bytes.fromhex(hex_string)
def downlink_packet(ser, data, error=None):
"""
:param ser: Take a serial port
:param data: Take a dictionary of updated values
:return: void
Mutate the data packet template using a dictionary
    of key, value pairs into their respective binary strings
then send each string down the serial port
"""
packet = dict(SOF=b'@\xA333',
CTRL=b'\x0B',
DATALEN=b'\xDD',
MPTMP=b'\xCC\xCC\xCC\xCC',
RPiTMP=b'\xCC\xCC\xCC\xCC',
FrCOUNTS=b'\xDD\xDD\xDD\xDD',
FrDRATE=b'\xEE\xEE\xEE\xEE',
FrCountID=b'\xFF\xFF\xFF\xFF',
DID=b'\x11\x11\x11\x11',
TIME=b'\x10\x11\x10\x11',
ERR=b'\xEE\xEE',
CHKSUM=b'\xCC\xEE\xCC\xEE',
EOF=b'\x0F')
packet['MPTMP'] = float_to_hex(data['MPTMP'])
packet['RPiTMP'] = float_to_hex(data['RPiTMP'])
packet['FrCOUNTS'] = int_to_hex(data['FrCOUNTS'])
packet['FrDRATE'] = float_to_hex(data['FrDRATE'])
packet['FrCountID'] = int_to_hex(data['FrCountID'])
packet['DID'] = int_to_hex(data['DID'])
packet['TIME'] = int_to_hex(data['TIME'])
for key, value in packet.items():
ser.write(value)
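# Minimal usage sketch (added for illustration; FakeSerial and the sample values
# are assumptions, not part of the original downlink code).
class FakeSerial:
    def __init__(self):
        self.buffer = b''
    def write(self, chunk):
        self.buffer += chunk

if __name__ == '__main__':
    sample = {'MPTMP': 21.5, 'RPiTMP': 43.0, 'FrCOUNTS': 1200, 'FrDRATE': 0.37,
              'FrCountID': 42, 'DID': 7, 'TIME': 1650000000}
    fake = FakeSerial()
    downlink_packet(fake, sample)
    print(len(fake.buffer))   # 41 bytes, matching the 0-40 byte layout documented above
    print(fake.buffer.hex())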
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Static analysis tool for checking compliance with Python docstring conventions.
See https://www.pantsbuild.org/docs/python-linters-and-formatters and
http://www.pydocstyle.org/en/stable/.
"""
from pants.backend.python.lint.pydocstyle import rules as pydocstyle_rules
from pants.backend.python.lint.pydocstyle import skip_field, subsystem
def rules():
return (*pydocstyle_rules.rules(), *skip_field.rules(), *subsystem.rules())
|
# python is happy
# make happy plots
import json
import matplotlib.pyplot as plt
import numpy as np
import argparse
try:
from scipy.optimize import curve_fit
except:
print "Cannot import curve_fit from scipy.optimize"
class Plotter:
def __init__(self, data_dir, plot_dir, get_data=True):
if data_dir[-1] != "/":
data_dir += "/"
if plot_dir[-1] != "/":
plot_dir += "/"
self.constants = {}
self.data_dir = data_dir
self.plot_dir = plot_dir
if get_data:
self.data = self.getData(data_dir)
xkcd_colors = ["pinkish red","azure","electric purple","bluish green","tangerine","neon pink","dark sky blue","avocado"]
self.colors = list("xkcd:{0}".format(c) for c in xkcd_colors)
def getData(self, data_dir):
sipm_file = "sipm_table.txt"
pd_file = "pd_table.txt"
dictionaries = [ { "file" : sipm_file, "tag" : "sipm", "element" : "rm" },
{ "file" : pd_file, "tag" : "pd", "element" : "pd" } ]
data = {}
data["sipm"] = []
data["pd"] = []
data["pd_quartz"] = []
data["pd_megatile"] = []
data["iteration"] = []
for pd in xrange(6):
element_name = "pd%d" % pd
data[element_name] = []
data["%s_ave" % element_name] = []
for rm in xrange(1,5):
element_name = "rm%d" % rm
data[element_name] = []
data["%s_ave" % element_name] = []
for sipm in xrange(48):
data["%s_sipm%d" % (element_name, sipm)] = []
# sipm array: {CU,RBX,Run,RM,sipm_ch,uhtr_ch,shunt,max_adc,max_fc,result}
# pd array: {CU,RBX,Run,pd_ch,uhtr_ch,shunt,max_adc,max_fc,result}
self.cu_list = []
charge_position = -2
adc_position = -3
iteration_position = 2
element_position = 3
sipm_position = 4
for dictionary in dictionaries:
f = dictionary["file"]
tag = dictionary["tag"]
element = dictionary["element"]
with open(data_dir + f) as df:
for line in df:
if line[0] == "#":
continue
s = line.split()
#if int(s[-1]) == 0:
# skip data that does not pass
#print line
#continue
#d = int(s[adc_position])
d = float(s[charge_position])
iteration = int(s[iteration_position])
element_index = int(s[element_position])
element_name = "%s%d" % (element, element_index)
data[tag].append(d)
data[element_name].append(d)
if iteration not in data["iteration"]:
data["iteration"].append(iteration)
# data organized by CU
cu = int(s[0])
if cu not in self.cu_list:
self.cu_list.append(cu)
cu_name = "cu%d" % cu
data[cu_name] = {}
for pd in xrange(6):
data[cu_name]["pd%d" % pd] = []
for rm in xrange(1,5):
data[cu_name]["rm%d" % rm] = []
data[cu_name]["sipm"] = []
# constants for normalization
self.constants[cu_name] = {}
# constants for pd0, pd1, and average of pd0 and pd1
self.constants[cu_name]["pd0"] = 0.0
self.constants[cu_name]["pd1"] = 0.0
self.constants[cu_name]["pd_ave"] = 0.0
# constants for RM SiPMs (4 constants for pd0, pd1, and average of pd0 and pd1)
self.constants[cu_name]["sipm_pd0"] = []
self.constants[cu_name]["sipm_pd1"] = []
self.constants[cu_name]["sipm_ave"] = []
# add elements per CU
data["cu%d"%cu][element_name].append(d)
if tag == "sipm":
data["%s_sipm%s" % (element_name, s[sipm_position])].append(d)
data[cu_name]["sipm"].append(d)
data["pd_quartz"] = data["pd0"] + data["pd0"]
data["pd_megatile"] = data["pd2"] + data["pd3"] + data["pd4"] + data["pd5"]
for cu in self.cu_list:
cu_key = "cu%d" % cu
for pd in xrange(6):
element_key ="pd%d" % pd
if data[cu_key][element_key]:
ave = np.mean(data[cu_key][element_key])
data["%s_ave" % element_key].append(ave)
else:
print "No pin-diode data for {0} {1}".format(cu_key, element_key)
for rm in xrange(1,5):
element_key ="rm%d" % rm
if data[cu_key][element_key]:
ave = np.mean(data[cu_key][element_key])
data["%s_ave" % element_key].append(ave)
else:
print "No sipm data for {0} {1}".format(cu_key, element_key)
#for key in data:
# print "length of {0} data: {1}".format(key, len(data[key]))
return data
def logarithm(self, x, a, b, c):
return a * np.log(b * x) + c
def getStat(self, x_min, x_max, y_min, y_max, loc=1):
        x_stat, y_stat = 0, 0
if loc == 0: # top left
x_stat = x_min + (x_max - x_min) / 8.0
y_stat = y_max - (y_max - y_min) / 3.0
elif loc == 1: # top right
x_stat = x_max - (x_max - x_min) / 3.0
y_stat = y_max - (y_max - y_min) / 3.0
elif loc == 2: # upper top left for scatter
x_stat = x_min + (x_max - x_min) / 25.0
y_stat = y_max - (y_max - y_min) / 4.0
return (x_stat, y_stat)
def plotHisto(self, data, info, fileName="", stacked=False):
name = info["name"]
title = info["title"]
xtitle = info["xtitle"]
ytitle = info["ytitle"]
nbins = info["nbins"]
units = info["units"]
setRange = info["setrange"]
statLocation = info["statloc"]
if setRange:
x_range = info["xrange"]
y_range = info["yrange"]
# data array can be a 1D or 2D matrix
# data array is 2D for stacked histograms
data_array = data[name]
data_list = []
stack_colors = []
if stacked:
for i, x in enumerate(data_array):
stack_colors.append(self.colors[i % len(self.colors)])
for d in x:
data_list.append(d)
else:
data_list = data_array
stack_names = list("RM %d" % rm for rm in xrange(1,5))
print "name = {0}".format(name)
#print "data_list = {0}".format(data_list)
if not data_list:
print "There is no data for {0}.".format(name)
return
fig, ax = plt.subplots()
entries = len(data_list)
mean = np.mean(data_list)
std = np.std(data_list)
var = 100.0 * std / mean
min_val = min(data_list)
max_val = max(data_list)
stat_string = "Num Entries = %d\n" % entries
stat_string += "Mean = %.2f %s\n" % (mean, units)
stat_string += "Std Dev = %.2f %s\n" % (std, units)
stat_string += "Variation = %.2f %%\n" % var
stat_string += "Min = %.2f %s\n" % (min_val, units)
stat_string += "Max = %.2f %s" % (max_val, units)
if setRange:
axes = plt.gca()
axes.set_xlim(x_range)
axes.set_ylim(y_range)
if stacked:
h_y, h_x, h = plt.hist(data_array, bins=nbins, range=x_range, color=stack_colors, label=stack_names, stacked=stacked)
else:
h_y, h_x, h = plt.hist(data_list, bins=nbins, range=x_range)
xstat, ystat = self.getStat(x_range[0], x_range[1], y_range[0], y_range[1], statLocation)
else:
if stacked:
h_y, h_x, h = plt.hist(data_array, bins=nbins, color=stack_colors, label=stack_names, stacked=stacked)
else:
h_y, h_x, h = plt.hist(data_list, bins=nbins)
xstat, ystat = self.getStat(min(h_x), max(h_x), min(h_y), max(h_y), statLocation)
legend = ax.legend(loc='upper left')
plt.text(xstat, ystat, stat_string)
plt.title(title)
plt.xlabel(xtitle)
plt.ylabel(ytitle)
if fileName:
plt.savefig(self.plot_dir + fileName + ".png")
plt.savefig(self.plot_dir + fileName + ".pdf")
else:
plt.savefig(self.plot_dir + name + ".png")
plt.savefig(self.plot_dir + name + ".pdf")
plt.clf()
plt.close()
# makes scatter plots and calculates constants
def plotScatter(self, info, hackColors=False):
name = info["name"]
ynames = info["ynames"]
x = info["xdata"]
ydata = info["ydata"]
title = info["title"]
xtitle = info["xtitle"]
ytitle = info["ytitle"]
plotFitTypes = info["plotfit"]
setRange = info["setrange"]
statLocation = info["statloc"]
if setRange:
x_range = info["xrange"]
y_range = info["yrange"]
f_box = ""
fig, ax = plt.subplots()
deg = 1
y_min = 10 ** 10
y_max = -10 ** 10
sipm_mean = np.mean(self.data["sipm"])
# XKCD Colors
if hackColors:
pinkish_red = "#f10c45"
azure = "#069af3"
self.colors = [pinkish_red, azure]
if len(ynames) != len(ydata):
print "The length of the ynames list should be the same as the number of y data sets."
return
if len(plotFitTypes) != len(ydata):
print "The length of the plotfit list should be the same as the number of y data sets."
return
print "number of x values: {0}".format(len(x))
for i, y in enumerate(ydata):
if min(y) < y_min:
y_min = min(y)
if max(y) > y_max:
y_max = max(y)
plotFit = plotFitTypes[i]
yname = ynames[i]
color = self.colors[i % len(self.colors)] # in case there are more y data sets than colors
print "number of y values for {0}: {1}".format(yname, len(y))
if plotFit == 1:
# calculate fit function
z = np.polyfit(x, y, deg)
f = np.poly1d(z)
f_string = str(f)
f_string = f_string.split("\n")[-1]
f_string = "{0} : f(x) = {1}".format(yname, f_string)
f_box += f_string + "\n"
print f_string
# calculate new x's and y's using fit function
x_new = np.linspace(min(x), max(x), 100)
y_new = f(x_new)
ax.plot(x,y,'o', c=color, label=yname, alpha=0.5)
ax.plot(x_new, y_new, '--', c=color, label="%s fit" % yname)
elif plotFit == 2:
# calculate fit function
popt, pcov = curve_fit(self.logarithm, x, y)
# calculate new x's and y's using fit function
x_new = np.linspace(min(x), max(x), 100)
y_new = self.logarithm(x_new, *popt)
if popt[2] >= 0.0:
f_string = "{0}: $f(x) = {1:.2f}\ \ln({2:.2f}\ x) + {3:.2f}$".format(yname, popt[0], popt[1], popt[2])
else:
f_string = "{0}: $f(x) = {1:.2f}\ \ln({2:.2f}\ x) {3:.2f}$".format(yname, popt[0], popt[1], popt[2])
print f_string
f_box += f_string + "\n"
ax.plot(x,y,'o', c=color, label="%s average" % yname, alpha=0.5)
ax.plot(x_new, y_new, '--', c=color, label="%s fit" % yname)
# calculate correction constants from fit function
if name == "rm_pd0" or name == "rm_pd1":
pd_name = name.split("_")[-1]
for cu in self.cu_list:
cu_name = "cu%d" % cu
if len(self.data[cu_name][pd_name]) != 1:
print "There is not exactly one {0} value for {1}!".format(pd_name, cu_name)
return
else: # there is only one value as expected
# the correction factor is the sipm mean divided by the expected value from the log fit
pd_value = self.data[cu_name][pd_name][0]
constant = sipm_mean / self.logarithm(pd_value, *popt)
self.constants[cu_name]["sipm_%s" % pd_name].append(constant)
print "CU {0} {1} {2}: pd_value = {3} : SiPM correction constant = {4}".format(cu, pd_name, yname, pd_value, constant)
else:
ax.plot(x,y,'o', c=color, label=yname, alpha=0.5)
if setRange:
axes = plt.gca()
axes.set_xlim(x_range)
axes.set_ylim(y_range)
xstat, ystat = self.getStat(x_range[0], x_range[1], y_range[0], y_range[1], statLocation)
else:
xstat, ystat = self.getStat(min(x), max(x), y_min, y_max, statLocation)
if f_box:
if f_box[-1] == "\n":
f_box = f_box[:-1]
ax.text(xstat, ystat, f_box)
legend = ax.legend(loc='lower right')
ax.grid(True)
plt.gcf().subplots_adjust(bottom=0.1)
plt.gcf().subplots_adjust(left=0.15)
plt.title(title)
plt.xlabel(xtitle)
plt.ylabel(ytitle)
plt.savefig(self.plot_dir + name + ".png")
plt.savefig(self.plot_dir + name + ".pdf")
plt.clf()
plt.close()
# plot data vs iteration
def plotIterations(self, info):
name = info["name"]
ynames = info["ynames"]
x = info["xdata"]
ydata = info["ydata"]
title = info["title"]
xtitle = info["xtitle"]
ytitle = info["ytitle"]
setRange = info["setrange"]
statLocation = info["statloc"]
connect = info["connect"]
if setRange:
x_range = info["xrange"]
y_range = info["yrange"]
fig, ax = plt.subplots()
print "number of x values: {0}".format(len(x))
for i, y in enumerate(ydata):
print "number of y values for channel {0}: {1}".format(i, len(y))
if not y:
print "no y values for channel {0}".format(i)
continue
#yname = ynames[i]
color = self.colors[i % len(self.colors)] # in case there are more y data sets than colors
if connect:
ax.plot(x, y, '-o', c=color, alpha=0.5)
else:
ax.plot(x, y, 'o', c=color, alpha=0.5)
if setRange:
axes = plt.gca()
axes.set_xlim(x_range)
axes.set_ylim(y_range)
xstat, ystat = self.getStat(x_range[0], x_range[1], y_range[0], y_range[1], statLocation)
legend = ax.legend(loc='upper right')
ax.grid(True)
plt.gcf().subplots_adjust(bottom=0.1)
plt.gcf().subplots_adjust(left=0.15)
plt.title(title)
plt.xlabel(xtitle)
plt.ylabel(ytitle)
plt.savefig(self.plot_dir + name + ".png")
plt.savefig(self.plot_dir + name + ".pdf")
plt.clf()
plt.close()
# should be used after making scatter plot, which calculates constants
def normalize(self):
self.data["norm_pd0"] = []
self.data["norm_pd1"] = []
self.data["norm_sipm"] = []
for cu in self.cu_list:
cu_name = "cu%d" % cu
# pin-diode corrections
for pd in xrange(2):
pd_name = "pd%d" % pd
if len(self.data[cu_name][pd_name]) != 1:
print "There is not exactly one {0} value for {1}!".format(pd_name, cu_name)
return
else: # there is only one value as expected
                    # the correction factor is the pd mean divided by the original pd value
pd_value = self.data[cu_name][pd_name][0]
pd_mean = np.mean(self.data[pd_name])
constant = pd_mean / pd_value
self.constants[cu_name][pd_name] = constant
averagePindiodeCorrection = np.mean([self.constants[cu_name]["pd0"], self.constants[cu_name]["pd1"]])
self.constants[cu_name]["pd_ave"] = averagePindiodeCorrection
print "CU {0} pin-diode correction constant : {1}".format(cu, self.constants[cu_name]["pd_ave"])
for pd in xrange(2):
pd_name = "pd%d" % pd
pd_value = self.data[cu_name][pd_name][0]
averagePindiodeCorrection = self.constants[cu_name]["pd_ave"]
self.data["norm_%s" % pd_name].append(averagePindiodeCorrection * pd_value)
# sipm corrections
averageSipmCorrections = list(np.mean([self.constants[cu_name]["sipm_pd0"][i], self.constants[cu_name]["sipm_pd1"][i]]) for i in xrange(4))
self.constants[cu_name]["sipm_ave"] = averageSipmCorrections
print "CU {0} SiPM correction constants : {1}".format(cu, self.constants[cu_name]["sipm_ave"])
for rm in xrange(1,5):
rm_name = "rm%d" % rm
for d in self.data[cu_name][rm_name]:
# the normalized value is the constant (sipm mean / expected value from log fit) times the original value
self.data["norm_sipm"].append(self.constants[cu_name]["sipm_ave"][rm-1] * d)
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--histo_config", "-c", default="config/final_histo.json", help="json config file for histograms")
parser.add_argument("--iteration_config", "-i", default="config/stability_iterations.json", help="json config file for iteration plots")
parser.add_argument("--scatter_config", "-s", default="config/final_scatter.json", help="json config file for scatter plots")
parser.add_argument("--data_dir", "-d", default="Nov17-18_Final_CU_Data", help="directory containing data tables")
parser.add_argument("--plot_dir", "-p", default="Nov17-18_Final_Plots", help="directory to save plots")
options = parser.parse_args()
plot_dir = options.plot_dir
data_dir = options.data_dir
histo_config = options.histo_config
iteration_config = options.iteration_config
scatter_config = options.scatter_config
# calls getData to set self.data on initialization
p = Plotter(data_dir, plot_dir)
# choose which plots to create
makeScatter = True
makeIterations = False
makeHistos = True
makeHistosPerCU = True
######################
# make scatter plots #
######################
if makeScatter:
# x data is one data set
# y data is a list of data sets
# multiple y data sets are plotted against one x data set
pd0_data = p.data["pd0_ave"]
pd1_data = p.data["pd1_ave"]
rm_data = list(p.data["rm{0}_ave".format(j)] for j in xrange(1,5))
# list of dictionaries containing plot information
with open(scatter_config) as json_file:
info = json.load(json_file)
info["pd1_pd0"]["xdata"] = pd0_data
info["pd1_pd0"]["ydata"] = [pd1_data]
info["rm_pd0"]["xdata"] = pd0_data
info["rm_pd0"]["ydata"] = rm_data
info["rm_pd1"]["xdata"] = pd1_data
info["rm_pd1"]["ydata"] = rm_data
info["rm_pd1_pd0"]["xdata"] = pd0_data
info["rm_pd1_pd0"]["ydata"] = rm_data + [pd1_data]
for key in info:
p.plotScatter(info[key])
# important for normalized SiPM plots
p.normalize()
########################
# make iteration plots #
########################
if makeIterations:
iteration_data = p.data["iteration"]
pd0_data = p.data["pd0"]
pd1_data = p.data["pd1"]
with open(iteration_config) as json_file:
info = json.load(json_file)
sipm_data = list(p.data["rm%d_sipm%d" % (rm, sipm)] for sipm in xrange(48) for rm in xrange(1,5))
info["rm_pd_stability"]["xdata"] = iteration_data
info["rm_pd_stability"]["ydata"] = sipm_data + [pd0_data] + [pd1_data]
for key in info:
p.plotIterations(info[key])
###################
# make histograms #
###################
if makeHistos:
with open(histo_config) as json_file:
info = json.load(json_file)
for key in info:
p.plotHisto(p.data, info[key])
#########################
# make one histo per CU #
#########################
if makeHistosPerCU:
cu_info = {}
cu_info["name"] = ""
cu_info["title"] = ""
cu_info["xtitle"] = "Max Charge (pC)"
cu_info["ytitle"] = "Number of Channels"
cu_info["nbins"] = 25
cu_info["units"] = "pC"
cu_info["setrange"] = 1
cu_info["statloc"] = 1
cu_info["xrange"] = [0, 2500]
cu_info["yrange"] = [0, 100]
for key in p.data:
if "cu" in key:
print "Plot histogram for {0}".format(key)
cu_number = key.split("cu")[-1]
cu_info["name"] = "sipm"
cu_info["title"] = "SiPM Max Charge for CU {0}".format(cu_number)
p.plotHisto(p.data[key], cu_info, "%s_sipm" % key)
# make stacked histograms per CU per RM
print "Plot stacked histograms for {0}".format(key)
cu_info["name"] = "stacked_sipm"
# check ordering. may need to be a transposed np.array
cu_rm_data = {}
cu_rm_data["stacked_sipm"] = list(p.data[key]["rm%d" % rm] for rm in xrange(1,5))
p.plotHisto(cu_rm_data, cu_info, "%s_stacked_sipm" % key, stacked=True)
|
from app.models import Location, City
class LocationBuilder(object):
def __init__(self, city_name, street='', support=0):
city = City.objects.create(name=city_name)
self.location = Location.objects.create(city=city, street=street, support=support)
def with_street(self, street):
self.location.street = street
return self
def with_support(self, support):
        self.location.support = support
return self
def with_note(self, note):
self.location.note = note
return self
def build(self):
return self.location
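# Minimal usage sketch (illustration only; the city and street values are made up
# and a configured Django database is assumed):
#   location = (LocationBuilder("Springfield")
#               .with_street("Evergreen Terrace")
#               .with_support(2)
#               .build())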
|
"""
Write a program to capture any filename from the keyboard and display its filename and extension separately
Enter any filename : hello.py
Filename : hello
Extension : py
"""
filename = input("Enter the Filename with extention:")
data = (filename.split('.'))
print("Filename :", data[0])
print("extention :", data[1])
"""
Write a program to capture two numbers separately from the keyboard and display its sum
Enter first number : 10
Enter second number : 20
Sum of the number : 30
"""
vala = int(input("Enter the first no. :"))
valb = int(input("Enter the second no. :"))
total = sum([vala, valb])
print("Total sum of given numbers is :", total)
|
from Bio import SeqIO
import pandas as pd
'''
print("start?")
start = input('That is :')
print("end?")
end = input('That is :')
'''
aimseq = []
site = []
prid = []
n = 0
for seq_record in SeqIO.parse("TIR.fasta", "fasta"):
n = n + 1
for index,AA in enumerate(seq_record):
if AA == 'C':
print(n,index)
if index < 25:
c1 = (25-index)*'*' + str(seq_record.seq[0:(index+25)])
aimseq.append(c1)
print(c1)
            # elif: keep exactly one window per cysteine so aimseq stays aligned with site and prid
            elif (index + 25) > len(seq_record.seq):
c2 = str(seq_record.seq[(index-25):]) + (25-(len(seq_record.seq)-index)+1)*'*'
aimseq.append(c2)
print(c2)
else:
c3 = str(seq_record.seq[(index-25):(index+25)])
aimseq.append(c3)
print(c3)
site.append(index)
prid.append(seq_record.id[3:9])
print('\t')
while aimseq.count('')>0:
aimseq.remove('')
data = {"uniprotID":prid,"site":site,"sequence":aimseq}
dfend = pd.DataFrame(data)
dfend.to_excel('TIRseqC.xlsx')
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
import random
from .models import *
from instagram import functions
from datetime import datetime, timezone,timedelta
now = datetime.now(timezone.utc)
from payment.payment_functions import *
from django.template.loader import render_to_string
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail
from django.core.mail import EmailMessage
def create_packages(request):
packages = [ ['deneme','Socinsta Deneme Paketi',3,1,0],
['temel','Socinsta Temel Paket',1,0,0],
['haftalık','Socinsta Haftalık Temel Paket',7,1,75],
['bireysel','Socinsta Aylık Bireysel Paket',30,1,150],
['profesyonel','Socinsta 3 Aylık Profesyonel Paket',90,3,300],
['premium','Socinsta 6 Aylık Premium Paket',180,5,500],
]
for i in packages:
if Package.objects.filter(name=i[0]):
packagex = Package.objects.filter(name=i[0])[0]
else:
package = Package(name=i[0],description=i[1],offered_days=i[2],account_count=i[3],package_price=i[4])
package.save()
packagex = package
if not License.objects.filter(main_user=request.user).exists():
License(main_user = request.user, package = packagex, status=1).save()
return redirect("/profile/")
@login_required(login_url='/login/')
def add_cart(request):
if request.POST:
card_datas = {}
if Card.objects.filter(main_user=request.user, payment_status=9).exists():
current_card = Card.objects.filter(main_user=request.user, payment_status=9)[0]
current_card.package = Package.objects.filter(name=request.POST.get('package_name'))[0]
current_card.updated_time= datetime.now(timezone.utc)
current_card.save()
card_datas = read_card(current_card.order_id)
elif Card.objects.filter(main_user=request.user, payment_status=2).exists():
return redirect("/payment/havale")
else:
new_card = Card(main_user=request.user, payment_status=9)
order_id = random.randint(100000, 999999)
while Card.objects.filter(order_id=order_id).exists():
order_id = random.randint(100000, 999999)
new_card.order_id = order_id
new_card.package = Package.objects.filter(name=request.POST.get('package_name'))[0]
new_card.payment_status = 9
new_card.updated_time= datetime.now(timezone.utc)
new_card.save()
card_datas = read_card(order_id)
return render(request, 'card.html', card_datas)
else:
return render(request, 'pricing.html')
@login_required(login_url='/login/')
def add_coupon(request):
if request.POST:
card_datas = {}
coupon_name = request.POST.get('coupon').upper()
current_card = Card.objects.filter(main_user=request.user, payment_status=9)[0]
if Coupon.objects.filter(name = coupon_name).exists():
coupon = Coupon.objects.filter(name = coupon_name)[0]
if coupon.status == 1:
current_card.coupon = coupon
current_card.updated_time= datetime.now(timezone.utc)
current_card.save()
card_datas = read_card(current_card.order_id)
card_datas['pop_up'] = "coupon_success()"
return render(request, 'card.html', card_datas)
else:
card_datas = read_card(current_card.order_id)
card_datas['pop_up'] = "coupon_ended()"
return render(request, 'card.html', card_datas)
else:
card_datas = read_card(current_card.order_id)
card_datas['coupon_code'] = coupon_name
card_datas['pop_up'] = "coupon_notfind()"
return render(request, 'card.html', card_datas)
else:
return render(request, 'pricing.html')
@login_required(login_url='/login/')
def remove_item(request):
if request.POST:
current_card = Card.objects.filter(main_user=request.user, payment_status=9)[0]
item = request.POST.get('remove')
if item =='package':
current_card.package = None
current_card.coupon = None
current_card.updated_time= datetime.now(timezone.utc)
current_card.save()
return render(request, 'pricing.html')
elif item == 'coupon':
current_card.coupon = None
current_card.updated_time= datetime.now(timezone.utc)
current_card.save()
card_datas = read_card(current_card.order_id)
return render(request, 'card.html', card_datas)
else:
return render(request, 'pricing.html')
else:
return render(request, 'pricing.html')
@csrf_exempt
def callback(request):
if request.POST:
order_id = request.POST.get('platform_order_id')
status = request.POST.get('status')
signature = request.POST.get('signature')
card = Card.objects.filter(order_id = order_id)[0]
user_license = License.objects.filter(main_user=card.main_user)[0]
instagram_accounts=functions.get_linked_accounts(card.main_user)
check_linked_assistants_list=functions.check_linked_assistans(card.main_user)
if status =='success':
buyed_license = License.objects.filter(main_user=card.main_user)[0]
buyed_license.package = card.package
buyed_license.created_date = datetime.now(timezone.utc)
buyed_license.status = 1
buyed_license.save()
card.payment_status = 1
card.save()
instagram_accounts=functions.get_linked_accounts(card.main_user)
check_linked_assistants_list=functions.check_linked_assistans(card.main_user)
try:
license_datas = functions.license_data(card.main_user)
except:
license_datas = 0
return render(request,"profile.html",{"package_name":card.package.description,"pop_up":"payment_success()","wow":"none","wow2":"block","challenge_code":"none","user":card.main_user.first_name + ' ' + card.main_user.last_name,"ig_accounts":instagram_accounts,"number":len(instagram_accounts),"assistants_list":check_linked_assistants_list,"license_data":license_datas,"ig_username":"block","ig_username_disabled":"none","sms_or_mail":"none"})
else:
try:
license_datas = functions.license_data(card.main_user)
except:
license_datas = 0
return render(request,"profile.html",{"pop_up":"payment_failed()","wow":"none","wow2":"block","challenge_code":"none","user":request.user,"ig_accounts":instagram_accounts,"number":len(instagram_accounts),"assistants_list":check_linked_assistants_list,"license_data":license_datas,"ig_username":"block","ig_username_disabled":"none","sms_or_mail":"none"})
else:
return render(request, 'pricing.html')
def havale(request):
if request.POST:
card = Card.objects.filter(order_id = request.POST["order_id"])[0]
card_datas = read_card(card.order_id)
post_type = request.POST["post_type"]
if post_type == "1":
card.payment_status = 2
card.save()
current_site = get_current_site(request)
email1 = "ismcagilci@gmail.com"
email2 = "bedriyan@gmail.com"
message = render_to_string('payment_confirmation.html' ,{
'user': request.user,
'order_id' : request.POST["order_id"],
'payment_amount' : card.package.package_price
})
email = EmailMessage("Ödeme onayı", message, to=[email1,email2])
email.send()
return render(request,"havale_onay.html",card_datas)
elif post_type == "2":
card.payment_status = 9
card.save()
return redirect("/pricing/")
else:
card = Card.objects.filter(main_user = request.user, payment_status=9)
if card:
card = card[0]
card_datas = read_card(card.order_id)
return render(request,"havale.html",card_datas)
else:
card = Card.objects.filter(main_user = request.user, payment_status=2)
card = card[0]
card_datas = read_card(card.order_id)
return render(request,"havale_onay.html",card_datas)
|
# Inventory Categories
CAT_SKILLS = 16
# Market Groups We Care About
market_group_ammo = 11
market_group_drones = 157
market_group_implants = 24
market_group_ship_equipment = 9
market_group_ships = 4
market_groups_filter = [
market_group_ammo,
market_group_drones,
market_group_implants,
market_group_ship_equipment,
market_group_ships,
]
# Dogma Effects
low_slot = 11
high_slot = 12
mid_slot = 13
rig_slot = 2663
sub_system_slot = 3772
# Dogma Effects: Slots
dogma_effects_slots = [
high_slot,
mid_slot,
low_slot,
rig_slot,
sub_system_slot,
]
|
#!/usr/bin/python3
import sys
from server import *
from client import client
from time import sleep
# print ("This is the name of the script: ", sys.argv[0])
# print ("Number of arguments: ", len(sys.argv))
# print ("The arguments are: " , str(sys.argv))
if __name__ == "__main__":
global stopServer
stopServer = False
# port 0 means to select an arbitrary unused port
HOST, PORT = "0.0.0.0", 0
if len(sys.argv) > 1:
print ("The port number to use is: ", sys.argv[1])
PORT = int ( sys.argv[1] )
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
server.initialise()
ip, port = server.server_address
print ( "Port:", port, type(port))
print ( "IP:", ip, type(ip))
# start a thread with the server.
# the thread will then start one more thread for each request.
server_thread = threading.Thread(target=server.serve_forever)
# exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
print("Server loop running in thread:", server_thread.name)
# server_thread.join()
while stopServer == False:
pass
server.shutdown()
# while server_thread.isAlive():
# pass
# client(ip, port, "Hello World 1")
# client(ip, port, "Hello World 2")
# client(ip, port, "Hello World 3")
# main tread runs server, other threads are spun off for received messages etc
# server.serve_forever()
# global stopServer
# stopServer = False
# while stopServer:
# pass
# print ("finished")
# server.shutdown()
# print ("server shutdown")
# # exit the server thread when the main thread terminates
# server_thread.daemon = False
# # exit main when server is killed
# # server_thread.daemon = True
# server_thread.start()
# print("Server loop running in thread:", server_thread.name)
# while server_thread.isAlive():
# pass
# server.shutdown()
# # sleep(1000)
# # client(ip, port, "Hello World 1")
# # client(ip, port, "Hello World 2")
# # client(ip, port, "Hello World 3")
# # client(ip, port, "HELO text\n")
# # client(ip, port, "KILL_SERVICE\n")
# # server.shutdown()
|
# Number base (radix) representation
print(0b10) # binary
print(0o10) # octal
print(10) # decimal - the default
print(0x10) # hexadecimal
print("{:b} {:o} {} {:x}".format(10,10,10,10))
|
import tempfile
import numpy as np
from flask import current_app
from scipy.io.wavfile import write
from app.main import synthesizer
from app.main import vocoder
from app.main import client
from app.main.util.transliterate import translit
from app.main.util.tacotron.model import Synthesizer
from app.main.util.vocoder.vocoder import infer_waveform
def voice_text(voice_id, query_id, text):
embed = None
with tempfile.TemporaryFile(mode='w+b') as f:
client.download_fileobj(
current_app.config['BUCKET_NAME'],
f'{voice_id}.npy',
f
)
f.seek(0)
embed = np.load(f, allow_pickle=True)
texts = [translit(t) for t in text.split("\n")]
embeds = np.stack([embed] * len(texts))
specs = synthesizer.synthesize_spectrograms(texts, embeds)
breaks = [spec.shape[1] for spec in specs]
spec = np.concatenate(specs, axis=1)
wav = infer_waveform(vocoder, spec)
b_ends = np.cumsum(np.array(breaks) * Synthesizer.hparams.hop_size)
b_starts = np.concatenate(([0], b_ends[:-1]))
    wavs = [wav[start:end] for start, end in zip(b_starts, b_ends)]
breaks = [np.zeros(int(0.15 * Synthesizer.sample_rate))] * len(breaks)
wav = np.concatenate([i for w, b in zip(wavs, breaks) for i in (w, b)])
wav = wav / np.abs(wav).max() * 0.97
result = tempfile.TemporaryFile()
write(result, Synthesizer.sample_rate, wav)
result.seek(0)
client.upload_fileobj(result, current_app.config['BUCKET_NAME'], f'{query_id}.wav')
return {
'status': 'success',
'message': 'Text was voiced'
}, 200
|
import os
import downloadEmail
emailAddress = os.environ.get("python_email")
emailPw = os.environ.get("python_password")
downloadEmail.downloadEmails(emailAddress, emailPw, 'therealnicola@gmail.com')
|
'''
Created on Nov 1, 2011
@author: jason
'''
import simplejson
import MongoEncoder.MongoEncoder
from Map.BrowseTripHandler import BaseHandler
class RealTimeSearchAllHandler(BaseHandler):
def get(self, name):
_name = name.upper()
objects = []
users = self.syncdb.users.find({'lc_username': {'$regex':'^'+_name}}).limit(5)
trips = self.syncdb.trips.find({'lc_tripname': {'$regex':'^'+_name}}).limit(5)
guides = self.syncdb.guides.find({'lc_guidename': {'$regex':'^'+_name}}).limit(5)
sites = self.syncdb.sites.find({'lc_sitename': {'$regex':'^'+_name}}).limit(5)
if users.count()>0:
objects.append(users)
if trips.count()>0:
objects.append(trips)
if guides.count()>0:
objects.append(guides)
if sites.count()>0:
objects.append(sites)
if len(objects) >0 :
self.write(unicode(simplejson.dumps(objects, cls=MongoEncoder.MongoEncoder.MongoEncoder)))
else:
self.write('not found');
class SearchUserHandler(BaseHandler):
def get(self, name):
_name = name.upper()
users = self.syncdb.users.find({'lc_username': {'$regex':'^'+_name}})
if users.count() >= 1 :
self.write(self.render_string("Module/searchpeopleresult.html", searchuserresults = users))
else:
self.write(self.render_string("Module/searchpeopleresult.html", searchuserresults = None))
class RealTimeSearchUserHandler(BaseHandler):
def get(self, name):
#objects = []
_name = name.upper()
users = self.syncdb.users.find({'lc_username': {'$regex':'^'+_name}})
if users.count() >0 :
#objects.append(users)
self.write(unicode(simplejson.dumps(users, cls=MongoEncoder.MongoEncoder.MongoEncoder)))
else:
self.write('not found');
class SearchFriendHandler(BaseHandler):
def get(self, name):
_name = name.upper()
users = self.syncdb.users.find({'lc_username': {'$regex':'^'+_name}})
if users.count() >= 1 :
self.write(self.render_string("Module/searchfriendresult.html", searchuserresults = users))
else:
self.write(self.render_string("Module/searchpeopleresult.html", searchuserresults = None))
|
import requests
from bs4 import BeautifulSoup
import json
import time
import datetime
import pymysql
from config import *
def get_html(url,data):
'''
    :param url: the URL to request
    :param data: the request parameters
    :return: the HTML source of the response
'''
response = requests.get(url,data)
return response.text
def parse_html(html):
'''
    :param html: the HTML source to parse
    :return: a generator that yields the fields of each scraped table row
'''
soup = BeautifulSoup(html, 'lxml')
table = soup.find("table", attrs={"id": "report"})
trs = table.find("tr").find_next_siblings()
for tr in trs:
tds = tr.find_all("td")
yield [
tds[0].text.strip(),
tds[1].text.strip(),
tds[2].text.strip(),
tds[3].text.strip(),
tds[4].text.strip(),
tds[5].text.strip(),
tds[6].text.strip(),
tds[7].text.strip(),
tds[8].text.strip(),
]
def write_to_file(content):
'''
    :param content: the content to write to the file
'''
with open("result.txt",'a',encoding="utf-8") as f:
f.write(json.dumps(content,ensure_ascii=False)+"\n")
def write_to_mysql(content):
db = pymysql.connect(DB_HOST, DB_USER, DB_PWD, DB_NAME, charset='utf8')
    # create a cursor object with the cursor() method
    cursor = db.cursor()
    sql = "INSERT INTO hshfy(court, court_room, court_date, case_no, undertaking_department, presiding_judge, plaintiff, defendant) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')" % (content[0], content[1], content[2], content[3], content[4], content[5], content[6], content[7])
    cursor.execute(sql)
    db.commit()  # pymysql does not autocommit by default, so commit explicitly
    db.close()
def get_page_nums():
'''
    :return: the total number of pages that need to be crawled
'''
base_url = "http://www.hshfy.sh.cn/shfy/gweb/ktgg_search_content.jsp?"
date_time = datetime.date.fromtimestamp(time.time())
data = {
"pktrqks": date_time,
"ktrqjs": date_time,
}
while True:
html = get_html(base_url,data)
soup = BeautifulSoup(html, 'lxml')
if soup.body.text.strip() == "系统繁忙":
print("系统繁忙,登录太频繁,ip被封锁")
time.sleep(ERROR_SLEEP_TIME)
continue
else:
break
res = soup.find("div",attrs={"class":"meneame"})
page_nums = res.find('strong').text
    # page_nums here is the total number of scraped records; at 15 records per page, the total page count is computed below
page_nums = int(page_nums)
if page_nums %15 == 0:
page_nums = page_nums//15
else:
page_nums = page_nums//15 + 1
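    # equivalently (illustration only): ceiling division on the record count,
    # e.g. page_nums = -(-total_records // 15)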
print("总页数:",page_nums)
return page_nums
def main():
'''
    Crawl the data in an endless loop
'''
page_nums = get_page_nums()
if not True:
return
base_url = "http://www.hshfy.sh.cn/shfy/gweb/ktgg_search_content.jsp?"
while True:
date_time = datetime.date.fromtimestamp(time.time())
page_num = 1
data = {
"pktrqks": date_time,
"ktrqjs": date_time,
"pagesnum":page_num
}
while page_num <= page_nums:
print(data)
while True:
html = get_html(base_url, data)
soup = BeautifulSoup(html, 'lxml')
if soup.body.text.strip() == "系统繁忙":
print("系统繁忙,登录太频繁,ip被封锁")
time.sleep(ERROR_SLEEP_TIME)
continue
else:
break
res = parse_html(html)
for i in res:
write_to_mysql(i)
print("爬取完第【%s】页,总共【%s】页" %(page_num,page_nums))
page_num+=1
data["pagesnum"] = page_num
time.sleep(1)
else:
print("爬取完毕")
print("开始休眠.......")
time.sleep(SLEEP_TIME)
if __name__ == '__main__':
main()
|
# -*- coding:utf-8 -*-
'''
Created on March 31, 2016
@author: huke
'''
def adventureFeature():
L = []
n = 1
while n <= 99:
L.append(n)
n+=2
print(L)
if __name__ == '__main__':
adventureFeature()
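# Equivalent one-liner (illustration only): print(list(range(1, 100, 2)))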
|
import os
import numpy as np
import tensorflow as tf
from PIL import Image
from random import randint
import config
class dataSet:
def __init__(self, seed, tag, path, width=config.image_width, height=config.image_height):
self.seed = seed
self.tag = tag
self.img_set = os.listdir(path)
self.path = path
self.width = width
self.height = height
self.gen_img = (img for img in self.img_set)
self.pos = 0
def show(self, rgb):
im = Image.fromarray((rgb * 255).astype('uint8'))
im.show()
def handler(self, img):
image = Image.open(img)
x, y = self.seed[self.pos]
x = x % (image.width - self.width)
y = y % (image.height - self.height)
if self.tag:
x *= config.ratio
y *= config.ratio
self.pos += 1
im = np.asarray(image.crop((x, y, x + self.width, y + self.height))) / 255 * 2 - 1
return im
def batch(self, batch_size=config.batch_size):
return np.asarray([self.handler(os.path.join(self.path, next(self.gen_img)))
for i in range(batch_size)])
def load_data():
seed = [(randint(0, 127), randint(0, 127)) for x in range(config.image_num)]
return (dataSet(seed, 0, config.data_train_LR, config.image_width, config.image_height),
dataSet(seed, 1, config.data_train_HR, config.origin_width, config.origin_height))
if __name__ == '__main__':
X_train, y_train = load_data()
X_train.batch()
y_train.batch()
|
from os.path import join
import sys
from invoke import ctask as task, Collection
# Underscored func name to avoid shadowing kwargs in build()
@task(name='clean')
def _clean(c):
"""
Nuke docs build target directory so next build is clean.
"""
c.run("rm -rf {0}".format(c.sphinx.target))
# Ditto
@task(name='browse')
def _browse(c):
"""
Open build target's index.html in a browser (using 'open').
"""
index = join(c.sphinx.target, c.sphinx.target_file)
c.run("open {0}".format(index))
@task(default=True, help={
'opts': "Extra sphinx-build options/args",
'clean': "Remove build tree before building",
'browse': "Open docs index in browser after building",
'warn': "Build with stricter warnings/errors enabled",
})
def build(c, clean=False, browse=False, warn=False, opts=None):
"""
Build the project's Sphinx docs.
"""
if clean:
_clean(c)
if opts is None:
opts = ""
if warn:
opts += " -n -W"
cmd = "sphinx-build{2} {0} {1}".format(
c.sphinx.source,
c.sphinx.target,
(" " + opts) if opts else "",
)
c.run(cmd, pty=True)
if browse:
_browse(c)
@task
def tree(c):
ignore = ".git|*.pyc|*.swp|dist|*.egg-info|_static|_build|_templates"
c.run("tree -Ca -I \"{0}\" {1}".format(ignore, c.sphinx.source))
# Vanilla/default/parameterized collection for normal use
ns = Collection(_clean, _browse, build, tree)
ns.configure({
'sphinx': {
'source': 'docs',
# TODO: allow lazy eval so one attr can refer to another?
'target': join('docs', '_build'),
'target_file': 'index.html',
}
})
# Multi-site variants, used by various projects (fabric, invoke, paramiko)
# Expects a tree like sites/www/<sphinx> + sites/docs/<sphinx>,
# and that you want 'inline' html build dirs, e.g. sites/www/_build/index.html.
def _site(name, build_help):
_path = join('sites', name)
# TODO: turn part of from_module into .clone(), heh.
self = sys.modules[__name__]
coll = Collection.from_module(self, name=name, config={
'sphinx': {
'source': _path,
'target': join(_path, '_build')
}
})
coll['build'].__doc__ = build_help
return coll
# Usage doc/API site (published as e.g. docs.myproject.org)
docs = _site('docs', "Build the API docs subsite.")
# Main/about/changelog site (e.g. (www.)?myproject.org)
www = _site('www', "Build the main project website.")
|
import vk_api
import random
import os
from vk_api.longpoll import VkLongPoll, VkEventType
from vk_bot import vkBot
token = os.environ.get('BOT_TOKEN')
vk = vk_api.VkApi(token=token)
longpoll = VkLongPoll(vk)
def write_msg(user_id, message):
vk.method('messages.send', {'user_id': user_id, 'message': message, "random_id": random.randint(0, 1000)})
for event in longpoll.listen():
if event.type == VkEventType.MESSAGE_NEW:
if event.to_me:
# print('New message:')
# print(f'For me by: {event.user_id}', end=' ')
bot = vkBot(event.user_id)
message = bot.new_msg(event.text)
if isinstance(message, list):
write_msg(event.user_id, 'Время | Предмет | Аудитория | Неделя')
new_string = ''
for row in message:
write_msg(event.user_id, str(row)[1:-1].replace(', None', '').replace(',', ' | '))
else:
                write_msg(event.user_id, message)  # reuse the reply computed above instead of calling new_msg() twice
# print('Text: ', event.text)
|
# Four Codes that break Python
# Name Error
games()
#games is an undefined function
# Syntax Error
if 4 $ 5:
# "$" isn't an operator
# Type Error
games = 42
for i in games:
    print(i)
# Not able to loop through an int
# Attribute Error
None.lower()
# None has no attribute to lower
|
#!/usr/bin/env python3
import rmt_py_wrapper
import json
import sys, getopt
import time
import psutil
import socket
def usage():
print("Usage:")
print("\t-g | --get_config")
print("\t-s | --set_config")
print("\t-n eth0 | --net-intf eth0")
print("\t--send_file")
print("\t--recv_file")
print("Example:")
print("\t./server_example.py -gs -n eth0")
def get_config(dev_list, dev_num):
r"""
Get current configuration and status from the given devices
The following APIs are used to implement this function:
- rmt_py_wrapper.rmt_server_get_info()
"""
# Create config key string
config_list = ["cpu", "ram", "hostname", "wifi"]
config_key_str = ""
for item in config_list:
config_key_str += item + ';'
# Get device info list
id_list = rmt_py_wrapper.ulong_array(dev_num)
for i in range(0, dev_num):
id_list[i] = dev_list[i].deviceID
info_num_ptr = rmt_py_wrapper.new_intptr()
info_list = rmt_py_wrapper.data_info_list.frompointer(rmt_py_wrapper.rmt_server_get_info(id_list, dev_num, config_key_str, info_num_ptr))
info_num = rmt_py_wrapper.intptr_value(info_num_ptr)
rmt_py_wrapper.delete_intptr(info_num_ptr) # release info_num_ptr
print("=== get config result ===")
config_data = []
for i in range(0, info_num):
# Split the result string into dictionary data
result_list = info_list[i].value_list.split(";")
dict_data = {"deviceID": info_list[i].deviceID}
print("deviceID=%d" % info_list[i].deviceID)
for item in result_list:
for key in config_list:
if key in item:
dict_data[key] = item[len(key)+1:]
# print(dict_data)
config_data.append(dict_data)
result = json.dumps(config_data, indent=4)
print(result)
# Free info_list
rmt_py_wrapper.rmt_server_free_info(info_list.cast())
return config_data
def set_diff_config():
r"""
Set different configurations or states to the given devices
The following APIs are used to implement this function:
- rmt_py_wrapper.rmt_server_set_info()
"""
# Create data_info_array to set config
dev_num = 2
data_info_array = rmt_py_wrapper.new_data_info_array(dev_num)
# Set for device 5566:
data_info_element = rmt_py_wrapper.data_info()
data_info_element.deviceID = 5566
data_info_element.value_list = "hostname:rqi-1234;locate:on"
dev_idx = 0
rmt_py_wrapper.data_info_array_setitem(data_info_array, dev_idx, data_info_element)
# Set for device 6166:
data_info_element.deviceID = 6166
data_info_element.value_list = "hostname:new_hostname;locate:on"
dev_idx = 1
rmt_py_wrapper.data_info_array_setitem(data_info_array, dev_idx, data_info_element)
# Print what we want to set in data_info_array
print("=== set diff config req ===")
dev_idx = 0
data_info_element = rmt_py_wrapper.data_info_array_getitem(data_info_array, dev_idx)
print("deviceID=%d" % data_info_element.deviceID)
print("value_list=%s" % data_info_element.value_list)
dev_idx = 1
data_info_element = rmt_py_wrapper.data_info_array_getitem(data_info_array, dev_idx)
print("deviceID=%d" % data_info_element.deviceID)
print("value_list=%s" % data_info_element.value_list)
# Send data_info_array to RMT library
info_num_ptr = rmt_py_wrapper.new_intptr()
info_list = rmt_py_wrapper.data_info_list.frompointer(rmt_py_wrapper.rmt_server_set_info(data_info_array, dev_num, info_num_ptr))
info_num = rmt_py_wrapper.intptr_value(info_num_ptr)
rmt_py_wrapper.delete_intptr(info_num_ptr) # release info_num_ptr
print("=== set diff config result ===")
config_data = []
for i in range(0, info_num):
# Split the result string into dictionary data
result_list = info_list[i].value_list.split(";")
dict_data = {"deviceID": info_list[i].deviceID}
# print(info_list[i].deviceID)
# print(info_list[i].value_list)
for item in result_list:
key_value_pair = item.split(":")
if len(key_value_pair) > 1:
key = key_value_pair[0]
value = key_value_pair[1]
dict_data[key] = value
# print(dict_data)
config_data.append(dict_data)
result = json.dumps(config_data, indent=4)
print(result)
# Free info_list
rmt_py_wrapper.rmt_server_free_info(info_list.cast())
def set_same_config():
r"""
Set the same configuration or state to each given devices
The following APIs are used to implement this function:
- rmt_py_wrapper.rmt_server_set_info_with_same_value()
"""
# Prepare mock data for setting config
dev_num = 2
id_list = rmt_py_wrapper.ulong_array(dev_num)
id_list[0] = 5566
id_list[1] = 5567
config_str = "hostname:rqi-1234;locate:on"
# Send data_info_array to RMT library
info_num_ptr = rmt_py_wrapper.new_intptr()
info_list = rmt_py_wrapper.data_info_list.frompointer(rmt_py_wrapper.rmt_server_set_info_with_same_value(id_list, dev_num, config_str, info_num_ptr))
info_num = rmt_py_wrapper.intptr_value(info_num_ptr)
rmt_py_wrapper.delete_intptr(info_num_ptr) # release info_num_ptr
print("=== set same config result ===")
config_data = []
for i in range(0, info_num):
# Split the result string into dictionary data
result_list = info_list[i].value_list.split(";")
dict_data = {"deviceID": info_list[i].deviceID}
# print(info_list[i].deviceID)
# print(info_list[i].value_list)
for item in result_list:
key_value_pair = item.split(":")
if len(key_value_pair) > 1:
key = key_value_pair[0]
value = key_value_pair[1]
dict_data[key] = value
# print(dict_data)
config_data.append(dict_data)
result = json.dumps(config_data, indent=4)
print(result)
# Free info_list
rmt_py_wrapper.rmt_server_free_info(info_list.cast())
def discover():
r"""
Discover all the available agents in the same network
The following APIs are used to implement this function:
- rmt_py_wrapper.rmt_server_create_device_list()
"""
num_ptr = rmt_py_wrapper.new_intptr()
dev_list = rmt_py_wrapper.device_info_list.frompointer(rmt_py_wrapper.rmt_server_create_device_list(num_ptr))
num = rmt_py_wrapper.intptr_value(num_ptr)
rmt_py_wrapper.delete_intptr(num_ptr) # release num_ptr
# Put data in JSON format
data = {"total": num, "items": []}
items = []
for i in range(0, num):
item = {
"ID": dev_list[i].deviceID,
"Model": dev_list[i].model,
"Host": dev_list[i].host,
"IP": dev_list[i].ip,
"MAC": dev_list[i].mac,
"RMT_VERSION": dev_list[i].rmt_version,
"Device_Info": dev_list[i].devinfo
}
items.append(item)
print("=== discover result ===")
data["items"] = items
result = json.dumps(data, indent=4)
print(result)
return dev_list, num
def test_send_binary():
r"""
Send a binary file to the target device(s)
The following APIs are used to implement this function:
- rmt_py_wrapper.rmt_server_send_file()
- rmt_py_wrapper.rmt_server_get_result()
"""
print("=== test send binary ===")
custom_callback = "custom_callback"
filename = "my_testfile"
bytes_buffer = b"a\0bc\r\ndef\tg" # convert to bytes
dev_num = 1
target_id = 6166
id_list = rmt_py_wrapper.ulong_array(dev_num)
id_list[0] = target_id
agent_status = rmt_py_wrapper.rmt_server_send_file(id_list, dev_num, custom_callback, filename, bytes_buffer)
print("send_file: agent_status=%d" % agent_status)
agent_status, result, byte_array = rmt_py_wrapper.rmt_server_get_result(target_id)
while agent_status == rmt_py_wrapper.STATUS_RUNNING:
print("sleep for 1 second")
time.sleep(1)
agent_status, result, byte_array = rmt_py_wrapper.rmt_server_get_result(target_id)
print("get_result: agent_status=%d" % agent_status)
print("transfer_result=%d" % result)
print(bytes(byte_array).decode("utf-8"))
def test_recv_binary():
r"""
Retrieve a binary file from a target device
The following APIs are used to implement this function:
- rmt_py_wrapper.rmt_server_recv_file()
- rmt_py_wrapper.rmt_server_get_result()
"""
print("=== test recv binary ===")
target_id = 6166
custom_callback = "custom_callback"
filename = "my_testfile"
agent_status = rmt_py_wrapper.rmt_server_recv_file(target_id, custom_callback, filename)
print("recv_file: agent_status=%d" % agent_status)
agent_status, result, byte_array = rmt_py_wrapper.rmt_server_get_result(target_id)
while agent_status == rmt_py_wrapper.STATUS_RUNNING:
print("sleep for 1 second")
time.sleep(1)
agent_status, result, byte_array = rmt_py_wrapper.rmt_server_get_result(target_id)
print("get_result: agent_status=%d" % agent_status)
print("transfer_result=%d" % result)
print("file_len=%d" % len(byte_array))
print("=== file content start ===")
print(bytes(byte_array).decode("utf-8"))
print("=== file content end ===")
def main(args):
r"""
Init and de-init RMT server library
The following APIs are used to implement this function:
- rmt_py_wrapper.rmt_server_version()
- rmt_py_wrapper.rmt_server_configure()
- rmt_py_wrapper.rmt_server_init()
- rmt_py_wrapper.rmt_server_deinit()
"""
def valid_interface(interface):
interface_addrs = psutil.net_if_addrs().get(interface) or []
return socket.AF_INET in [snicaddr.family for snicaddr in interface_addrs]
try:
        # long option spelled "net-intf" so it matches usage() and the option check below
        opts, args = getopt.getopt(args, "hgsn:", ["help", "get_config", "set_config", "send_file", "recv_file", "net-intf="])
except getopt.GetoptError as err:
# print help information and exit:
print(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
flag_get_config = False
flag_set_config = False
flag_send_file = False
flag_recv_file = False
my_interface = ""
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-g", "--get_config"):
flag_get_config = True
elif opt in ("-s", "--set_config"):
flag_set_config = True
elif opt in ("--send_file"):
flag_send_file = True
elif opt in ("--recv_file"):
flag_recv_file = True
elif opt in ("-n", "--net-intf"):
my_interface = arg
if not valid_interface(my_interface):
print("Interface({}) is invalid or inactive.".format(my_interface))
sys.exit(2)
else:
assert False, "unhandled option"
# Get RMT_VERSION
print("RMT_VERSION=%s" % rmt_py_wrapper.rmt_server_version())
# Set network interface for DDS communication
print("Use interface({}) for RMT server".format(my_interface))
rmt_py_wrapper.rmt_server_configure(my_interface, 0)
# Init RMT server
rmt_py_wrapper.rmt_server_init()
# Discovery devices
dev_list, num = discover()
# Get config
if flag_get_config:
get_config(dev_list, num)
# Set config
if flag_set_config:
set_same_config()
set_diff_config()
# Send file
if flag_send_file:
test_send_binary()
# Recv file
if flag_recv_file:
test_recv_binary()
# Free & de-init
rmt_py_wrapper.rmt_server_free_device_list(dev_list.cast())
rmt_py_wrapper.rmt_server_deinit()
if __name__ == "__main__":
args = sys.argv[1:]
main(args)
|
############################# Import Section ###########################################
from flask import Flask, request
from flask_restful import Resource, Api
import base64,cv2,os
import numpy as np
import pandas as pd
import pytesseract as pt
from pytesseract import Output
import requests,random,string
from google.cloud import vision
from google.cloud.vision import types
from google.protobuf.json_format import MessageToDict
import io,re
#from PIL import Image
########################## Create Flask App ###########################################
app = Flask(__name__)
# creating an API object
api = Api(app)
######################### Common Functions #############################################
## Image Preprocessing Methods
#################### Image Preprocessing Type 1 ##################################
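# Shadow/illumination normalization: estimate the background of the grayscale image with
# dilation (7x7 kernel) followed by a median blur, subtract it from the plane, and merge
# the result back so uneven lighting is flattened before adaptive thresholding.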
def IncreaseIlluminationInImage(CurrentImage):
GrayScaleImage=cv2.cvtColor(CurrentImage, cv2.COLOR_BGR2GRAY)
rgb_planes = cv2.split(GrayScaleImage)
result_planes = []
result_norm_planes = []
for plane in rgb_planes:
dilated_img = cv2.dilate(plane, np.ones((7,7), np.uint8))
bg_img = cv2.medianBlur(dilated_img, 21)
diff_img = 255 - cv2.absdiff(plane, bg_img)
norm_img = cv2.normalize(diff_img,None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
result_planes.append(diff_img)
result_norm_planes.append(norm_img)
ConvertedImage=cv2.merge(result_planes)
return ConvertedImage
#################### PAN Image Preprocessing Type 1 ##################################
def PreprocessPANImageType1(CurrentImage):
ConvertedImage=cv2.adaptiveThreshold(CurrentImage,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,55,25)
return ConvertedImage
#################### PAN Image Preprocessing Type 2 ##################################
def PreprocessPANImageType2(CurrentImage):
ConvertedImage=cv2.adaptiveThreshold(CurrentImage,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,57,25)
return ConvertedImage
#################### Aadhar Front Image Preprocessing Type 1 ##################################
def PreprocessAadharFrontImageType1(CurrentImage):
ConvertedImage=cv2.adaptiveThreshold(CurrentImage,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,53,25)
return ConvertedImage
#################### Aadhar Front Image Preprocessing Type 2 ##################################
def PreprocessAadharFrontImageType2(CurrentImage):
ConvertedImage=cv2.adaptiveThreshold(CurrentImage,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,51,25)
return ConvertedImage
#################### Aadhar Back Image Preprocessing Type 1 ##################################
def PreprocessAadharBackImageType1(CurrentImage):
ConvertedImage=cv2.adaptiveThreshold(CurrentImage,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,83,15)
return ConvertedImage
#################### Aadhar Back Image Preprocessing Type 2 ##################################
def PreprocessAadharBackImageType2(CurrentImage):
ConvertedImage=cv2.adaptiveThreshold(CurrentImage,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,89,15)
return ConvertedImage
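# The paired "Type 1"/"Type 2" preprocessors above differ only in the adaptiveThreshold
# block size and offset; the PAN and Aadhar endpoints below run OCR on both variants and
# intersect the recognized words to drop characters that survive only one binarization.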
#################### Calculate Bottom ##############################
def CalculateBottom(row):
current_top=row[2]
current_height=row[3]
return current_top+current_height
#################### Calculate Right ##############################
def CalculateRight(row):
current_left=row[1]
current_width=row[4]
return current_left+current_width
################### OCR Function #####################################
def PerformPANOCRTesseract(GrayScaleImage):
content=pt.image_to_data(GrayScaleImage,output_type=Output.DICT)
Words=list(content['text'])
left=list(content['left'])
top=list(content['top'])
width=list(content['width'])
height=list(content['height'])
content_dict=dict(Word=Words,Left=left,Top=top,Height=height,Width=width)
res=pd.DataFrame.from_dict(content_dict)
res=res[res['Word'].str.strip().str.len()>0]
res['Word']=res['Word'].str.replace(",","")
res=res[res['Word'].str.match(r'(^[a-zA-Z0-9/]*$)')==True]
res['Word']=res['Word'].str.upper().str.strip()
res['Bottom']=res.apply(func=CalculateBottom,axis=1)
res['Right']=res.apply(func=CalculateRight,axis=1)
res=res[['Word','Top','Left','Bottom','Right']]
res=res.sort_values(by=['Top','Left'])
return res
def PerformAadharFrontOCRTesseract(GrayScaleImage):
content=pt.image_to_data(GrayScaleImage,output_type=Output.DICT)
Words=list(content['text'])
left=list(content['left'])
top=list(content['top'])
width=list(content['width'])
height=list(content['height'])
content_dict=dict(Word=Words,Left=left,Top=top,Height=height,Width=width)
res=pd.DataFrame.from_dict(content_dict)
res=res[res['Word'].str.strip().str.len()>0]
res['Bottom']=res.apply(func=CalculateBottom,axis=1)
res['Right']=res.apply(func=CalculateRight,axis=1)
res=res[['Word','Top','Left','Bottom','Right']]
res['Word']=res['Word'].str.replace(",","")
res=res[(res['Word'].str.match(r'(^[a-zA-Z0-9/:]*$)')==True)]
res['Word']=res['Word'].str.upper().str.strip()
#res=res.sort_values(by=['Top','Left'])
return res
def PerformAadharBackOCRTesseract(GrayScaleImage):
content=pt.image_to_data(GrayScaleImage,output_type=Output.DICT)
Words=list(content['text'])
left=list(content['left'])
top=list(content['top'])
width=list(content['width'])
height=list(content['height'])
content_dict=dict(Word=Words,Left=left,Top=top,Height=height,Width=width)
res=pd.DataFrame.from_dict(content_dict)
res=res[res['Word'].str.strip().str.len()>0]
res['Bottom']=res.apply(func=CalculateBottom,axis=1)
res['Right']=res.apply(func=CalculateRight,axis=1)
res=res[['Word','Top','Left','Bottom','Right']]
res['Word']=res['Word'].str.upper().str.strip()
return res
def PerformPassportFrontOCRTesseract(GrayScaleImage):
content=pt.image_to_data(GrayScaleImage,output_type=Output.DICT)
Words=list(content['text'])
left=list(content['left'])
top=list(content['top'])
width=list(content['width'])
height=list(content['height'])
content_dict=dict(Word=Words,Left=left,Top=top,Height=height,Width=width)
ConvertedImageDF=pd.DataFrame.from_dict(content_dict)
ConvertedImageDF['Bottom']=ConvertedImageDF.apply(func=CalculateBottom,axis=1)
ConvertedImageDF['Right']=ConvertedImageDF.apply(func=CalculateRight,axis=1)
ConvertedImageDF=ConvertedImageDF[['Word','Top','Left','Bottom','Right']]
print("#################################################")
print("")
print("List of Word: ",list(ConvertedImageDF['Word']))
print("")
print("#################################################")
return ConvertedImageDF
################## Clean OCR Data ####################################
def RemoveHindiCharacters(WordList):
TempWordList=[]
for word in WordList:
ValidCharacters = [c for c in word if ord(c) < 128]
TempWordList.append("".join(ValidCharacters).strip())
return TempWordList
def CleanText(WordList):
TempWordList=[]
for word in WordList:
if "/" in word:
if word.replace("/","").isalpha():
TempWordList.append(word.replace("/",""))
else:
TempWordList.append(word)
else:
TempWordList.append(word)
return TempWordList
def CleanPassportFrontData(CurrentDF):
CurrentDF=CurrentDF[CurrentDF['Word'].str.strip().str.len()>0]
CurrentDF['Word']=CurrentDF['Word'].str.replace(",","")
CurrentDF['Word']=RemoveHindiCharacters(list(CurrentDF['Word']))
CurrentDF=CurrentDF[(CurrentDF['Word'].str.match(r'(^[a-zA-Z0-9/:<.]*$)')==True)]
CurrentDF['Word']=CurrentDF['Word'].str.upper().str.strip()
CurrentDF['Word']=CleanText(list(CurrentDF['Word']))
CurrentDF = CurrentDF[CurrentDF['Word']!="/"]
CurrentDF = CurrentDF[CurrentDF['Word']!=""]
return CurrentDF
################### Function to Reform Google Vision API Dataframe ###################
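# The helpers below collapse the four bounding-polygon corner coordinates produced by
# PerformOCRGoogleVisionAPI (named llx/lly, lrx/lry, urx/ury, ulx/uly) into single
# Top/Bottom/Left/Right values by taking the min/max of the relevant pair, so the
# Vision API output can be handled like the Tesseract dataframes.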
def CreateTop(row):
current_uly=row[8]
current_ury=row[6]
top=min(current_uly,current_ury)
return top
def CreateBottom(row):
current_lly=row[2]
current_lry=row[4]
bottom=max(current_lly,current_lry)
return bottom
def CreateLeft(row):
current_llx=row[1]
current_ulx=row[7]
left=min(current_llx,current_ulx)
return left
def CreateRight(row):
current_lrx=row[3]
current_urx=row[5]
right=max(current_lrx,current_urx)
return right
############## OCR Using Google Vision API ##########################
def PerformOCRGoogleVisionAPI(current_input_file_path):
with io.open(current_input_file_path, 'rb') as gen_image_file:
content = gen_image_file.read()
try:
client = vision.ImageAnnotatorClient()
image = vision.types.Image(content=content)
response = client.text_detection(image=image)
DictResponse=MessageToDict(response)
WordsAndCoordinates=DictResponse['textAnnotations'][1:]
word_list=[]
llx_list=[]
lly_list=[]
lrx_list=[]
lry_list=[]
urx_list=[]
ury_list=[]
ulx_list=[]
uly_list=[]
for i in range(0,len(WordsAndCoordinates)):
word_list.append(WordsAndCoordinates[i]['description'])
llx_list.append(WordsAndCoordinates[i]['boundingPoly']['vertices'][0]['x'])
lly_list.append(WordsAndCoordinates[i]['boundingPoly']['vertices'][0]['y'])
lrx_list.append(WordsAndCoordinates[i]['boundingPoly']['vertices'][1]['x'])
lry_list.append(WordsAndCoordinates[i]['boundingPoly']['vertices'][1]['y'])
urx_list.append(WordsAndCoordinates[i]['boundingPoly']['vertices'][2]['x'])
ury_list.append(WordsAndCoordinates[i]['boundingPoly']['vertices'][2]['y'])
ulx_list.append(WordsAndCoordinates[i]['boundingPoly']['vertices'][3]['x'])
uly_list.append(WordsAndCoordinates[i]['boundingPoly']['vertices'][3]['y'])
##################### Create Dictionary for the lists #####################
WordsAndCoordinatesDict={"Word":word_list,'llx':llx_list,'lly':lly_list,'lrx':lrx_list,'lry':lry_list,'urx':urx_list,'ury':ury_list,'ulx':ulx_list,'uly':uly_list}
####################### Create Dataframe ######################
WordsAndCoordinatesDF = pd.DataFrame.from_dict(WordsAndCoordinatesDict)
print(list(WordsAndCoordinatesDF['Word']))
return WordsAndCoordinatesDF
except:
return "Error"
################### Function to fetch Valid Values #####################################
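# Joins consecutive words left-to-right (callers sort by 'Left' and reset the index) and
# stops at the first horizontal gap of more than 60 pixels between the previous word's
# right edge and the next word's left edge, so only words belonging to one field are kept.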
def GetValidValues(CurrentDF):
ValidValues=[]
for ind in CurrentDF.index:
if ind == 0:
ValidValues.append(CurrentDF['Word'][ind])
else:
current_left = CurrentDF['Left'][ind]
previous_right = CurrentDF['Right'][ind-1]
if current_left - previous_right > 60:
break
else:
ValidValues.append(CurrentDF['Word'][ind])
return " ".join(ValidValues).strip()
######### PAN Card OCR ##########
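# Endpoint flow: download the image from the supplied URL, normalize illumination, run
# Tesseract on two binarizations and intersect the words, then extract Name, Father's
# Name, Date of Birth and PAN number; if both the PAN number and the DOB come back empty,
# the whole extraction is retried with the Google Vision API.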
class PANCardOCR(Resource):
def post(self):
try:
################ Get File Name and Minimum Matches From Request ###############
data = request.get_json()
ImageFile = data['ImageFile']
FileType=data['filetype']
DownloadDirectory="/mnt/tmp"
randomfivedigitnumber=random.randint(10000,99999)
letters = string.ascii_lowercase
randomfivecharacters=''.join(random.choice(letters) for i in range(5))
if FileType.lower()=="jpg":
FileName="File_"+str(randomfivedigitnumber)+"_"+randomfivecharacters+".jpg"
elif FileType.lower()=="jpeg":
FileName="File_"+str(randomfivedigitnumber)+"_"+randomfivecharacters+".jpeg"
elif FileType.lower()=="png":
FileName="File_"+str(randomfivedigitnumber)+"_"+randomfivecharacters+".png"
else:
return{'msg':'Error','description':'Unsupported File Extension'}
DownloadFilePath=DownloadDirectory+"/"+FileName
################## Download File #######################
try:
response=requests.get(str(ImageFile))
if response.status_code != 200:
return{'msg':'Error','description':'Unable to download file. Please check the file url and permissions again.'}
except:
return{'msg':'Error','description':'Unable to download file. Please check the file url and permissions again.'}
############# Write downloaded file to local ##########
try:
with open(DownloadFilePath,'wb') as f:
f.write(response.content)
except:
return{'msg':'Error','description':'Unable to save downloaded file.'}
################ Read Downloaded Image ################################
try:
CurrentImage=cv2.imread(DownloadFilePath)
#os.remove(DownloadFilePath)
except:
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unable to read downloaded image.'}
################ Preprocess Image #####################################
try:
IlluminatedPANCard=IncreaseIlluminationInImage(CurrentImage)
PANCardImageProcessed1=PreprocessPANImageType1(IlluminatedPANCard)
PANCardImageProcessed2=PreprocessPANImageType2(IlluminatedPANCard)
except Exception as e:
print(e)
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unable to preprocess Image'}
#################### Perform OCR #####################################
try:
PANCardImageProcessed1DF=PerformPANOCRTesseract(PANCardImageProcessed1)
PANCardImageProcessed2DF=PerformPANOCRTesseract(PANCardImageProcessed2)
PANCardImageProcessed1DF=PANCardImageProcessed1DF[PANCardImageProcessed1DF['Word'].isin(list(PANCardImageProcessed2DF['Word']))]
PANCardImageProcessed1DF=PANCardImageProcessed1DF.reset_index(drop=True)
res=PANCardImageProcessed1DF.copy()
DepartmentRow=res[(res['Word'].str.lower().str.contains("dep")) | (res['Word'].str.lower().str.contains("inc")) | (res['Word'].str.lower().str.contains("gov")) | (res['Word'].str.lower().str.contains("indi"))]
if DepartmentRow.shape[0]!=0:
DepartmentTop=DepartmentRow['Bottom'].max()
newres=res[res['Top']>DepartmentTop]
else:
newres = res
GovtRow=res[(res['Word'].str.lower().str.contains("gov")) | (res['Word'].str.lower().str.contains("ind")) | (res['Word'].str.lower().str.contains("of"))]
if GovtRow.shape[0]!=0:
GovtRowLeft=GovtRow['Left'].min()
newres=newres[newres['Right']<GovtRowLeft]
except Exception as e:
print(e)
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Corrupted Image - Unable to Perform OCR'}
try:
################ Fetch Name #########################################
Name=""
NameTop=list(newres['Top'])
if len(NameTop)!=0:
NameTop=NameTop[0]
NameTopUL=NameTop-20
NameTopLL=NameTop+20
WholeNameDF=newres[newres['Top'].between(NameTopUL,NameTopLL)]
WholeNameDF=WholeNameDF.sort_values(by='Left')
Name=" ".join(WholeNameDF['Word'])
############### Fetch DOB using "/" pattern ##########################
DateOfBirth=""
DateOfBirthDF=newres[newres['Word'].str.contains("/")]
if len(list(DateOfBirthDF['Word']))!=0:
DateOfBirth=list(DateOfBirthDF['Word'])[0]
############### Fetch Father's Name #################################
FatherName=""
if Name != "":
NameBottom=max(list(WholeNameDF['Bottom']))
if DateOfBirth!="":
DateOfBirthTop=list(DateOfBirthDF['Top'])[0]
FatherNameDF=newres[(newres['Top']>NameBottom+20) & (newres['Bottom']<DateOfBirthTop)]
if FatherNameDF.shape[0]==0:
FatherNameDF=PANCardImageProcessed1DF[(PANCardImageProcessed1DF['Top']>NameBottom+10) & (PANCardImageProcessed1DF['Bottom']<DateOfBirthTop)]
else:
FatherNameDF=newres[(newres['Top']>NameBottom+10) & (newres['Bottom']<NameBottom+70)]
FatherNameDF=FatherNameDF.sort_values(by='Left')
FatherName=" ".join(list(FatherNameDF['Word']))
###### Try to Fetch DOB Again based on Father's Name if it's blank #######
if DateOfBirth=="":
DateOfBirthDF=PANCardImageProcessed1DF[PANCardImageProcessed1DF['Word'].str.contains("/")]
if len(list(DateOfBirthDF['Word']))!=0:
DateOfBirth=list(DateOfBirthDF['Word'])[0]
################## Fetch PAN Number ###############################
PANNumber=''
PANNumberSeries=newres[(newres['Word'].str.match(r'^(?=.*[a-zA-Z])(?=.*[0-9])[A-Za-z0-9]+$')==True) & (newres['Word'].str.len()==10)]['Word']
if len(list(PANNumberSeries))!=0:
PANNumber=list(PANNumberSeries)[0]
PANNumber=PANNumber.upper()
except Exception as e:
print(e)
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unable to fetch details from Tesseract Response'}
############# Create Response Dict ###############################
if ((PANNumber=="") and (DateOfBirth=="")):
################# Since Tesseract Failed So Calling Google Vision API ######################################
try:
################ Get Dataframe from Google Vision API ######################
WordsAndCoordinatesDF=PerformOCRGoogleVisionAPI(DownloadFilePath)
os.remove(DownloadFilePath)
################ Check Response from Google Vision API ######################
if not isinstance(WordsAndCoordinatesDF, pd.DataFrame):
return {'Msg':'Error','Description':'Unable to Perform OCR using Google Vision API - Poor Image Quality.'}
else:
try:
################ Filter Dataframe ######################
res=WordsAndCoordinatesDF.copy()
res=res[res['Word'].str.match(r"^[A-Za-z0-9/]*$")==True]
DepartmentRow=res[(res['Word'].str.lower().str.contains("dep")) | (res['Word'].str.lower().str.contains("inc")) | (res['Word'].str.lower().str.contains("gov")) | (res['Word'].str.lower().str.contains("indi"))]
if DepartmentRow.shape[0]!=0:
DepartmentTop=DepartmentRow['lly'].max()
newres=res[res['lly']>DepartmentTop+10]
else:
newres=res
############# Fetch Name #####################
Name=""
NameTop=list(newres['uly'])
if len(NameTop)!=0:
NameTop=NameTop[0]
NameTopUL=NameTop-10
NameTopLL=NameTop+10
WholeNameDF=newres[newres['uly'].between(NameTopUL,NameTopLL)]
WholeNameDF=WholeNameDF.sort_values(by='ulx')
Name=" ".join(WholeNameDF['Word'])
############# Fetch Date Of Birth #####################
DateOfBirth=""
DateOfBirthDF=newres[newres['Word'].str.contains("/")]
if len(list(DateOfBirthDF['Word']))!=0:
DateOfBirth=list(DateOfBirthDF['Word'])[0]
############# Fetch Father's Name #####################
FatherName=""
if Name!="":
NameBottom=max(list(WholeNameDF['lly']))
if DateOfBirth!="":
DateOfBirthTop=list(DateOfBirthDF['uly'])[0]
FatherNameDF=newres[(newres['lly']>NameBottom+15) & (newres['uly']<DateOfBirthTop)]
if FatherNameDF.shape[0]!=0:
FatherNameDF=FatherNameDF.sort_values(by='ulx')
FatherName=" ".join(FatherNameDF['Word'])
else:
FatherNameDF=newres[(newres['uly']>NameBottom+10) & (newres['lly']<NameBottom+70)]
FatherNameDF=FatherNameDF.sort_values(by='ulx')
FatherName=" ".join(list(FatherNameDF['Word']))
############# Fetch PAN Number #####################
PANNumber=''
PANNumberSeries=newres[(newres['Word'].str.match(r'^(?=.*[a-zA-Z])(?=.*[0-9])[A-Za-z0-9]+$')==True) & (newres['Word'].str.len()==10)]['Word']
if len(list(PANNumberSeries))!=0:
PANNumber=list(PANNumberSeries)[0]
PANNumber=PANNumber.upper()
############### Create Response Dict #######################
print("Name: ",Name," || Father's Name: ",FatherName," || DateOfBirth: ",DateOfBirth," || PANNumber: ",PANNumber)
if ((PANNumber=="") and (DateOfBirth=="")):
return {'Msg':'Error','Description':'Unable to Perform OCR - Poor Image Quality.'}
else:
ResponseDict=dict(Msg='Success',Name=Name,FatherName=FatherName,DateOfBirth=DateOfBirth,PANNumber=PANNumber,Method="GoogleVisionAPI")
return ResponseDict
except Exception as e:
print(e)
return {'Msg':'Error','Description':'Unable to Perform OCR - Poor Image Quality.'}
except Exception as e:
print(e)
return {'Msg':'Error','Description':'Unable to Perform OCR - Poor Image Quality.'}
else:
os.remove(DownloadFilePath)
print("Name: ",Name," || Father's Name: ",FatherName," || DateOfBirth: ",DateOfBirth," || PANNumber: ",PANNumber)
ResponseDict=dict(Msg='Success',Name=Name,FatherName=FatherName,DateOfBirth=DateOfBirth,PANNumber=PANNumber,Method="Tesseract")
return ResponseDict
except Exception as e:
print(e)
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unknown Exception Happened. Please make sure that the Image Orientation is upright.'}
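######### Aadhar Card Front OCR ##########
# Same download/preprocess/Tesseract-then-Google-Vision flow as PANCardOCR, extracting
# Name, Aadhar number, Sex and Birth year from the front of the card.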
class AadharFrontOCR(Resource):
def post(self):
try:
################ Get File Name and Minimum Matches From Request ###############
data = request.get_json()
ImageFile = data['ImageFile']
FileType=data['filetype']
DownloadDirectory="/mnt/tmp"
randomfivedigitnumber=random.randint(10000,99999)
letters = string.ascii_lowercase
randomfivecharacters=''.join(random.choice(letters) for i in range(5))
if FileType.lower()=="jpg":
FileName="File_"+str(randomfivedigitnumber)+"_"+randomfivecharacters+".jpg"
elif FileType.lower()=="jpeg":
FileName="File_"+str(randomfivedigitnumber)+"_"+randomfivecharacters+".jpeg"
elif FileType.lower()=="png":
FileName="File_"+str(randomfivedigitnumber)+"_"+randomfivecharacters+".png"
else:
return{'msg':'Error','description':'Unsupported File Extension'}
DownloadFilePath=DownloadDirectory+"/"+FileName
################## Download File #######################
try:
response=requests.get(str(ImageFile))
if response.status_code != 200:
return{'msg':'Error','description':'Unable to download file. Please check the file url and permissions again.'}
except:
return{'msg':'Error','description':'Unable to download file. Please check the file url and permissions again.'}
############# Write downloaded file to local ##########
try:
with open(DownloadFilePath,'wb') as f:
f.write(response.content)
except:
return{'msg':'Error','description':'Unable to save downloaded file.'}
################ Read Downloaded Image and Convert to Grayscale ################################
try:
CurrentImage=cv2.imread(DownloadFilePath)
CurrentImage=cv2.cvtColor(CurrentImage, cv2.COLOR_BGR2GRAY)
#os.remove(DownloadFilePath)
except:
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unable to read downloaded image.'}
################ Preprocess Image #####################################
try:
AadharCardImageProcessed1=PreprocessAadharFrontImageType1(CurrentImage)
AadharCardImageProcessed2=PreprocessAadharFrontImageType2(CurrentImage)
except Exception as e:
print(e)
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unable to preprocess Image'}
#################### Perform OCR #####################################
try:
AadharCardImageProcessed1DF=PerformAadharFrontOCRTesseract(AadharCardImageProcessed1)
AadharCardImageProcessed2DF=PerformAadharFrontOCRTesseract(AadharCardImageProcessed2)
ConvertedImageDF=AadharCardImageProcessed2DF[AadharCardImageProcessed2DF['Word'].str.strip().isin(list(AadharCardImageProcessed1DF['Word']))]
except Exception as e:
print(e)
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Corrupted Image - Unable to Perform OCR'}
################ Fetch Birth Year #####################################
try:
BirthYear=""
BirthYearDF=ConvertedImageDF[ConvertedImageDF['Word'].isin(['YEAR','BIRTH'])]
if BirthYearDF.shape[0]!=0:
BirthYearTop=BirthYearDF['Top'].min()-15
BirthYearBottom=BirthYearDF['Bottom'].max()+15
BirthYearDF=ConvertedImageDF[(ConvertedImageDF['Top']>=BirthYearTop) & (ConvertedImageDF['Bottom']<=BirthYearBottom)]
BirthYearDF=BirthYearDF[BirthYearDF['Word'].str.match(r'(^[0-9]*$)')==True]
if BirthYearDF.shape[0]!=0:
BirthYear="".join(BirthYearDF['Word'])
if BirthYear == "":
test="".join(list(ConvertedImageDF['Word']))
MatchList=re.findall(r"\d\d\d\d",test)
MatchListAadhar=re.findall(r"\d\d\d\d\d\d\d\d\d\d\d\d",test)
if len(MatchList)!=0:
ProbableBirthYear=MatchList[0]
if len(MatchListAadhar) != 0:
if ProbableBirthYear not in MatchListAadhar[0]:
BirthYear=ProbableBirthYear
else:
BirthYear=ProbableBirthYear
if (BirthYear != "") and (BirthYearDF.shape[0]==0):
BirthYearDF=ConvertedImageDF[ConvertedImageDF['Word'].isin([BirthYear])]
################## Fetch Sex ################################################
Sex=""
AllWords=list(ConvertedImageDF['Word'])
if "MALE" in AllWords:
Sex="Male"
else:
Sex="Female"
################## Fetch Aadhar Number #####################################
AadharNumber=""
for word in list(ConvertedImageDF['Word']):
if re.match(r"\d\d\d\d\d\d\d\d\d\d\d\d",word):
AadharNumber=word
break
UniqueTops=list(ConvertedImageDF['Top'].unique())
ValidUniqueTops=[]
for top in UniqueTops:
current_top_range=[]
for i in range(-10,11):
current_top_range.append(top+i)
df_records=ConvertedImageDF[ConvertedImageDF['Top'].isin(current_top_range)]
if df_records.shape[0]>1:
ValidUniqueTops.append(top)
ConvertedImageDF=ConvertedImageDF[ConvertedImageDF['Top'].isin(ValidUniqueTops)]
if AadharNumber == "":
test="".join(list(ConvertedImageDF['Word']))
if BirthYear!="":
test=test.replace(BirthYear,'')
MatchList=re.findall(r"\d\d\d\d\d\d\d\d\d\d\d\d",test)
if len(MatchList) != 0:
AadharNumber=MatchList[0]
################## Fetch Name #####################################
Name=""
if "GUARDIAN" in list(ConvertedImageDF['Word']):
GUARDIANDF=ConvertedImageDF[ConvertedImageDF['Word']=="GUARDIAN"]
GUARDIANTop=list(GUARDIANDF['Top'])[0]-60
NameDF=ConvertedImageDF[ConvertedImageDF['Top']<GUARDIANTop].tail(1)
if NameDF.shape[0]==1:
NameDFTop=list(NameDF['Top'])[0]
NameDFBottom=list(NameDF['Bottom'])[0]
NameDF=ConvertedImageDF[(ConvertedImageDF['Top']>=NameDFTop-20) & (ConvertedImageDF['Bottom']<=NameDFBottom+20)]
NameDF=NameDF.sort_values(by='Left')
Name=" ".join(NameDF['Word'])
if Name == "":
if ("FATHER" in list(ConvertedImageDF['Word'])) or ("FATHER:" in list(ConvertedImageDF['Word'])):
FatherDF=ConvertedImageDF[ConvertedImageDF['Word'].isin(["FATHER:",'FATHER'])]
FatherTop=FatherDF['Top'].min()
NameDF=ConvertedImageDF[ConvertedImageDF['Top']<FatherTop-60].tail(1)
if NameDF.shape[0]==1:
NameDFTop=list(NameDF['Top'])[0]
NameDFBottom=list(NameDF['Bottom'])[0]
NameDF=ConvertedImageDF[(ConvertedImageDF['Top']>=NameDFTop-20) & (ConvertedImageDF['Bottom']<=NameDFBottom+20)]
NameDF=NameDF.sort_values(by='Left')
Name=" ".join(NameDF['Word'])
if Name == "":
if BirthYearDF.shape[0]!=0:
BirthYearTop=BirthYearDF['Top'].min()-40
NameDF=ConvertedImageDF[ConvertedImageDF['Top']<BirthYearTop].tail(1)
if NameDF.shape[0]==1:
NameDFTop=list(NameDF['Top'])[0]
NameDFBottom=list(NameDF['Bottom'])[0]
NameDF=ConvertedImageDF[(ConvertedImageDF['Top']>=NameDFTop-20) & (ConvertedImageDF['Bottom']<=NameDFBottom+20)]
NameDF=NameDF.sort_values(by='Left')
Name=" ".join(NameDF['Word'])
except Exception as e:
print(e)
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unable to fetch details from Tesseract Response'}
############# Create Response Dict ###############################
if Name=="" or AadharNumber=="" or BirthYear=="":
################# Since Tesseract Failed So Calling Google Vision API ######################################
try:
################ Get Dataframe from Google Vision API ######################
WordsAndCoordinatesDF=PerformOCRGoogleVisionAPI(DownloadFilePath)
os.remove(DownloadFilePath)
################ Check Response from Google Vision API ######################
if not isinstance(WordsAndCoordinatesDF, pd.DataFrame):
return {'Msg':'Error','Description':'Unable to Perform OCR using Google Vision API - Poor Image Quality.'}
else:
try:
WordsAndCoordinatesDF['Top']=WordsAndCoordinatesDF.apply(func=CreateTop,axis=1)
WordsAndCoordinatesDF['Bottom']=WordsAndCoordinatesDF.apply(func=CreateBottom,axis=1)
WordsAndCoordinatesDF['Left']=WordsAndCoordinatesDF.apply(func=CreateLeft,axis=1)
WordsAndCoordinatesDF['Right']=WordsAndCoordinatesDF.apply(func=CreateRight,axis=1)
WordsAndCoordinatesDF=WordsAndCoordinatesDF[['Word','Top','Bottom','Left','Right']]
WordsAndCoordinatesDF=WordsAndCoordinatesDF[(WordsAndCoordinatesDF['Word'].str.match(r'(^[a-zA-Z0-9]*$)')==True)]
except Exception as e:
print(e)
return {'Msg':'Error','Description':'Unable to reform Vision API Dataframe'}
try:
#################### Fetch Birth Year ###################################
BirthYear=""
BirthDF=WordsAndCoordinatesDF[WordsAndCoordinatesDF['Word']=="Birth"]
BirthRight=list(BirthDF['Right'])[0]
BirthTop=list(BirthDF['Top'])[0]
BirthBottom=list(BirthDF['Bottom'])[0]
BirthYear=WordsAndCoordinatesDF[(WordsAndCoordinatesDF['Top']>BirthTop-20) & (WordsAndCoordinatesDF['Bottom']<BirthBottom+20) & (WordsAndCoordinatesDF['Left']>BirthRight)]['Word']
BirthYear=" ".join(BirthYear)
################ Fetch Aadhar Number ####################################
AadharNumber=""
AadharCardDF=WordsAndCoordinatesDF[WordsAndCoordinatesDF['Word'].str.match(r'(\d\d\d\d)')==True]
if AadharCardDF.shape[0]>1:
AadharCardDF=AadharCardDF[AadharCardDF['Word']!=BirthYear]
AadharCardDF=AadharCardDF.sort_values(by='Left')
AadharNumber="".join(AadharCardDF['Word'])
####################### Fetch Sex #######################################
Sex=""
AllWords=list(WordsAndCoordinatesDF['Word'].str.lower())
if "male" in AllWords:
Sex="Male"
else:
Sex="Female"
######################## Fetch Name #####################################
Name=""
if "GUARDIAN" in list(WordsAndCoordinatesDF['Word'].str.upper()):
GUARDIANDF=WordsAndCoordinatesDF[WordsAndCoordinatesDF['Word'].str.upper()=="GUARDIAN"]
GUARDIANTop=list(GUARDIANDF['Top'])[0]-60
NameDF=WordsAndCoordinatesDF[WordsAndCoordinatesDF['Top']<GUARDIANTop].tail(1)
if NameDF.shape[0]==1:
NameDFTop=list(NameDF['Top'])[0]
NameDFBottom=list(NameDF['Bottom'])[0]
NameDF=WordsAndCoordinatesDF[(WordsAndCoordinatesDF['Top']>=NameDFTop-40) & (WordsAndCoordinatesDF['Bottom']<=NameDFBottom+20)]
NameDF=NameDF.sort_values(by='Left')
Name=" ".join(NameDF['Word'])
if Name == "":
if ("FATHER" in list(WordsAndCoordinatesDF['Word'].str.upper())) or ("FATHER:" in list(WordsAndCoordinatesDF['Word'].str.upper())):
FatherDF=WordsAndCoordinatesDF[WordsAndCoordinatesDF['Word'].str.upper().isin(["FATHER:",'FATHER'])]
FatherTop=FatherDF['Top'].min()
NameDF=WordsAndCoordinatesDF[WordsAndCoordinatesDF['Top']<FatherTop-60].tail(1)
if NameDF.shape[0]==1:
NameDFTop=list(NameDF['Top'])[0]
NameDFBottom=list(NameDF['Bottom'])[0]
NameDF=WordsAndCoordinatesDF[(WordsAndCoordinatesDF['Top']>=NameDFTop-20) & (WordsAndCoordinatesDF['Bottom']<=NameDFBottom+20)]
NameDF=NameDF.sort_values(by='Left')
Name=" ".join(NameDF['Word'])
if Name == "":
if BirthDF.shape[0]!=0:
BirthYearTop=BirthDF['Top'].min()-40
NameDF=WordsAndCoordinatesDF[WordsAndCoordinatesDF['Top']<BirthYearTop].tail(1)
if NameDF.shape[0]==1:
NameDFTop=list(NameDF['Top'])[0]
NameDFBottom=list(NameDF['Bottom'])[0]
NameDF=WordsAndCoordinatesDF[(WordsAndCoordinatesDF['Top']>=NameDFTop-20) & (WordsAndCoordinatesDF['Bottom']<=NameDFBottom+20)]
NameDF=NameDF.sort_values(by='Left')
Name=" ".join(NameDF['Word'])
############### Create Response Dict #######################
if (Name!="") and (AadharNumber!=""):
ResponseDict=dict(Msg='Success',Name=Name,AadharNumber=AadharNumber,Sex=Sex,BirthYear=BirthYear,Method="GoogleVisionAPI")
return ResponseDict
else:
return {'Msg':'Error','Description':'Unable to Perform OCR - Poor Image Quality.'}
except Exception as e:
print(e)
return {'Msg':'Error','Description':'Unable to fetch data from Vision API output.'}
except Exception as e:
print(e)
return {'Msg':'Error','Description':'Unable to Perform OCR - Poor Image Quality.'}
else:
os.remove(DownloadFilePath)
print("Name: ",Name," || AadharNumber: ",AadharNumber," || Sex: ",Sex," || BirthYear: ",BirthYear)
ResponseDict=dict(Msg='Success',Name=Name,AadharNumber=AadharNumber,Sex=Sex,BirthYear=BirthYear,Method="Tesseract")
return ResponseDict
except Exception as e:
print(e)
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unknown Exception Happened. Please make sure that the Image Orientation is upright.'}
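######### Aadhar Card Back OCR ##########
# Extracts the address block from the back of the card; falls back to the Google Vision
# API when Tesseract finds no usable address.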
class AadharBackOCR(Resource):
def post(self):
################ Get File Name and Minimum Matches From Request ###############
try:
data = request.get_json()
ImageFile = data['ImageFile']
FileType=data['filetype']
DownloadDirectory="/mnt/tmp"
randomfivedigitnumber=random.randint(10000,99999)
letters = string.ascii_lowercase
randomfivecharacters=''.join(random.choice(letters) for i in range(5))
if FileType.lower()=="jpg":
FileName="File_"+str(randomfivedigitnumber)+"_"+randomfivecharacters+".jpg"
elif FileType.lower()=="jpeg":
FileName="File_"+str(randomfivedigitnumber)+"_"+randomfivecharacters+".jpeg"
elif FileType.lower()=="png":
FileName="File_"+str(randomfivedigitnumber)+"_"+randomfivecharacters+".png"
else:
return{'msg':'Error','description':'Unsupported File Extension'}
DownloadFilePath=DownloadDirectory+"/"+FileName
################## Download File #######################
try:
response=requests.get(str(ImageFile))
if response.status_code != 200:
return{'msg':'Error','description':'Unable to download file. Please check the file url and permissions again.'}
except:
return{'msg':'Error','description':'Unable to download file. Please check the file url and permissions again.'}
############# Write downloaded file to local ##########
try:
with open(DownloadFilePath,'wb') as f:
f.write(response.content)
except:
return{'msg':'Error','description':'Unable to save downloaded file.'}
################ Read Downloaded Image ################################
try:
CurrentImage=cv2.imread(DownloadFilePath)
#os.remove(DownloadFilePath)
except:
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unable to read downloaded image.'}
################ Preprocess Image #####################################
try:
CurrentImage=cv2.cvtColor(CurrentImage, cv2.COLOR_BGR2GRAY)
ConvertedImage1=PreprocessAadharBackImageType1(CurrentImage)
ConvertedImage2=PreprocessAadharBackImageType2(CurrentImage)
except:
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unable to preprocess Image.'}
#################### Perform OCR #####################################
try:
AadharCardImageProcessed1DF=PerformAadharBackOCRTesseract(ConvertedImage1)
AadharCardImageProcessed2DF=PerformAadharBackOCRTesseract(ConvertedImage2)
ConvertedImageDF=AadharCardImageProcessed2DF[AadharCardImageProcessed2DF['Word'].str.strip().isin(list(AadharCardImageProcessed1DF['Word']))]
except Exception as e:
print(e)
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Corrupted Image - Unable to Perform OCR'}
################ Fetch Address #####################################
Address=""
try:
if ("ADDRESS:" in list(ConvertedImageDF['Word'])) or ("ADDRESS" in list(ConvertedImageDF['Word'])):
AddressLeft=ConvertedImageDF[ConvertedImageDF['Word'].isin(["ADDRESS:","ADDRESS"])]['Left'].min()
AddressTop=ConvertedImageDF[ConvertedImageDF['Word'].isin(["ADDRESS:","ADDRESS"])]['Top'].max()
ConvertedImageDF=ConvertedImageDF[(ConvertedImageDF['Left']>=AddressLeft-5) & (ConvertedImageDF['Top']>=AddressTop-20)]
LowerLimitDF=ConvertedImageDF[ConvertedImageDF['Word'].isin(['BOX','1947','1800','HELP@UIDAI.GOV.IN','WWW.ULDAL.GOV.IN','P.O.'])]
LowerLimit=LowerLimitDF['Top'].min()
ConvertedImageDF=ConvertedImageDF[ConvertedImageDF['Bottom']<LowerLimit-60]
Address=" ".join(ConvertedImageDF['Word'])
Address=Address.replace("ADDRESS:","")
Address=Address.replace("ADDRESS","").strip()
if Address != "":
os.remove(DownloadFilePath)
ResponseDict=dict(Msg='Success',Address=Address,Method="Tesseract")
return ResponseDict
except Exception as e:
print(e)
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unable to fetch details from Tesseract Response'}
if Address == "":
################# Since Tesseract Failed So Calling Google Vision API ######################################
try:
################ Get Dataframe from Google Vision API ######################
WordsAndCoordinatesDF=PerformOCRGoogleVisionAPI(DownloadFilePath)
os.remove(DownloadFilePath)
################ Check Response from Google Vision API ######################
if not isinstance(WordsAndCoordinatesDF, pd.DataFrame):
return {'Msg':'Error','Description':'Unable to Perform OCR using Google Vision API - Poor Image Quality.'}
else:
try:
WordsAndCoordinatesDF['Top']=WordsAndCoordinatesDF.apply(func=CreateTop,axis=1)
WordsAndCoordinatesDF['Bottom']=WordsAndCoordinatesDF.apply(func=CreateBottom,axis=1)
WordsAndCoordinatesDF['Left']=WordsAndCoordinatesDF.apply(func=CreateLeft,axis=1)
WordsAndCoordinatesDF['Right']=WordsAndCoordinatesDF.apply(func=CreateRight,axis=1)
WordsAndCoordinatesDF=WordsAndCoordinatesDF[['Word','Top','Bottom','Left','Right']]
WordsAndCoordinatesDF=WordsAndCoordinatesDF[(WordsAndCoordinatesDF['Word'].str.match(r'(^[a-zA-Z0-9]*$)')==True)]
except Exception as e:
print(e)
return {'Msg':'Error','Description':'Unable to reform Vision API Dataframe'}
#################### Fetch Address ###################################
Address=""
if ("ADDRESS:" in list(WordsAndCoordinatesDF['Word'])) or ("ADDRESS" in list(WordsAndCoordinatesDF['Word'])):
AddressLeft=WordsAndCoordinatesDF[WordsAndCoordinatesDF['Word'].isin(["ADDRESS:","ADDRESS"])]['Left'].min()
AddressTop=WordsAndCoordinatesDF[WordsAndCoordinatesDF['Word'].isin(["ADDRESS:","ADDRESS"])]['Top'].max()
WordsAndCoordinatesDF=WordsAndCoordinatesDF[(WordsAndCoordinatesDF['Left']>=AddressLeft-5) & (WordsAndCoordinatesDF['Top']>=AddressTop-20)]
LowerLimitDF=WordsAndCoordinatesDF[WordsAndCoordinatesDF['Word'].isin(['BOX','1947','1800','HELP@UIDAI.GOV.IN','WWW.ULDAL.GOV.IN','P.O.'])]
LowerLimit=LowerLimitDF['Top'].min()
WordsAndCoordinatesDF=WordsAndCoordinatesDF[WordsAndCoordinatesDF['Bottom']<LowerLimit-150]
Address=" ".join(WordsAndCoordinatesDF['Word'])
Address=Address.replace("ADDRESS:","")
Address=Address.replace("ADDRESS","").strip()
if Address!="":
ResponseDict=dict(Msg='Success',Address=Address,Method="GoogleVisionAPI")
return ResponseDict
else:
return {'Msg':'Error','Description':'Unable to Perform OCR - Poor Image Quality.'}
except Exception as e:
print(e)
return {'Msg':'Error','Description':'Unable to fetch data from Vision API output.'}
except Exception as e:
print(e)
return {'Msg':'Error','Description':'Unknown Exception Happened. Please make sure that the Image Orientation is upright.'}
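######### Passport Front Page OCR ##########
# Extracts passport number, names, nationality, sex, dates and places from the data page;
# Tesseract output is only trusted when all expected field labels are detected, otherwise
# the extraction is repeated on the Google Vision API output.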
class PassportFrontOCR(Resource):
def post(self):
try:
############### Initialize Variables ###########################
PassportNumber=""
Surname=""
GivenName=""
Nationality=""
Sex=""
DateOfBirth=""
PlaceOfBirth=""
PlaceOfIssue=""
DateOfIssue=""
DateOfExpiry=""
ListOfPlaceOfIssue = ['AHMEDABAD','AMRITSAR','BANGALORE','BAREILLY','BHOPAL','BHUBANESWAR','CHANDIGARH','CHENNAI',
'COIMBATORE','CUDDALORE','DEHRADUN','DELHI','DHULE','GHAZIABAD','GUWAHATI','HYDERABAD','JAIPUR','JALANDHAR','JAMMU','KOCHI','KOLKATA',
'KOZHIKODE','LUCKNOW','MADURAI','MALAPPURAM','MUMBAI','NAGPUR','PANAJI','PATNA','PUNE','RAIPUR','RANCHI','SHIMLA','SRINAGAR','SURAT',
'THANE','THIRUVANANTHAPURAM','TIRUCHIRAPPALLI','VISAKHAPATNAM']
################ Get File Name and Minimum Matches From Request ###############
data = request.get_json()
ImageFile = data['ImageFile']
FileType=data['filetype']
DownloadDirectory="/mnt/tmp"
randomfivedigitnumber=random.randint(10000,99999)
letters = string.ascii_lowercase
randomfivecharacters=''.join(random.choice(letters) for i in range(5))
if FileType.lower()=="jpg":
FileName="File_"+str(randomfivedigitnumber)+"_"+randomfivecharacters+".jpg"
elif FileType.lower()=="jpeg":
FileName="File_"+str(randomfivedigitnumber)+"_"+randomfivecharacters+".jpeg"
elif FileType.lower()=="png":
FileName="File_"+str(randomfivedigitnumber)+"_"+randomfivecharacters+".png"
else:
return{'msg':'Error','description':'Unsupported File Extension'}
DownloadFilePath=DownloadDirectory+"/"+FileName
################## Download File #######################
try:
response=requests.get(str(ImageFile))
if response.status_code != 200:
return{'msg':'Error','description':'Unable to download file. Please check the file url and permissions again.'}
except:
return{'msg':'Error','description':'Unable to download file. Please check the file url and permissions again.'}
############# Write downloaded file to local ##########
try:
with open(DownloadFilePath,'wb') as f:
f.write(response.content)
except:
return{'msg':'Error','description':'Unable to save downloaded file.'}
################ Read Downloaded Image ################################
try:
CurrentImage=cv2.imread(DownloadFilePath)
GrayImage=cv2.cvtColor(CurrentImage, cv2.COLOR_BGR2GRAY)
#cv2.imwrite("Test.jpg", CurrentImage)
#os.remove(DownloadFilePath)
except:
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unable to read downloaded image.'}
#################### Perform OCR using Tesseract #####################################
try:
ConvertedImageDF=PerformPassportFrontOCRTesseract(CurrentImage)
ConvertedImageDF=CleanPassportFrontData(ConvertedImageDF)
print(list(ConvertedImageDF['Word']))
except Exception as e:
print(e)
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Corrupted Image - Unable to Perform OCR'}
try:
################## Check if Tesseract returned all valid keywords #######################
TesseractWorks=False
DatesValid=False
PassportNumberValid=False
PlaceValid=False
SurnameValid=False
GivenNameValid=False
NationalityValid=False
FirstPlaceTop=""
############# Validity Check For Dates and Passport ################
Dates=[]
for word in list(ConvertedImageDF['Word']):
if re.match(r"^[0-9]{2}\/[0-9]{2}\/[0-9]{4}$",word.strip()):
Dates.append(word.strip())
elif re.match(r"^[A-Z]{1}[0-9]{7}$",word.strip()):
PassportNumber = word.strip()
DatesValid = len(Dates) == 3
print("Dates Valid: ",DatesValid)
print("")
PassportNumberValid = PassportNumber!=""
print("Passport Number Valid: ",PassportNumberValid)
print("")
############# Validity Check For Place ################
PlaceDF = ConvertedImageDF[(ConvertedImageDF['Word'].str.startswith('PLA')) | (ConvertedImageDF['Word'].str.endswith('ACE'))].reset_index(drop=True)
PlaceValid = PlaceDF.shape[0]==2
print("Place Valid: ",PlaceValid)
print("")
############# Validity Check For Surname ################
SurnameDF = ConvertedImageDF[(ConvertedImageDF['Word'].str.endswith("URNAME")) | (ConvertedImageDF['Word'].str.startswith("SURN"))]
SurnameValid = SurnameDF.shape[0] == 1
print("Surname Valid: ",SurnameValid)
print("")
############# Validity Check For Given Name ################
GivenDF = ConvertedImageDF[(ConvertedImageDF['Word'].str.endswith("VEN")) | (ConvertedImageDF['Word'].str.startswith("GIV"))]
GivenNameValid = GivenDF.shape[0] == 1
print("Given Name Valid: ",GivenNameValid)
print("")
############# Validity Check For Nationality ################
NationalityDF = ConvertedImageDF[(ConvertedImageDF['Word'].str.startswith("NATION")) | (ConvertedImageDF['Word'].str.endswith("NALITY"))]
NationalityValid = NationalityDF.shape[0] == 1
print("Nationality Valid: ",NationalityValid)
print("")
############ Validity Check for whole dataframe
TesseractWorks = DatesValid and PassportNumberValid and PlaceValid and SurnameValid and GivenNameValid and NationalityValid
except Exception as e:
print(e)
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unable to check if all required keywords are present or not in Tesseract output'}
########################## Proceed with tesseract output if Tesseract works ####################################
if TesseractWorks:
################ Get Date Of Birth, Issue and Expiry ##################################
try:
DatesDF = ConvertedImageDF[ConvertedImageDF['Word'].isin(Dates)]
PlaceBottom = PlaceDF['Bottom'][0]
DOBDF = DatesDF[DatesDF['Bottom']<PlaceBottom].reset_index(drop=True)
OtherDates = DatesDF[DatesDF['Top']>PlaceBottom].reset_index(drop=True)
if DOBDF.shape[0] == 1:
DateOfBirth = DOBDF['Word'][0]
if OtherDates.shape[0] == 2:
OtherDates = OtherDates.sort_values(by='Left').reset_index(drop=True)
DateOfIssue = OtherDates['Word'][0].strip()
DateOfExpiry = OtherDates['Word'][1].strip()
except Exception as e:
print(e)
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unable to get dates from Tesseract output'}
####################### Get Sex #########################################
try:
for word in list(ConvertedImageDF['Word']):
if word.strip() == "F":
Sex = "Female"
break
elif word.strip() == "M":
Sex = "Male"
break
except Exception as e:
print(e)
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unable to get Sex from Tesseract output'}
################ Get Surname and Given Names ################################
try:
SurnameBottom = list(SurnameDF['Bottom'])[0]
GivenTop = list(GivenDF['Top'])[0]
GivenBottom = list(GivenDF['Bottom'])[0]
NationalityTop = list(NationalityDF['Top'])[0]
SurnameValueDF = ConvertedImageDF[(ConvertedImageDF['Top'] > SurnameBottom) & (ConvertedImageDF['Bottom'] < GivenTop+10)]
SurnameValueDF=SurnameValueDF.sort_values(by='Left').reset_index(drop=True)
Surname = GetValidValues(SurnameValueDF)
GivenNameDF = ConvertedImageDF[(ConvertedImageDF['Top'] > GivenBottom) & (ConvertedImageDF['Bottom'] < NationalityTop+10)]
GivenNameDF=GivenNameDF.sort_values(by='Left').reset_index(drop=True)
GivenName = GetValidValues(GivenNameDF)
except Exception as e:
print(e)
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unable to get Surname and Given Name from Tesseract output'}
##################### Get Place Of Issue and Place Of Birth #####################################
try:
PlaceDFSorted = PlaceDF.sort_values(by='Top').reset_index(drop=True)
LastPlaceBottom = PlaceDFSorted['Bottom'][1]
ConvertedImageDFAfterPlaceOfIssue = ConvertedImageDF[ConvertedImageDF['Bottom'] > LastPlaceBottom]
ConvertedImageDFAfterPlaceOfIssue = ConvertedImageDFAfterPlaceOfIssue.sort_values(by='Top')
AfterPlaceOfIssueWords=list(ConvertedImageDFAfterPlaceOfIssue['Word'])
for word in AfterPlaceOfIssueWords:
for place in ListOfPlaceOfIssue:
if place in word.strip():
PlaceOfIssue = place
break
FirstPlaceBottom = PlaceDFSorted['Bottom'][0]
FirstPlaceTop = PlaceDFSorted['Top'][0]
PlaceOfBirthDF = ConvertedImageDF[(ConvertedImageDF['Bottom'] > FirstPlaceBottom+10) &
(ConvertedImageDF['Bottom'] < LastPlaceBottom-10)]
PlaceOfBirthDF = PlaceOfBirthDF.sort_values(by='Left').reset_index(drop=True)
PlaceOfBirth = GetValidValues(PlaceOfBirthDF)
except Exception as e:
print(e)
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unable to get Place Of Issue and Place Of Birth from Tesseract output'}
########################## Get Nationality #########################################
try:
if (FirstPlaceTop != ""):
NationalityBottom = list(NationalityDF['Bottom'])[0]
NationalityValuesDF = ConvertedImageDF[(ConvertedImageDF['Bottom']>NationalityBottom) &
(ConvertedImageDF['Bottom']<FirstPlaceTop)]
print(NationalityValuesDF)
NationalityValuesDF = NationalityValuesDF.sort_values(by='Left').reset_index(drop=True)
Nationality = GetValidValues(NationalityValuesDF).split()[0]
except Exception as e:
print(e)
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unable to get Nationality from Tesseract output'}
###################### Return Response ################################
ResponseDict=dict(Msg='Success',PassportNumber=PassportNumber,Surname=Surname,GivenName=GivenName,
Nationality=Nationality,Sex=Sex,DateOfBirth=DateOfBirth,PlaceOfIssue=PlaceOfIssue,
DateOfIssue=DateOfIssue,DateOfExpiry=DateOfExpiry,Method="Tesseract")
return ResponseDict
else:
print("Unable to Fetch Values using Tesseract so proceeeding for Google Vision API !!")
print("")
################ Get Dataframe from Google Vision API ######################
try:
CurrentImage=cv2.imread(DownloadFilePath)
GrayImage=cv2.cvtColor(CurrentImage, cv2.COLOR_BGR2GRAY)
cv2.imwrite(DownloadFilePath,GrayImage)
WordsAndCoordinatesDF=PerformOCRGoogleVisionAPI(DownloadFilePath)
os.remove(DownloadFilePath)
################ Check Response from Google Vision API ######################
if not isinstance(WordsAndCoordinatesDF, pd.DataFrame):
return {'Msg':'Error','Description':'Unable to Perform OCR using Google Vision API - Poor Image Quality.'}
except Exception as e:
print(e)
if os.path.exists(DownloadFilePath):
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unable to perform OCR using Google Vision API.'}
################ Get top, bottom, left, right from Google Vision API ######################
try:
ConvertedImageDF = WordsAndCoordinatesDF.copy()
ConvertedImageDF['Top']=ConvertedImageDF.apply(func=CreateTop,axis=1)
ConvertedImageDF['Bottom']=ConvertedImageDF.apply(func=CreateBottom,axis=1)
ConvertedImageDF['Left']=ConvertedImageDF.apply(func=CreateLeft,axis=1)
ConvertedImageDF['Right']=ConvertedImageDF.apply(func=CreateRight,axis=1)
ConvertedImageDF=ConvertedImageDF[['Word','Top','Bottom','Left','Right']]
print(list(ConvertedImageDF['Word']))
except Exception as e:
print(e)
return {'Msg':'Error','Description':'Unable to create top/bottom/left/right from Google Vision API response.'}
################ Clean Passport Front Page Data #############################
try:
ConvertedImageDF = CleanPassportFrontData(ConvertedImageDF)
except Exception as e:
print(e)
return {'Msg':'Error','Description':'Unable to clean Google Vision API response.'}
################## Check if Google Vision API returned all valid keywords #######################
try:
GVAWorks=False
DatesValid=False
PassportNumberValid=False
PlaceValid=False
SurnameValid=False
GivenNameValid=False
NationalityValid=False
FirstPlaceTop=""
############# Validity Check For Dates and Passport ################
Dates=[]
for word in list(ConvertedImageDF['Word']):
if re.match(r"^[0-9]{2}\/[0-9]{2}\/[0-9]{4}$",word.strip()):
Dates.append(word.strip())
elif re.match(r"^[A-Z]{1}[0-9]{7}$",word.strip()):
PassportNumber = word.strip()
DatesValid = len(Dates) == 3
print("Dates Valid: ",DatesValid)
print("")
PassportNumberValid = PassportNumber!=""
print("Passport Number Valid: ",PassportNumberValid)
print("")
############# Validity Check For Place ################
PlaceDF = ConvertedImageDF[(ConvertedImageDF['Word'].str.startswith('PLA')) | (ConvertedImageDF['Word'].str.endswith('ACE'))].reset_index(drop=True)
PlaceValid = PlaceDF.shape[0]==2
print("Place Valid: ",PlaceValid)
print("")
############# Validity Check For Surname ################
SurnameDF = ConvertedImageDF[(ConvertedImageDF['Word'].str.endswith("URNAME")) | (ConvertedImageDF['Word'].str.startswith("SURN"))]
SurnameValid = SurnameDF.shape[0] == 1
print("Surname Valid: ",SurnameValid)
print("")
############# Validity Check For Given Name ################
GivenDF = ConvertedImageDF[(ConvertedImageDF['Word'].str.endswith("VEN")) | (ConvertedImageDF['Word'].str.startswith("GIV"))]
GivenNameValid = GivenDF.shape[0] == 1
print("Given Name Valid: ",GivenNameValid)
print("")
############# Validity Check For Nationality ################
NationalityDF = ConvertedImageDF[(ConvertedImageDF['Word'].str.startswith("NATION")) | (ConvertedImageDF['Word'].str.endswith("NALITY"))]
NationalityValid = NationalityDF.shape[0] == 1
print("Nationality Valid: ",NationalityValid)
print("")
############ Validity Check for whole dataframe
GVAWorks = DatesValid and PassportNumberValid and PlaceValid and SurnameValid and GivenNameValid and NationalityValid
if not(GVAWorks):
return {'Msg':'Error','Description':'All required keywords are not present in GVA output'}
except Exception as e:
print(e)
return {'Msg':'Error','Description':'Unable to check if all required keywords are present or not in GVA output'}
################ Get Date Of Birth, Issue and Expiry ##################################
try:
DatesDF = ConvertedImageDF[ConvertedImageDF['Word'].isin(Dates)]
PlaceBottom = PlaceDF['Bottom'][0]
DOBDF = DatesDF[DatesDF['Bottom']<PlaceBottom].reset_index(drop=True)
OtherDates = DatesDF[DatesDF['Top']>PlaceBottom].reset_index(drop=True)
if DOBDF.shape[0] == 1:
DateOfBirth = DOBDF['Word'][0]
if OtherDates.shape[0] == 2:
OtherDates = OtherDates.sort_values(by='Left').reset_index(drop=True)
DateOfIssue = OtherDates['Word'][0].strip()
DateOfExpiry = OtherDates['Word'][1].strip()
except Exception as e:
print(e)
return {'Msg':'Error','Description':'Unable to get dates from GVA output'}
################# Get Sex and Passport Number ############################
try:
for word in list(ConvertedImageDF['Word']):
if word.strip() == "F":
Sex = "Female"
break
elif word.strip() == "M":
Sex = "Male"
break
except Exception as e:
print(e)
return {'Msg':'Error','Description':'Unable to get Sex from GVA output'}
################ Get Surname and Given Names ################################
try:
SurnameBottom = list(SurnameDF['Bottom'])[0]
GivenTop = list(GivenDF['Top'])[0]
GivenBottom = list(GivenDF['Bottom'])[0]
NationalityTop = list(NationalityDF['Top'])[0]
SurnameValueDF = ConvertedImageDF[(ConvertedImageDF['Bottom'] > SurnameBottom) & (ConvertedImageDF['Top'] < GivenTop-15)]
SurnameValueDF=SurnameValueDF.sort_values(by='Left').reset_index(drop=True)
Surname = GetValidValues(SurnameValueDF)
GivenNameDF = ConvertedImageDF[(ConvertedImageDF['Bottom'] > GivenBottom) & (ConvertedImageDF['Top'] < NationalityTop-20)]
GivenNameDF=GivenNameDF.sort_values(by='Left').reset_index(drop=True)
GivenName = GetValidValues(GivenNameDF)
except Exception as e:
print(e)
return {'Msg':'Error','Description':'Unable to get Surname and Given Name from GVA output'}
##################### Get Place Of Issue and Place Of Birth #####################################
try:
PlaceDFSorted = PlaceDF.sort_values(by='Top').reset_index(drop=True)
LastPlaceBottom = PlaceDFSorted['Bottom'][1]
ConvertedImageDFAfterPlaceOfIssue = ConvertedImageDF[ConvertedImageDF['Bottom'] > LastPlaceBottom]
ConvertedImageDFAfterPlaceOfIssue = ConvertedImageDFAfterPlaceOfIssue.sort_values(by='Top')
AfterPlaceOfIssueWords=list(ConvertedImageDFAfterPlaceOfIssue['Word'])
for word in AfterPlaceOfIssueWords:
for place in ListOfPlaceOfIssue:
if place in word.strip():
PlaceOfIssue = place
break
FirstPlaceBottom = PlaceDFSorted['Bottom'][0]
FirstPlaceTop = PlaceDFSorted['Top'][0]
PlaceOfBirthDF = ConvertedImageDF[(ConvertedImageDF['Bottom'] > FirstPlaceBottom+10) &
(ConvertedImageDF['Bottom'] < LastPlaceBottom-10)]
PlaceOfBirthDF = PlaceOfBirthDF.sort_values(by='Left').reset_index(drop=True)
PlaceOfBirth = GetValidValues(PlaceOfBirthDF)
except Exception as e:
print(e)
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unable to get Place Of Issue and Place Of Birth from GVA output'}
########################## Grab Nationality #########################################
try:
if (FirstPlaceTop != ""):
NationalityBottom = list(NationalityDF['Bottom'])[0]
NationalityValuesDF = ConvertedImageDF[(ConvertedImageDF['Bottom']>NationalityBottom) &
(ConvertedImageDF['Top']<FirstPlaceTop-15)]
NationalityValuesDF = NationalityValuesDF.sort_values(by='Left').reset_index(drop=True)
Nationality = GetValidValues(NationalityValuesDF).split()[0]
except Exception as e:
print(e)
return {'Msg':'Error','Description':'Unable to get Nationality from GVA output'}
###################### Return Response ################################
ResponseDict=dict(Msg='Success',PassportNumber=PassportNumber,Surname=Surname,GivenName=GivenName,
Nationality=Nationality,Sex=Sex,DateOfBirth=DateOfBirth,PlaceOfIssue=PlaceOfIssue,
DateOfIssue=DateOfIssue,DateOfExpiry=DateOfExpiry,Method="GoogleVisionAPI")
return ResponseDict
except Exception as e:
print(e)
if os.path.exists(DownloadFilePath):
os.remove(DownloadFilePath)
return {'Msg':'Error','Description':'Unknown Exception Happened. Please make sure that the Image Orientation is upright.'}
#################### Configure URLs #########################
api.add_resource(PANCardOCR,'/PancardOCR')
api.add_resource(AadharFrontOCR,'/AadharFrontOCR')
api.add_resource(AadharBackOCR,'/AadharBackOCR')
api.add_resource(PassportFrontOCR,'/PassportFrontOCR')
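################# Example Request ##########################
# A minimal sketch of how a client could call the PAN endpoint, assuming the server runs
# on the default Flask port 5000 and the image URL is reachable (hypothetical host/URL):
#   import requests
#   resp = requests.post("http://localhost:5000/PancardOCR",
#                        json={"ImageFile": "https://example.com/pan.jpg", "filetype": "jpg"})
#   print(resp.json())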
################# Run Flask Server ##########################
if __name__ == '__main__':
app.run(debug = True,host='0.0.0.0')
|
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
import numpy as np
import random
import Tkinter as Tk
from grafica import Grafica
import vectorEntrenamiento as vE
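# Single-layer perceptron trainer with a Tkinter UI: training vectors come from the
# Grafica canvas, and the decision boundary is re-plotted as training progresses
# (red = initial weights, cyan = weights when the epoch limit is hit, green = final).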
root = Tk.Tk()
root.wm_title("Perceptron")
C_ZERO = 0
MIN_VAL = -5
MAX_VAL = 5
maxEpocas = Tk.StringVar()
lr = Tk.StringVar()
X0 = -1
W0 = vE.Entrada( vE.getRandom( MIN_VAL, MAX_VAL ) )
entradas = ( vE.Entrada( vE.getRandom( MIN_VAL, MAX_VAL ) ),
vE.Entrada( vE.getRandom( MIN_VAL, MAX_VAL ) ) )
class Ventana():
def __init__(self):
self.fig = plt.figure()
canvas = FigureCanvasTkAgg(self.fig, master=root)
self.grafica = Grafica(self.fig)
self.grafica.setCanvas(canvas)
self.ax = self.grafica.ax
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
#canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
btnQuit = Tk.Button(master=root, text="Salir", command=self._quit)
btnQuit.pack(side=Tk.BOTTOM)
lblLr = Tk.Label(master=root, text="Learning rate: ")
lblLr.pack(side=Tk.LEFT)
entryLr = Tk.Entry(master=root, bd=5, textvariable=lr)
entryLr.pack(side=Tk.LEFT)
lblEpocas = Tk.Label(master=root, text="Epocas: ")
lblEpocas.pack(side=Tk.LEFT)
entryEpocas = Tk.Entry(master=root, bd=5, textvariable=maxEpocas)
entryEpocas.pack(side=Tk.LEFT)
btnEntrenar = Tk.Button(master=root, text="Entrenar", command=self.entrenar)
btnEntrenar.pack(side=Tk.RIGHT)
self.lblEstado = Tk.Label(master=root, text="Estado: Configurando")
self.lblEstado.pack(side=Tk.LEFT)
def entrenar(self):
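        # Classic perceptron training: sweep the training vectors and, whenever the
        # output disagrees with the desired class, nudge the bias and each weight by
        # lr * error * input; stop when an epoch is error-free or maxEpocas is reached.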
tieneError = True
sinEntrenar = False
iteracion = 0
self.plotPesos(entradas, W0.getValor(), 0)
while tieneError:
tieneError = False
for vector in self.grafica.vectoresEntrenamiento:
salida = self.respuesta(vector)
error = vector.getClase() - salida
if ( error != C_ZERO ):
tieneError = True
W0.setValor( W0.getValor() + float(lr.get()) * error * X0 )
for i,entrada in enumerate(entradas):
peso = entrada.getValor() + float(lr.get()) * error * vector.getCoordenadas()[i]
entrada.setValor(peso)
if iteracion == int(maxEpocas.get()):
tieneError = False
sinEntrenar = True
self.plotPesos(entradas,W0.getValor(), 1)
iteracion += 1
self.plotPesos(entradas, W0.getValor(), 2)
if sinEntrenar:
self.lblEstado.config(text="Estado: No converge")
else:
if not tieneError:
self.lblEstado.config(text="Estado: Entrenado")
def respuesta(self, vector):
suma = X0 * W0.getValor()
for i, entrada in enumerate(entradas):
suma += vector.getCoordenadas()[i] * entrada.getValor()
if suma >= C_ZERO:
return 1
else:
return C_ZERO
def plotPesos(self, entradas, bias, tipo):
o = None
if tipo == 0: c = 'red'
elif tipo == 1: c = 'cyan'
else: c = 'green'
weightArray = [entradas[0].getValor(), entradas[1].getValor()]
x = np.array(range(-10,10))
        y = (bias / weightArray[1]) - (weightArray[0] / weightArray[1]) * x
self.ax.plot(x,y,'--', color=c)
self.fig.canvas.draw()
def _quit(self):
root.quit()
root.destroy()
|
from django import forms
from main.models import Client
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserChangeForm
class RegisterForm(UserChangeForm):
phone = forms.CharField(max_length=28)
real_name = forms.CharField(max_length=128, required=False)
class Meta:
model = User
fields = ['username', 'password', 'email']
def clean_username(self):
data = self.cleaned_data['username']
duplicate_users = User.objects.filter(username=data)
if self.instance.pk is not None:
duplicate_users = duplicate_users.exclude(pk=self.instance.pk)
if duplicate_users.exists():
raise forms.ValidationError('Такий логін вже зайнятий! Придумайте щось інше!')
return data
def clean_email(self):
data = self.cleaned_data['email']
leave = User.objects.filter(email=data)
if leave.exists():
test_false = leave.filter(email__exact='')
if test_false.exists():
return data
else:
raise forms.ValidationError(
'Такий Email вже використовується! Якщо це Ваш Email адрес спробуйте відновити свій обліковий запис!')
else:
return data
def clean_phone(self):
data = self.cleaned_data['phone']
if User.objects.filter(client__phone=data).exists():
raise forms.ValidationError('Такий номер телефону вже використовується! Якщо це Ваш номер телефону спробуйте відновити свій обліковий запис!')
return data
def clean_password(self):
data = self.cleaned_data['password']
if len(data) <= 2:
raise forms.ValidationError('Пароль повинен містити хоча б два символи, будь-ласка!')
return data
class LoginForm(forms.Form):
username = forms.CharField(max_length=32)
password = forms.CharField(widget=forms.PasswordInput)
class Meta:
model = User
class ResetTelForm(forms.ModelForm):
class Meta:
model = Client
fields = ['phone']
|
from flask import Flask, render_template, jsonify, redirect, flash, request, url_for, Response, Blueprint, make_response
import flask
from flask_login import LoginManager, login_required, login_user, logout_user, current_user
from flask_caching import Cache
# import bcrypt
from flask_bcrypt import Bcrypt
import logging
import random
from bson import ObjectId
import json
import numpy as np
import pandas as pd
import traceback
from datetime import datetime
from pprint import pprint
from os import environ
import os
from backend.src.pytypes import conv, Card, CardLang, V, Deck, User, UserAlreadyExists
import backend.src.helpers as h
import backend.src.db as db
from backend.src import loggingManager
from backend.src import MailSender
eventslog = logging.getLogger('events')
cache = Cache(config={
# 'CACHE_TYPE': 'FileSystemCache', 'CACHE_DIR': '/.flask-cache', "CACHE_DEFAULT_TIMEOUT": 9999999
'CACHE_TYPE': 'RedisCache',
'CACHE_REDIS_URL': 'redis://redis',
'CACHE_REDIS_PORT': '6379',
"CACHE_DEFAULT_TIMEOUT": 9999999,
})
flsk = Blueprint(
'bprnt', __name__,
static_folder='./backend/static',
)
@flsk.route("/status", methods=["GET"])
def health_check():
logging.debug("debug log")
logging.info("info log")
logging.warning("warning log")
logging.error("error log")
# logging.exception("exception log")
return make_response("OK", 200)
@flsk.route("/api/cards", methods=["GET"])
def get_cards():
# cards = [Card([CardLang("fr", "test")])]
args = {}
if "search" in request.args and request.args['search'] is not None and len(request.args['search']):
args['langs'] = {'$elemMatch': {"text": {"$regex": h.prepare_user_input_search_regex(request.args['search'])}}}
if "deck" in request.args and request.args['deck'] is not None and len(request.args['deck']):
args['decks'] = {'$elemMatch': {"$eq": request.args['deck']}}
total_n = db.db.cards.count_documents(args)
cards = db.db.cards.find(args)
if "offset" in request.args:
cards = cards.skip(int(request.args["offset"]))
if "first" in request.args:
cards = cards.limit(int(request.args["first"]))
# logging.exception("exception log")
print(total_n, "cards.")
return jsonify(dict(n=total_n, cards=[V.decode(e).toDict() for e in cards]))
@flsk.route("/api/update-usercard", methods=["POST"])
# @login_required
def update_usercard():
if current_user.is_anonymous():
return jsonify({})
# user = request.json['user']
user = current_user.id
card = ObjectId(request.json['card'])
difficulty = request.json['difficulty']
print("update user card", request.json)
already = db.db.usercards.find_one({'user': user, "card": card})
bucket = 0 if already is None else already['bucket']
BUCKET_MAX = 2
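    # Spaced-repetition style update: 'easy' promotes the card one bucket,
    # 'hard' demotes it and 'normal' keeps it, clamped to the range [0, BUCKET_MAX].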
card_id = ObjectId(card)
diffbk = dict(
easy=1,
normal=0,
hard=-1
)
new_bucket = max(0, min(BUCKET_MAX, bucket+diffbk[difficulty]))
reviewed = datetime.now()
if already is None:
        db.db.usercards.insert_one({
'user': user,
"card": card_id,
"bucket": new_bucket,
"reviewed": reviewed,
})
else:
db.db.usercards.update_one(
{'user': user, "card": card_id},
{"$set": {"bucket": new_bucket, "reviewed": reviewed}}
)
print(f"Update Bucket {bucket} -> {new_bucket}: {describe_card(card)}")
return jsonify(dict(bucket=new_bucket))
def describe_card(card_id):
return " / ".join([e['text'] for e in db.db.cards.find_one({'_id': ObjectId(str(card_id))})['langs']])
def _train_cards(user, deck=None):
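    # Join each usercard with its full card document ($lookup + $unwind), then merge
    # in any deck cards the user has never reviewed, which start in bucket 0.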
usercards = list(db.db.usercards.aggregate([
{'$match': {"user": user}},
{"$lookup": {
"localField": "card",
"foreignField": "_id",
"from": "cards",
"as": "card"
}},
{"$unwind": "$card"},
{'$project': {"_id": 0}},
]))
def prepare_user_card(e):
e["card"] = V.decode(e['card']).toDict()
return e
usercards = {
e["card"]['id']: e
for e in map(prepare_user_card, usercards)
if deck is None or deck in e['card'].get("decks", [])
}
# usercards
matchd = {}
if deck is not None:
matchd["decks"] = {'$elemMatch': {"$eq": deck}}
for c in db.get_cards(matchd):
cd = c.toDict()
if cd['id'] not in usercards:
usercards[cd['id']] = dict(
user=user,
bucket=0,
reviewed=datetime.fromtimestamp(0),
card=cd,
)
# [e['bucket'] for e in usercards.values()]
# for id, c in cards.items():
for c in usercards.values():
card_descr = "/".join([e['text'] for e in c['card']['langs']])
# print(card_descr, ':', c["bucket"])
return list(usercards.values())
@flsk.route("/api/train-cards", methods=["GET"])
@login_required
def train_cards():
# user = request.args['user']
user = current_user
return jsonify(_train_cards(user.id, request.args.get('deck')))
@flsk.route("/api/train-card", methods=["GET"])
@login_required
def train_card():
# user = request.args['user']
user = current_user.id
deck = request.args.get('deck')
current = request.args.get('current')
print("selecting a random card", user, deck)
cards = [e for e in _train_cards(user, deck) if e['card']['id']!=current]
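    # Pick a bucket at random, biased towards less well-known cards (bucket 0),
    # considering only buckets that actually contain candidate cards.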
bucket_weights = {
k: v for k, v in {
0: 0.6,
1: 0.3,
2: 0.1,
}.items()
if k in [e['bucket'] for e in cards]
}
bucket = random.choices(list(bucket_weights.keys()), weights=list(bucket_weights.values()), k=1)[0]
print('random bucket:', bucket)
bucket_cards = [e for e in cards if e["bucket"] == bucket]
card = random.choices(bucket_cards, k=1)[0]
print("-> card: ", describe_card(card['card']['id']))
return jsonify(dict(card=card))
@flsk.route("/api/decks", methods=["GET", "POST", "DELETE"])
def decks():
if request.method == "GET":
# return jsonify([V.decode(e).toDict() for e in db.db.decks.find()])
return jsonify(db.get_decks_with_n_cards())
elif request.method == "POST":
print('save deck', request.json)
db.update_deck(conv.structure(request.json, Deck))
return "ok", 200
elif request.method == "DELETE":
db.delete_deck(request.json['id'])
return "ok", 200
@flsk.route("/api/add-card", methods=["POST"])
@login_required
def add_card():
is_new = request.json.get('isNew', False)
c = conv.structure(request.json['card'], Card)
print('SAVE CARD', c, is_new)
# logging.exception("exception log")
if is_new:
try:
c.creator = current_user.id
db.add_card(c)
except:
already = db.find_similar_card(c)
return jsonify(dict(card=already.toDict())), 400
else:
if not (current_user.is_admin() or current_user.id == c.creator):
return "unauthorized", 401
db.update_card(c)
return "ok", 200
@flsk.route("/api/delete-card", methods=["POST"])
@login_required
def delete_card():
    card = conv.structure(request.json, Card)
    if not (current_user.is_admin() or current_user.id == card.creator):
        return "unauthorized", 401
    c = card.toBsonDict()
    print('DELETE CARD', c)
# logging.exception("exception log")
assert c["_id"] is not None and len(str(c["_id"]))
db.db.cards.delete_one({"_id": c["_id"]})
return "ok", 200
@flsk.route('/api/upload', methods=['POST'])
def engine():
try:
decks = request.args.get('decks')
if decks is not None:
decks = decks.split(',')
errors = db.insert_file(list(request.files.values())[0], decks=decks)
except Exception as e:
print('----------------------------------')
print(e)
eventslog.error(f'upload failed: {e}')
raise e
return jsonify(errors), 200
@flsk.route("/api/langs", methods=["GET"])
def get_langs():
return jsonify([
{"id": lid, "title": ltitle} for lid, ltitle in
[
# ("en", "English"),
("fr", "Français"),
("fa", "فارسی"),
# ("fe", "fenglish")
]
])
############ user management
def hash_password(p):
return bcrypt.generate_password_hash(p).decode("utf-8")
# @flsk.route('/api/register', methods=['POST'])
# def register():
# # todo registration form
# d = structure(request.json, User)
# register_user_and_hash_pwd(d)
# return "ok", 200
######## user activation
def create_user_activation_link(u, force=False):
if force or u.activation_link is None or len(u.activation_link) == 0:
link, _ = h.generate_activation_link(f"{u.id}")
assert link is not None
u.activation_link = link
db.update_user(u.id, {'activation_link': link})
eventslog.info(f'update user {u.id} activation link')
def register_user_and_hash_pwd(u):
u.email = u.email.strip()
u.password = hash_password(u.password)
link, _ = h.generate_activation_link(f"{u.id}")
u.activation_link = link
db.insert_user(u)
return u
def mkurl(*path):
return os.path.join(environ['WEBSITE_ROOT_URL'], *path)
def send_user_activation(u):
create_user_activation_link(u, force=False)
print(f"******** {u.activation_link=}")
url = mkurl("user/activate", u.activation_link)
eventslog.info(f'sending user {u.email} activation link')
MailSender.send_email_with_link(u.email, url)
# def create_user(u: User):
# ''' create a user and it's activation link '''
# u.email = u.email.strip()
# id = db.insert_user(u)
# create_user_activation_link(u)
# eventslog.info(f'user {u.email} created: {id}, activation link: u.activation_link')
@flsk.route('/register', methods=['POST'])
def register():
try:
u = register_user_and_hash_pwd(conv.structure(request.json, User))
send_user_activation(u)
except UserAlreadyExists:
return "this user already exists!", 500
return "ok", 200
def get_user_infos(current_user):
return {k: v for k, v in current_user.toDict().items() if k != 'password'}
@flsk.route('/api/user/activate/<linkid>', methods=['POST'])
def activate_user(linkid):
u = db.get_user(filtr={'activation_link': linkid})
print(f'activating user {linkid} for {u.email if u else None}')
if u is None:
print('invalid')
return "invalid activation link", 400
else:
print(f'activation success')
db.update_user(u.id, {
"active": True,
"activation_link": "",
})
eventslog.info(f'user {u.id} activated')
return "ok", 200
@flsk.route('/api/user/reset-password/<linkid>', methods=['POST'])
def reset_user_password(linkid):
u = db.get_user(filtr={'activation_link': linkid})
    eventslog.info(f'resetting user password {u.email}')
if u is None:
return "invalid activation link", 400
else:
db.update_user(u.id, {
"activation_link": "",
"password": hash_password(request.json['password'])
})
        eventslog.info(f'user {u.id} password reset')
return "ok", 200
@flsk.route('/api/mail-from-activation-link', methods=['GET'])
def get_mail_from_link():
u = db.get_user(filtr={'activation_link': request.args['link']})
return "test@mail.com"
if u is None:
return "invalid activation link", 404
else:
return jsonify(u.mail)
@flsk.route('/api/reset-password-send-link', methods=['GET'])
def reset_password():
try:
print("reset password")
print(request.args)
u = db.get_user(email=request.args['email'])
print(u)
assert u is not None and u.active, f"password reset failed: {request.args}"
create_user_activation_link(u, force=True)
url = mkurl("user/reset-password", u.activation_link)
eventslog.info(f'resetting user {u.email} password: sending email, activation link: {url}')
MailSender.send_email_with_link(
u.email,
url,
reason="reset your password"
)
except Exception as e:
traceback.print_exc()
logging.getLogger('errors').error(f"{e}")
return "ok", 200
#### end of activation
@flsk.route('/login-check', methods=['GET'])
def login_check():
u = None
print("LOGIN CHECK")
print(current_user)
if current_user.is_authenticated:
logging.info(f'already authenticated as {current_user}')
ans = dict(
email=current_user.email,
admin=current_user.is_admin(),
)
print('********', ans)
return jsonify(ans)
else:
return jsonify({})
@flsk.route('/login', methods=['POST'])
def login():
u = None
j = request.json
userDb = db.get_user(email=j["email"])
if userDb is not None:
if not userDb.active:
return "Please confirm your email", 401
password_ok = bcrypt.check_password_hash(
userDb.password, j["password"])
if password_ok:
print("LOGIN OK", userDb.email)
userDb.authenticated = True
login_user(userDb, remember=True)
print(f'**************************** {userDb.id} logged in')
return jsonify(
dict(
email=current_user.email,
admin=current_user.is_admin(),
)), 200
eventslog.info(f'{j["email"]} failed to log in (wrong username/password)')
return "Invalid credentials", 401
@flsk.route("/logout", methods=["GET"])
@login_required
def logout():
eventslog.info(f'{current_user.id} logged out')
logout_user()
return jsonify("ok")
################# end of user management
@flsk.route('/', defaults={'path': ''})
@flsk.route('/<path:path>')
def index(path):
return render_template('index.html')
root_url = os.path.join('/', "/mikarezoo-flashcards")
static_url_path = os.path.join(root_url, "static")
app = Flask(__name__, static_url_path=static_url_path)
app.register_blueprint(flsk, url_prefix=root_url)
cache.init_app(app)
app.secret_key = environ['FLASK_SECRET_KEY']
login_manager = LoginManager()
login_manager.init_app(app)
bcrypt = Bcrypt(app)
@login_manager.user_loader
def load_user(user_id):
return db.get_user(user_id)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
app.run(host="0.0.0.0", debug=True)
else:
# gunicorn_logger = logging.getLogger('gunicorn.error')
# app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(logging.DEBUG)
# logging = app.logger
|
def median(arr):
    if not arr:
        return float('nan')
    arr = sorted(arr)
    indx = len(arr) // 2
    if len(arr) % 2 == 0:
        return (arr[indx-1] + arr[indx]) / 2
    return arr[indx]
'''
Description:
The mean (or average) is the most popular measure of central tendency; however
it does not behave very well when the data is skewed (i.e. wages distribution).
In such cases, it's better to use the median.
Your task for this kata is to find the median of an array consisting of n elements.
You can assume that all inputs are arrays of numbers in integer format.
For the empty array your code should return NaN (false in JavaScript/NULL in PHP).
Examples:
Input [1, 2, 3, 4] --> Median 2.5
Input [3, 4, 1, 2, 5] --> Median 3
'''
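# A minimal self-check against the examples in the description above
# (an empty input returns NaN, as required by the kata):
if __name__ == '__main__':
    assert median([1, 2, 3, 4]) == 2.5
    assert median([3, 4, 1, 2, 5]) == 3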
|
##########################################################################################
# #
# ICT FaceKit #
# #
# Copyright (c) 2020 USC Institute for Creative Technologies #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
##########################################################################################
"""Defines functionality to verify the loading of the ICT Face Model.
"""
import copy
import os
import openmesh as om
import face_model_io
def main():
"""Verifies that the FaceModel class loads the ICT Face Model properly.
"""
face_model = face_model_io.load_face_model('../FaceXModel')
    verify_model_loaded('./verification_files', face_model)
def verify_model_loaded(dir_path, face_model):
"""Writes meshes to verify that the ICT Face Model was loaded correctly.
Allows the user to verify that the ICT Face Model was loaded correctly.
Does this by recreating the original expression and identity meshes from
the expression shape modes, identity shape modes, and generic neutral mesh.
Saves these meshes as .obj files to a specified directory. Creates the
specified directory if it does not already exist. These verification files
can be inspected in an application like Blender.
"""
# Check if ICT face model loaded
if not face_model._model_initialized: # pylint:disable=protected-access
print("The FaceModel has not loaded the ICT Face Model.")
return
# Make the directory if it doesn't exist
if not os.path.exists(dir_path):
os.mkdir(dir_path)
# In line comments ignore protected access and line too long linter errors
gn_mesh = face_model._generic_neutral_mesh # noqa: E501 pylint:disable=protected-access
ex_names = face_model._expression_names # pylint:disable=protected-access
ex_shape_modes = face_model._expression_shape_modes # noqa: E501 pylint:disable=protected-access
id_names = face_model._identity_names # pylint:disable=protected-access
id_shape_modes = face_model._identity_shape_modes # noqa: E501 pylint:disable=protected-access
# Write expression verification meshes
print("Writing expression meshes...")
_verify_model_loaded_helper(dir_path, gn_mesh, ex_names, ex_shape_modes)
# Write identity verification meshes
print("Writing identity meshes...")
_verify_model_loaded_helper(dir_path, gn_mesh, id_names, id_shape_modes)
# Alert user to status of script
print("Completed writing verification meshes.")
def _verify_model_loaded_helper(dir_path, generic_neutral_mesh, file_names,
shape_modes):
"""Helper procedure to write the verification meshes.
"""
for file_name, shape_mode in zip(file_names, shape_modes):
# Initialize the verification mesh
write_mesh = copy.deepcopy(generic_neutral_mesh)
write_points = write_mesh.points()
# Create the verification mesh
write_points[:] = generic_neutral_mesh.points() + shape_mode
# Write the meshes to be verified
write_path = os.path.join(dir_path, file_name + '.obj')
om.write_mesh(write_path, write_mesh)
if __name__ == '__main__':
main()
|
# Define variable
i = 4
d = 4.0
s = 'HackerRank '
# Receiving input
integer = int( input() )
double = float( input() )
string = input()
# Print output (1. Sum integer 2. Sum float 3. Concat string)
print(i+integer)
print(d+double)
print(s+string)
|
# from re import escape, findall, split
#
#
# def find(needle, haystack):
# if '_' not in needle:
# return haystack.find(needle)
# reg = [escape(a) if '_' not in a else '.{{{}}}'.format(len(a))
# for a in split('(_+)', needle)]
# matches = findall('({})'.format(''.join(reg)), haystack)
# return haystack.find(matches[0]) if matches else -1
import re
def find(needle, haystack):
""" Solution from 'knight07' on CodeWars """
    compiled = re.compile(re.escape(needle).replace(re.escape("_"), r"\S"))
searched = re.search(compiled, haystack)
return searched.start() if searched else -1
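# Quick illustration with made-up strings: '_' acts as a single-character
# wildcard, so 'wor_d' matches 'world' inside the haystack.
if __name__ == '__main__':
    assert find('hello', 'say hello world') == 4
    assert find('wor_d', 'say hello world') == 10
    assert find('xyz', 'say hello world') == -1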
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygame, os, sys, random, pyganim
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
DIRECTIONS = [UP, RIGHT, DOWN, LEFT]
def load_image(nombre, dir_imagen, alpha=False):
ruta = os.path.join(dir_imagen, nombre)
try:
image = pygame.image.load(ruta)
except:
print "Error, no se puede cargar la imagen: ", ruta
sys.exit(1)
if alpha == True:
image = image.convert_alpha()
else:
image = image.convert()
return image
class Tile(pygame.sprite.Sprite):
def __init__(self, image, hasAlpha, x, y, breakable, crossable):
pygame.sprite.Sprite.__init__(self)
self.image = load_image(image, "sprites/", alpha=hasAlpha)
self.rect = self.image.get_rect()
self.rect.centerx = x
self.rect.centery = y
self.isBreakable = breakable
self.isCrossable = crossable
self.x, self.y = self.getMapCoordinates(x,y)
def getMapCoordinates(self, x, y):
return (x-16)/16,(y-32)/16
def getSpriteCoordinates(self, x, y):
return 16+16*x,32+16*y
def destroy(self):
self.kill()
return None
def position(self, d, pos = 1):
if d == UP:
return self.up(pos)
elif d == RIGHT:
return self.right(pos)
elif d == DOWN:
return self.down(pos)
elif d == LEFT:
return self.left(pos)
def up(self, pos = 1):
return self.x,self.y-pos
def right(self, pos = 1):
return self.x+pos,self.y
def down(self, pos = 1):
return self.x,self.y+pos
def left(self, pos = 1):
return self.x-pos,self.y
class Block(Tile):
def __init__(self, x, y):
Tile.__init__(self, "block.png", False, x, y, False, False)
class Brick(Tile):
def __init__(self, x, y, powerup = None):
Tile.__init__(self, "brick.png", False, x, y, True, False)
self.powerup = powerup
def destroy(self):
self.kill()
return self.powerup
class Bomberman(Tile):
def __init__(self, x, y, number):
Tile.__init__(self, "body%d.png" % (number), True, x, y, True, True)
self.front_standing = pygame.image.load("sprites/bomber%d/bomber_front.png" % (number))
self.back_standing = pygame.image.load("sprites/bomber%d/bomber_back.png" % (number))
self.left_standing = pygame.image.load("sprites/bomber%d/bomber_left.png" % (number))
self.right_standing = pygame.transform.flip(self.left_standing, True, False)
self.animTypes = 'back_walk front_walk left_walk'.split()
self.animObjs = {}
for animType in self.animTypes:
imagesAndDurations = [('sprites/bomber%d/bomber_%s.%s.png' % (number,animType, str(num).rjust(3, '0')), 0.1) for num in range(3)]
self.animObjs[animType] = pyganim.PygAnimation(imagesAndDurations)
self.animObjs['right_walk'] = self.animObjs['left_walk'].getCopy()
self.animObjs['right_walk'].flip(True, False)
self.animObjs['right_walk'].makeTransformsPermanent()
self.moveConductor = pyganim.PygConductor(self.animObjs)
self.moving = False
self.direction = DOWN
self.number = number
self.maxBombs = 1
self.bombExpansion = 2
self.bombs = []
self.insideBomb = None
self.playerNumber = number
self.speed = 1
self.transport = False
# Head part
self.head = load_image("head%d.png" % (number), "sprites/", alpha=True)
self.rhead = self.head.get_rect()
self.updateHead()
def draw(self, screen):
if self.moving:
self.moveConductor.play()
if self.direction == UP:
self.animObjs['back_walk'].blit(screen, (self.rect.x, self.rect.y-13))
elif self.direction == DOWN:
self.animObjs['front_walk'].blit(screen, (self.rect.x, self.rect.y-13))
elif self.direction == LEFT:
self.animObjs['left_walk'].blit(screen, (self.rect.x, self.rect.y-13))
elif self.direction == RIGHT:
self.animObjs['right_walk'].blit(screen, (self.rect.x, self.rect.y-13))
else:
self.moveConductor.stop()
if self.direction == UP:
screen.blit(self.back_standing, (self.rect.x, self.rect.y-13))
elif self.direction == DOWN:
screen.blit(self.front_standing, (self.rect.x, self.rect.y-13))
elif self.direction == LEFT:
screen.blit(self.left_standing, (self.rect.x, self.rect.y-13))
elif self.direction == RIGHT:
screen.blit(self.right_standing, (self.rect.x, self.rect.y-13))
def updateHead(self):
self.rhead.centerx = self.rect.centerx
self.rhead.y = self.rect.top-13
def updatePosition(self):
if self.rect.centerx % 16 < 16/2:
x = self.rect.centerx - self.rect.centerx % 16
else:
x = self.rect.centerx + (16 - self.rect.centerx % 16)
if self.rect.centery % 16 < 16/2:
y = self.rect.centery - self.rect.centery % 16
else:
y = self.rect.centery + (16 - self.rect.centery % 16)
self.x, self.y = self.getMapCoordinates(x,y)
self.updateHead()
def createBomb(self, time):
if len(self.bombs) < self.maxBombs:
x, y = self.getSpriteCoordinates(self.x, self.y)
b = Bomb(x,y, time, self.bombExpansion, self)
self.bombs.append(b)
self.insideBomb = b
return b
def removeBomb(self, bomb):
self.bombs.remove(bomb)
class Bomb(Tile):
def __init__(self, x, y, time, expansion, player):
Tile.__init__(self, "bomb.png", True, x, y, True, False)
self.timer = time
self.owner = player
self.expansion = expansion
self.bombAnim = pyganim.PygAnimation([('sprites/bomb.png', 0.2),
('sprites/bomb1.png', 0.2),
('sprites/bomb2.png', 0.2)])
self.bombAnim.play()
def draw(self, screen):
self.bombAnim.blit(screen, (self.rect.x, self.rect.y))
class Fire(Tile):
def __init__(self, x, y, time, expansion):
Tile.__init__(self, "fire/fire.png", True, x, y, False, True)
self.timer = time
# Starts from top, right, bottom, left
self.max_expansion = expansion
self.expansion = [expansion, expansion, expansion, expansion]
width = height = 16*(2*expansion+1)
self.image = pygame.Surface([width,height], pygame.SRCALPHA, 32)
self.image = self.image.convert_alpha()
self.rect = self.image.get_rect()
self.rect.centerx = x
self.rect.centery = y
def getFirePosition(self, direction, pos):
if direction == UP:
return (self.max_expansion*16,(self.max_expansion-pos)*16)
if direction == RIGHT:
return ((self.max_expansion+pos)*16,self.max_expansion*16)
if direction == DOWN:
return (self.max_expansion*16,(self.max_expansion+pos)*16)
if direction == LEFT:
return ((self.max_expansion-pos)*16,self.max_expansion*16)
def updateSprite(self):
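        # Compose the full explosion sprite: a centre tile plus, in each direction,
        # arm tiles capped with an end tile at the maximum expansion distance.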
center = load_image("fire-center.png", "sprites/fire", alpha=True)
self.image.blit(center, (self.max_expansion*16,self.max_expansion*16))
fireTypes = 'up right down left'.split()
for direction in DIRECTIONS:
fire_image = load_image('fire-%s.png' % fireTypes[direction], "sprites/fire", alpha=True)
fire_end = load_image('fire-%s-end.png' % fireTypes[direction], "sprites/fire", alpha=True)
for pos in range(1, self.expansion[direction]+1):
if pos < self.max_expansion:
self.image.blit(fire_image, self.getFirePosition(direction, pos))
else:
self.image.blit(fire_end, self.getFirePosition(direction, pos))
class BombPower(Tile):
def __init__(self, x, y):
Tile.__init__(self, "powerbomb.png", False, x, y, True, True)
def activate(self, player):
player.maxBombs = player.maxBombs + 1
class FirePower(Tile):
def __init__(self, x, y):
Tile.__init__(self, "powerfire.png", False, x, y, True, True)
def activate(self, player):
player.bombExpansion = player.bombExpansion + 1
class SpeedPower(Tile):
def __init__(self, x, y):
Tile.__init__(self, "powerspeed.png", False, x, y, True, True)
def activate(self, player):
if player.speed == 1:
player.speed = 2
class TransportPower(Tile):
def __init__(self, x, y):
Tile.__init__(self, "powertransport.png", False, x, y, True, True)
def activate(self, player):
player.transport = True
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
# @File:kmeans_example.py
# @Author: Michael.liu
# @Date:2020/6/4 11:45
# @Desc: this code is ....
import codecs
import os
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn.cluster import KMeans
from collections import Counter
import matplotlib.pyplot as plt
from sklearn.manifold import MDS
from sklearn.metrics.pairwise import cosine_similarity
import random
from matplotlib.font_manager import FontProperties
corpus_path = u"../../data/chapter4/cluster/xml_data_process.txt"
def build_feature_matrix(document,feature_type='frequency',ngram_range=(1,1),min_df=0.0,max_df=1.0):
feature_type = feature_type.lower().strip()
if feature_type == 'binary':
vectorizer = CountVectorizer(binary=True,max_df=max_df,ngram_range=ngram_range)
elif feature_type == 'frequency':
vectorizer = CountVectorizer(binary= False,min_df = min_df,max_df=max_df,ngram_range=ngram_range)
elif feature_type == 'tfidf':
vectorizer = TfidfVectorizer()
else:
raise Exception("Wrong feature type entered.Possible values:'binary','frequency','tfidf'")
feature_matrix = vectorizer.fit_transform(document).astype(float)
print(feature_matrix)
return vectorizer,feature_matrix
def load_data():
    news_data = pd.read_csv(corpus_path, sep='±±±±', engine='python', encoding='utf-8')
#print(rd.head(5))
#print(type(rd))
news_title = news_data['title'].tolist()
news_content = news_data['content'].tolist()
return news_title,news_content,news_data
def k_means(feature_matrix, num_clusters=10):
km = KMeans(n_clusters=num_clusters,
max_iter=10000)
km.fit(feature_matrix)
clusters = km.labels_
return km, clusters
def get_cluster_data(clustering_obj, news_data,
feature_names, num_clusters,
topn_features=10):
cluster_details = {}
    # Get the cluster centers
    ordered_centroids = clustering_obj.cluster_centers_.argsort()[:, ::-1]
    # Get the key features of each cluster
    # Get the documents (titles) belonging to each cluster
for cluster_num in range(num_clusters):
cluster_details[cluster_num] = {}
cluster_details[cluster_num]['cluster_num'] = cluster_num
key_features = [feature_names[index] for index in ordered_centroids[cluster_num, :topn_features]]
cluster_details[cluster_num]['key_features'] = key_features
news = news_data[news_data['Cluster'] == cluster_num]['title'].values.tolist()
cluster_details[cluster_num]['content'] = news
return cluster_details
def process():
title,content,news_data = load_data()
    # TODO: filter out stop words
filter_content = content
vectorizer, feature_matrix = build_feature_matrix(filter_content,
feature_type='tfidf',
min_df=0.2, max_df=0.90,
ngram_range=(1, 2))
#print(feature_matrix.shape)
    # Get the feature names
    feature_names = vectorizer.get_feature_names()
    #print(feature_names)
    # Print a few of the features
print(feature_names[:10])
num_clusters = 10
km_obj, clusters = k_means(feature_matrix=feature_matrix,
num_clusters=num_clusters)
news_data['Cluster'] = clusters
c = Counter(clusters)
print(c.items())
    # Extract the per-cluster data
cluster_data = get_cluster_data(clustering_obj=km_obj,
news_data=news_data,
feature_names=feature_names,
num_clusters=num_clusters,
topn_features=5)
if __name__ == '__main__':
print("start>>>>>>")
process()
print(">>>>>>>>end")
|
from flask import Flask,url_for,render_template
from Blog.config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
import os
app=Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
bcrypt=Bcrypt(app)
from Blog.users.routes import users
from Blog.auth.routes import auth
app.register_blueprint(users)
app.register_blueprint(auth)
|
#!/usr/bin/env python3
#
# This script is intended to illustrate the energy balance by
# plotting ohmic heating and radiative losses as a function of temperature
# at equilibrium ionization, similarly to figure 6 in Vallhagen et al JPP 2020.
# This is achieved by setting a prescribed temperature profile at the values
# one wants to plot for and run a dynamic simulation until equilibrium has been reached
# (since equilibrium ionization settings does not seem to work yet).
#
# NOTE! Depending on the densities and temperatures one might have to adjust Tmax_restart_eq
# to be long enough to really reach sufficient equilibration!
#
# ###################################################################
import numpy as np
import sys
import matplotlib.pyplot as plt
sys.path.append('../../py/')
from DREAM.DREAMSettings import DREAMSettings
from DREAM.DREAMOutput import DREAMOutput
from DREAM import runiface
import DREAM.Settings.Equations.IonSpecies as Ions
import DREAM.Settings.Solver as Solver
import DREAM.Settings.CollisionHandler as Collisions
import DREAM.Settings.Equations.ElectricField as Efield
import DREAM.Settings.Equations.RunawayElectrons as RE
import DREAM.Settings.Equations.HotElectronDistribution as FHot
import DREAM.Settings.Equations.ColdElectronTemperature as T_cold
from DREAM.Settings.Equations.ElectricField import ElectricField
from DREAM.Settings.Equations.ColdElectronTemperature import ColdElectronTemperature
ds = DREAMSettings()
# set collision settings
ds.collisions.collfreq_mode = Collisions.COLLFREQ_MODE_FULL
ds.collisions.collfreq_type = Collisions.COLLFREQ_TYPE_PARTIALLY_SCREENED
#ds.collisions.bremsstrahlung_mode = Collisions.BREMSSTRAHLUNG_MODE_NEGLECT
ds.collisions.bremsstrahlung_mode = Collisions.BREMSSTRAHLUNG_MODE_STOPPING_POWER
#ds.collisions.lnlambda = Collisions.LNLAMBDA_CONSTANT
ds.collisions.lnlambda = Collisions.LNLAMBDA_ENERGY_DEPENDENT
ds.collisions.pstar_mode = Collisions.PSTAR_MODE_COLLISIONAL
# ds.eqsys.n_re.setEceff(Eceff=RE.COLLQTY_ECEFF_MODE_SIMPLE)
#############################
# Set simulation parameters #
#############################
n_D = 41e20 # deuterium density
n_Z = 0.08e20 # Impurity density
J=1.69e6 # Current density (for calculation of ohmic heating)
B0 = 5.3 # magnetic field strength in Tesla
Tmax_init = 1e-11 # simulation time in seconds
Nt_init = 2 # number of time steps
Tmax_restart_ioniz = 2e-6
Nt_restart_ioniz = 500
Tmax_restart_eq = 30e-3
Nt_restart_eq = 1000
Tmax_restart_rad=1e-11
Nt_restart_rad=2
Nr = 151 # number of radial grid points
times = [0] # times at which parameters are given
radius = [0, 2] # span of the radial grid
radialgrid = np.linspace(radius[0],radius[-1],Nr)
radius_wall = 2.15 # location of the wall
E_initial = 0.001 # initial electric field in V/m (arbitrary value, does not affect the purpose of this script)
E_wall = 0.0001 # boundary electric field in V/m
# NOTE: it does not work to have self-consistent E-field with prescribed BC with E_wall=0,
# since that leads to Psi_wall=0 constantly, which does not work when you have a relative tolerance
T_initial = np.logspace(np.log10(0.7),np.log10(2e3),Nr) # initial temperature in eV
# Set up radial grid
ds.radialgrid.setB0(B0)
ds.radialgrid.setMinorRadius(radius[-1])
ds.radialgrid.setNr(Nr)
ds.radialgrid.setWallRadius(radius_wall)
# Set time stepper
ds.timestep.setTmax(Tmax_init)
ds.timestep.setNt(Nt_init)
# Set ions
Z0=1
Z=10
# If one wants to start from another initial ionization than fully ionized deuterium and neutral impurities
# Depending on the temperature range of interest, this can give a faster equilibration
"""
n_D_tmp=np.zeros(2)
n_D_tmp[0]=0*n_D
n_D_tmp[1]=1*n_D
n_D_tmp=n_D_tmp.reshape(-1,1)*np.ones((1,len(radius)))
ds.eqsys.n_i.addIon(name='D', Z=1, iontype=Ions.IONS_DYNAMIC, n=n_D_tmp,r=np.array(radius))
n_Z_tmp=np.zeros(Z+1)
n_Z_tmp[Z0]=n_Z
n_Z_tmp=n_Z_tmp.reshape(-1,1)*np.ones((1,len(radius)))
ds.eqsys.n_i.addIon(name='Ne', Z=Z, iontype=Ions.IONS_DYNAMIC, n=n_Z_tmp,r=np.array(radius))
"""
ds.eqsys.n_i.addIon(name='D', Z=1, iontype=Ions.IONS_DYNAMIC_FULLY_IONIZED, n=n_D, opacity_mode=Ions.ION_OPACITY_MODE_GROUND_STATE_OPAQUE)
ds.eqsys.n_i.addIon(name='Ne', Z=Z, iontype=Ions.IONS_DYNAMIC_NEUTRAL, n=n_Z)
# Since this script is intended to illustrate the energy balance at equilibrium ionization,
# it would be preferable to use these settings but that does not seem to work yet.
"""
ds.eqsys.n_i.addIon(name='D', Z=1, iontype=Ions.IONS_EQUILIBRIUM, n=n_D)
ds.eqsys.n_i.addIon(name='Ne', Z=Z, iontype=Ions.IONS_EQUILIBRIUM, n=n_Z)
"""
temperature = T_initial * np.ones((len(times), len(radialgrid)))
ds.eqsys.T_cold.setPrescribedData(temperature=temperature, times=times, radius=radialgrid)
# Set E_field
efield = E_initial*np.ones((len(times), len(radius)))
ds.eqsys.E_field.setPrescribedData(efield=efield, times=times, radius=radius)
ds.eqsys.E_field.setBoundaryCondition()
# Disable runaway and hot-tail grid
ds.runawaygrid.setEnabled(False)
ds.hottailgrid.setEnabled(False)
# Use the nonlinear solver
ds.solver.setType(Solver.NONLINEAR)
ds.solver.setLinearSolver(linsolv=Solver.LINEAR_SOLVER_LU)
ds.other.include('fluid')
# Save settings to HDF5 file
ds.save('init_settings.h5')
runiface(ds, 'output_init.h5', quiet=False)
#### Ionization #############
ds2 = DREAMSettings(ds)
ds2.timestep.setTmax(Tmax_restart_ioniz)
ds2.timestep.setNt(Nt_restart_ioniz)
ds2.save('ioniz_restart_settings.h5')
runiface(ds2, 'output_restart_ioniz.h5', quiet=False)
#### Equilibration ############
ds3 = DREAMSettings(ds2)
ds3.timestep.setTmax(Tmax_restart_eq)
ds3.timestep.setNt(Nt_restart_eq)
ds3.save('eq_restart_settings.h5')
runiface(ds3, 'output_restart_eq.h5', quiet=False)
#### Radiation ################
ds4 = DREAMSettings(ds3)
ds4.eqsys.T_cold.setType(ttype=T_cold.TYPE_SELFCONSISTENT)
ds4.timestep.setTmax(Tmax_restart_rad)
ds4.timestep.setNt(Nt_restart_rad)
ds4.save('rad_restart_settings.h5')
runiface(ds4, 'output_restart_rad.h5', quiet=False)
################ Plot #################
do=DREAMOutput(ds4.output.filename)
sigma=do.other.fluid.conductivity[0,:]
rad=do.other.fluid.Tcold_radiation[0,:]
T=do.eqsys.T_cold[0,:]
plt.loglog(T,J**2/sigma/1e6)
plt.loglog(T,rad/1e6)
plt.show()
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from ICA_noise import FastICA
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
rng = np.random.RandomState(1)
# generate source signal
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# mix source signal
A = np.array([[1, 2], [0, 3]]) # mixing matrix
X = np.dot(S, A.T) # observation
# add noise
sigma = 0.5
X = X + sigma * rng.randn(20000, 2)
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng, n_components=2)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
# #############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('sigma=0')
plt.subplots_adjust(0.09, 0.05, 0.94, 0.94, 0.26, 0.36)
plt.show()
print(ica.score(X))
|
from django.db import models
class ItemLiturgia(models.Model):
titulo = models.CharField(max_length=255)
descricao = models.CharField(max_length=255, blank=True, null=True)
diaLiturgico = models.ForeignKey("DiaLiturgico")
posicao = models.PositiveSmallIntegerField()
class Meta:
app_label = "mpm"
def __str__(self):
return self.diaLiturgico.__str__() + " - " + self.titulo.encode("utf-8")
|
# run tests locally:
# $ export PYTHONPATH=`pwd`
# $ python3 tests/ticTacToeTest.py
# or use -m unittest:
# $ python3 -m unittest tests/ticTacToe.py
import unittest
from tictactoe.game.ticTacToe import TicTacToe
class TestTicTacToe(unittest.TestCase):
def setUp(self):
self.game = TicTacToe("X")
def test_empty_game_field(self):
for row in self.game.gf:
for ele in row:
self.assertEqual(ele, None)
def test_set_player(self):
self.game.set_obj(0, 0) # X
self.game.set_obj(1, 0) # O
self.game.set_obj(2, 0) # X
self.game.set_obj(0, 1) # O
self.game.set_obj(0, 2) # X
self.assertEqual(self.game.gf,
[["X", "O", "X"],
["O", None, None],
["X", None, None]])
def test_set_only_once(self):
self.assertTrue(self.game.set_obj(0, 0))
self.assertFalse(self.game.set_obj(0, 0))
self.assertFalse(self.game.set_obj(0, 0))
def test_won_empty(self):
# test empty game field
self.assertEqual(self.game.check_player_has_won(), None)
def test_won_row(self):
# test row
self.game.gf = [["X", "X", "X"],
["O", "O", None],
[None, None, None]]
self.assertEqual(self.game.check_player_has_won(), "X")
self.game.gf = [["O", "O", None],
["X", "X", "X"],
["O", None, None]]
self.assertEqual(self.game.check_player_has_won(), "X")
self.game.gf = [[None, None, None],
["O", "O", None],
["X", "X", "X"]]
self.assertEqual(self.game.check_player_has_won(), "X")
def test_won_column(self):
# test column
self.game.gf = [["X", "O", None],
["X", "O", None],
["X", None, None]]
self.assertEqual(self.game.check_player_has_won(), "X")
self.game.gf = [[None, "X", "O"],
[None, "X", "O"],
[None, "X", None]]
self.assertEqual(self.game.check_player_has_won(), "X")
self.game.gf = [[None, "O", "X"],
[None, "O", "X"],
[None, None, "X"]]
self.assertEqual(self.game.check_player_has_won(), "X")
def test_won_top_left_bottom_right(self):
# test top left to bottom right
self.game.gf = [["X", None, None],
["O", "X", None],
["O", None, "X"]]
self.assertEqual(self.game.check_player_has_won(), "X")
def test_won_bottom_left_top_right(self):
# test bottom left to top right
self.game.gf = [["X", "X", "O"],
["X", "O", None],
["O", None, None]]
self.assertEqual(self.game.check_player_has_won(), "O")
def test_won_draw(self):
# test draw
self.game.gf = [["X", "X", "O"],
["O", "O", "X"],
["X", "O", "X"]]
self.assertEqual(self.game.check_player_has_won(), None)
def test_check_draw_false(self):
self.game.gf = [["X", "X", None],
["O", "O", "X"],
["X", "O", "X"]]
self.assertFalse(self.game.check_draw())
def test_check_draw_true(self):
self.game.gf = [["X", "X", "O"],
["O", "O", "X"],
["X", "O", "X"]]
self.assertTrue(self.game.check_draw())
if __name__ == '__main__':
unittest.main()
|
def make_readable(seconds):
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
return '{:02}:{:02}:{:02}'.format(hours, minutes, seconds)
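# Example usage: total seconds are formatted as a zero-padded HH:MM:SS string.
if __name__ == '__main__':
    assert make_readable(0) == '00:00:00'
    assert make_readable(3661) == '01:01:01'
    assert make_readable(86399) == '23:59:59'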
|
import autograd as ag
import click
import copy
import numpy as np
import logging
import pickle
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import RobustScaler
from sklearn.utils import check_random_state
from recnn.recnn import log_loss
from recnn.recnn import adam
from recnn.recnn import event_baseline_init
from recnn.recnn import event_baseline_predict
logging.basicConfig(level=logging.INFO,
format="[%(asctime)s %(levelname)s] %(message)s")
@click.command()
@click.argument("filename_train")
@click.argument("filename_model")
@click.argument("n_events")
@click.option("--n_features_rnn", default=4)
@click.option("--n_hidden_rnn", default=40)
@click.option("--n_epochs", default=20)
@click.option("--batch_size", default=64)
@click.option("--step_size", default=0.0005)
@click.option("--decay", default=0.9)
@click.option("--n_particles_per_event", default=10)
@click.option("--random_state", default=1)
def train(filename_train,
filename_model,
n_events,
n_features_rnn=4,
n_hidden_rnn=40,
n_epochs=5,
batch_size=64,
step_size=0.01,
decay=0.7,
n_particles_per_event=10,
random_state=1):
# Initialization
n_events = int(n_events)
logging.info("Calling with...")
logging.info("\tfilename_train = %s" % filename_train)
logging.info("\tfilename_model = %s" % filename_model)
logging.info("\tn_events = %d" % n_events)
logging.info("\tn_features_rnn = %d" % n_features_rnn)
logging.info("\tn_hidden_rnn = %d" % n_hidden_rnn)
logging.info("\tn_epochs = %d" % n_epochs)
logging.info("\tbatch_size = %d" % batch_size)
logging.info("\tstep_size = %f" % step_size)
logging.info("\tdecay = %f" % decay)
logging.info("\tn_particles_per_event = %d" % n_particles_per_event)
logging.info("\trandom_state = %d" % random_state)
rng = check_random_state(random_state)
# Make data
logging.info("Loading data + preprocessing...")
fd = open(filename_train, "rb")
# training file is assumed to be formatted a sequence of pickled pairs
# (e_i, y_i), where e_i is a list of (phi, eta, pt, mass) tuples.
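    # For reference, a file in this format could be produced roughly like this
    # (illustrative sketch only; the names used here are hypothetical):
    #   with open(filename_train, "wb") as out:
    #       for event, label in events:   # event: list of (phi, eta, pt, mass)
    #           pickle.dump((event, label), out)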
X = []
y = []
for i in range(n_events):
v_i, y_i = pickle.load(fd)
v_i = v_i[:n_particles_per_event] # truncate to the top particles
X.append(v_i)
y.append(y_i)
y = np.array(y)
fd.close()
logging.info("\tfilename = %s" % filename_train)
logging.info("\tX size = %d" % len(X))
logging.info("\ty size = %d" % len(y))
# Preprocessing
logging.info("Preprocessing...")
tf_features = RobustScaler().fit(
np.vstack([features for features in X]))
for i in range(len(X)):
X[i] = tf_features.transform(X[i])
if len(X[i]) < n_particles_per_event:
X[i] = np.vstack([X[i],
np.zeros((n_particles_per_event - len(X[i]), 4))])
# Split into train+test
logging.info("Splitting into train and validation...")
X_train, X_valid, y_train, y_valid = train_test_split(X, y,
test_size=1000,
stratify=y,
random_state=rng)
# Training
logging.info("Training...")
predict = event_baseline_predict
init = event_baseline_init
trained_params = init(n_features_rnn, n_hidden_rnn,
random_state=rng)
n_batches = int(np.ceil(len(X_train) / batch_size))
best_score = [-np.inf] # yuck, but works
best_params = [trained_params]
def loss(X, y, params):
y_pred = predict(params, X,
n_particles_per_event=n_particles_per_event)
l = log_loss(y, y_pred).mean()
return l
def objective(params, iteration):
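        # Loss on a random contiguous mini-batch of the training data; the RNG is
        # seeded with the iteration index so every step draws a reproducible batch.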
rng = check_random_state(iteration)
start = rng.randint(len(X_train) - batch_size)
idx = slice(start, start+batch_size)
return loss(X_train[idx], y_train[idx], params)
def callback(params, iteration, gradient):
if iteration % 25 == 0:
roc_auc = roc_auc_score(y_valid,
predict(params, X_valid,
n_particles_per_event=n_particles_per_event))
if roc_auc > best_score[0]:
best_score[0] = roc_auc
best_params[0] = copy.deepcopy(params)
fd = open(filename_model, "wb")
pickle.dump(best_params[0], fd)
fd.close()
logging.info(
"%5d\t~loss(train)=%.4f\tloss(valid)=%.4f"
"\troc_auc(valid)=%.4f\tbest_roc_auc(valid)=%.4f" % (
iteration,
loss(X_train[:5000], y_train[:5000], params),
loss(X_valid, y_valid, params),
roc_auc,
best_score[0]))
for i in range(n_epochs):
logging.info("epoch = %d" % i)
logging.info("step_size = %.4f" % step_size)
trained_params = adam(ag.grad(objective),
trained_params,
step_size=step_size,
num_iters=1 * n_batches,
callback=callback)
step_size = step_size * decay
if __name__ == "__main__":
train()
|
######################################################
# Multi-Layer Perceptron Classifier for MNIST dataset
# Mark Harvey
# Dec 2018
######################################################
import tensorflow as tf
import os
import sys
import shutil
#####################################################
# Set up directories
#####################################################
# Returns the directory the current script (or interpreter) is running in
def get_script_directory():
path = os.path.realpath(sys.argv[0])
if os.path.isdir(path):
return path
else:
return os.path.dirname(path)
SCRIPT_DIR = get_script_directory()
print('This script is located in: ', SCRIPT_DIR)
# create a directory for the MNIST dataset if it doesn't already exist
MNIST_DIR = os.path.join(SCRIPT_DIR, 'mnist_dir')
if not (os.path.exists(MNIST_DIR)):
os.makedirs(MNIST_DIR)
print("Directory " , MNIST_DIR , "created ")
# create a directory for the TensorBoard data if it doesn't already exist
# delete it and recreate if it already exists
TB_LOG_DIR = os.path.join(SCRIPT_DIR, 'tb_log')
if (os.path.exists(TB_LOG_DIR)):
shutil.rmtree(TB_LOG_DIR)
os.makedirs(TB_LOG_DIR)
print("Directory " , TB_LOG_DIR , "created ")
#####################################################
# Dataset preparation
#####################################################
# download of dataset, will only run if doesn't already exist in disk
mnist_dataset = tf.keras.datasets.mnist.load_data(path=os.path.join(MNIST_DIR, 'mnist_data') )
"""
The split into training & test datasets is already done for us by
the tf.keras.datasets.mnist.load_data function which returns tuples
of Numpy arrays - 60k images in training set, 10k in test dataset
- x_train: set of 60k training images
- y_train: set of 60k training labels
 - x_test : set of 10k test images
 - y_test : set of 10k test labels
"""
(x_train, y_train), (x_test, y_test) = mnist_dataset
"""
You should always know what your dataset looks like, so let's print some
information about it...
"""
print("The training dataset has {img} images and {lbl} labels".format(img=len(x_train), lbl=len(y_train)))
print("The test dataset has {img} images and {lbl} labels".format(img=len(x_test), lbl=len(y_test)))
print("The training dataset shape is: {shp}".format(shp=x_train.shape))
print("The shape of each member of the training data is: {shp}".format(shp=x_train[0].shape))
print("The datatype of each pixel of the images is: {dtyp}".format(dtyp=x_train[0].dtype))
print("The shape of each label is: {shp}".format(shp=y_train[0].shape))
print("The datatype of each label is: {dtyp}".format(dtyp=y_train[0].dtype))
"""
Based on this information, we know that we have some work to do on the dataset..
 - we can't input a 28x28 image to an MLP, we need to flatten the data to a 784 element vector
where 784 = 28 x 28.
- we should scale the pixel data from its current range of 0:255 to 0:1.
- the labels are integers, our MLP outputs 10 probabilities, each from 0 to 1. We need to
convert the integer labels into one-hot encoded vectors of 10 elements.
"""
# flatten the images
x_train = x_train.reshape(len(x_train), 784)
x_test = x_test.reshape(len(x_test), 784)
# The image pixels are 8bit integers (uint8)
# scale them from range 0:255 to range 0:1
x_train = x_train / 255.0
x_test = x_test / 255.0
# one-hot encode the labels
y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test)
print('\nThe datasets now look like this:')
print("The training dataset shape is: {shp}".format(shp=x_train.shape))
print("The training labels shape is: {shp}".format(shp=y_train.shape))
print("The shape of each member of the training data is: {shp}".format(shp=x_train[0].shape))
print("The shape of each label is: {shp}".format(shp=y_train[0].shape))
print("The datatype of each label is: {dtyp}".format(dtyp=y_train[0].dtype))
###############################################
# Hyperparameters
###############################################
BATCHSIZE=50
LEARNRATE=0.0001
STEPS=int(len(x_train) / BATCHSIZE)
#####################################################
# Create the Computational graph
#####################################################
# In this section, we define the MLP, placeholders for feeding in data
# and the loss and optimizer functions
# define placeholders for the input data & labels
x = tf.placeholder('float32', [None, 784], name='images_in')
y = tf.placeholder('float32', [None,10], name='labels_in')
# MLP definition
input_layer = tf.layers.dense(inputs=x, units=784, activation=tf.nn.relu)
hidden_layer1 = tf.layers.dense(inputs=input_layer, units=196, activation=tf.nn.relu)
hidden_layer2 = tf.layers.dense(inputs=hidden_layer1, units=10, activation=None)
prediction = tf.nn.softmax(hidden_layer2)
# Define a cross entropy loss function
loss = tf.reduce_mean(tf.losses.softmax_cross_entropy(logits=hidden_layer2, onehot_labels=y))
# Define the optimizer function
optimizer = tf.train.AdamOptimizer(learning_rate=LEARNRATE).minimize(loss)
# accuracy
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# variable initialization
init = tf.initializers.global_variables()
# TensorBoard data collection
tf.summary.scalar('cross_entropy loss', loss)
tf.summary.scalar('accuracy', accuracy)
#####################################################
# Create & run the graph in a Session
#####################################################
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# TensorBoard writer
writer = tf.summary.FileWriter(TB_LOG_DIR, sess.graph)
tb_summary = tf.summary.merge_all()
# Training cycle with training data
for i in range(STEPS):
# fetch a batch from training dataset
batch_x, batch_y = x_train[i*BATCHSIZE:i*BATCHSIZE+BATCHSIZE], y_train[i*BATCHSIZE:i*BATCHSIZE+BATCHSIZE]
# calculate training accuracy & display it every 100 steps
train_accuracy = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
if i % 100 == 0:
print ("Train Step:", i, ' Training Accuracy: ', train_accuracy)
# Run graph for optimization - i.e. do the training
_, s = sess.run([optimizer, tb_summary], feed_dict={x: batch_x, y: batch_y})
writer.add_summary(s, i)
print("Training Finished!")
writer.close()
# Evaluation with test data
print ("Accuracy of trained network with test data:", sess.run(accuracy, feed_dict={x: x_test, y: y_test}))
print('Run `tensorboard --logdir=%s --port 6006 --host localhost` to see the results.' % TB_LOG_DIR)
|
from fields import *
import texttable
from collections import defaultdict
class Board:
col_map = {'A': 0, 'B': 1, 'C': 2, 'D': 3, 'E': 4, 'F': 5}
def __init__(self):
self.matrix = [[FullHouse(), ThreeOfAKind(), FourOfAKind(), Straight(), One(), ThreeOfAKind()],
[Straight(), Six(), ThreeOfAKind(), FullHouse(), Yahtzee(), FourOfAKind()],
[FourOfAKind(), FullHouse(), Yahtzee(), Straight(), ThreeOfAKind(), Three()],
[FullHouse(), Straight(), Five(), FourOfAKind(), FullHouse(), ThreeOfAKind()],
[Two(), Yahtzee(), FullHouse(), ThreeOfAKind(), FourOfAKind(), Straight()],
[ThreeOfAKind(), FourOfAKind(), Straight(), Four(), Straight(), FullHouse()]
]
self.size = len(self.matrix)
self._setup_fields()
def place(self, loc, player, dice):
if loc is not None:
row, col = self._parse_location(loc)
field = self.matrix[row][col]
return field.place(player, dice)
return False
def _setup_fields(self):
for i, row in enumerate(self.matrix):
for j, field in enumerate(row):
field.position = (i, j)
def _parse_location(self, loc):
a, b = loc
if isinstance(a, int) and isinstance(b, int):
return a, b
elif b.isdigit():
a, b = b, a
return int(a), self.col_map[b]
def _four_in_a_row_fields(self, fields):
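        # Scan the fields for a run of four consecutive fields topped by the same
        # player; an empty field (or a change of player) resets the run.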
        count = 0
        last = None
        for field in fields:
            if field.top_player is None:
                count = 0
                last = None
            elif field.top_player == last:
                count += 1
            else:
                count = 1
                last = field.top_player
            if count == 4:
                return True
        return False
def four_in_a_row(self):
# check rows
for fields in self._rows():
if self._four_in_a_row_fields(fields):
return True
# check cols
for fields in self._cols():
if self._four_in_a_row_fields(fields):
return True
# check diagonals 1
for fields in self._diagonals():
if len(fields) >= 4 and self._four_in_a_row_fields(fields):
return True
return False
def fields(self):
for row in self.matrix:
for field in row:
yield field
def _cols(self):
for col in range(self.size):
yield [row[col] for row in self.matrix]
def _rows(self):
for row in self.matrix:
yield row
def _diagonals(self):
n = self.size
for y in range(self.size * 2 - 1):
yield [self.matrix[y - x][x] for x in range(n) if 0 <= y - x < n]
yield [self.matrix[y + x - n + 1][x] for x in range(n) if 0 <= y + x - n +1 < n]
def __repr__(self):
table = texttable.Texttable(max_width=120)
table.set_cols_align(["c"] * (self.size + 1))
table.add_row(["", "A", "B", "C", "D", "E", "F"])
for num, row in enumerate(self.matrix):
items = [num]
for field in row:
if field.top_player is None:
player_str = "-"
else:
player_str = field.top_player.name
items.append("%s\n\n%s\n%i" % (field.name, player_str, field.height))
table.add_row(items)
return table.draw()
|
# Generated by Django 2.0 on 2017-12-14 02:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('yyfeed', '0003_auto_20170107_0937'),
]
operations = [
migrations.AlterField(
model_name='feed',
name='link',
field=models.URLField(max_length=4000),
),
migrations.AlterField(
model_name='feeditem',
name='link',
field=models.URLField(max_length=4000),
),
]
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import errno
import os
import unittest
import unittest.mock
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Iterator
import pytest
from pants.util import dirutil
from pants.util.contextutil import pushd, temporary_dir
from pants.util.dirutil import (
_mkdtemp_unregister_cleaner,
absolute_symlink,
fast_relpath,
group_by_dir,
longest_dir_prefix,
read_file,
relative_symlink,
rm_rf,
safe_concurrent_creation,
safe_file_dump,
safe_mkdir,
safe_mkdtemp,
safe_open,
safe_rmtree,
touch,
)
def strict_patch(target, **kwargs):
return unittest.mock.patch(target, autospec=True, spec_set=True, **kwargs)
class TestDirutilTest:
@pytest.fixture(autouse=True)
def _setup(self) -> None:
# Ensure we start in a clean state.
_mkdtemp_unregister_cleaner()
def test_longest_dir_prefix(self) -> None:
# Find the longest prefix (standard case).
prefixes = ["hello", "hello_world", "hello/world", "helloworld"]
assert longest_dir_prefix("hello/world/pants", prefixes) == "hello/world"
assert longest_dir_prefix("hello/", prefixes) == "hello"
assert longest_dir_prefix("hello", prefixes) == "hello"
assert longest_dir_prefix("scoobydoobydoo", prefixes) is None
def test_longest_dir_prefix_special(self) -> None:
# Ensure that something that is a longest prefix, but not a longest dir
# prefix, is not tagged.
prefixes = ["helloworldhowareyou", "helloworld"]
assert longest_dir_prefix("helloworldhowareyoufine/", prefixes) is None
assert longest_dir_prefix("helloworldhowareyoufine", prefixes) is None
def test_fast_relpath(self) -> None:
def assert_relpath(expected: str, path: str, start: str) -> None:
assert expected == fast_relpath(path, start)
assert_relpath("c", "/a/b/c", "/a/b")
assert_relpath("c", "/a/b/c", "/a/b/")
assert_relpath("c", "b/c", "b")
assert_relpath("c", "b/c", "b/")
assert_relpath("c/", "b/c/", "b")
assert_relpath("c/", "b/c/", "b/")
assert_relpath("", "c/", "c/")
assert_relpath("", "c", "c")
assert_relpath("", "c/", "c")
assert_relpath("", "c", "c/")
assert_relpath("c/", "c/", "")
assert_relpath("c", "c", "")
def test_fast_relpath_invalid(self) -> None:
with pytest.raises(ValueError):
fast_relpath("/a/b", "/a/baseball")
with pytest.raises(ValueError):
fast_relpath("/a/baseball", "/a/b")
@strict_patch("atexit.register")
@strict_patch("os.getpid")
@strict_patch("pants.util.dirutil.safe_rmtree")
@strict_patch("tempfile.mkdtemp")
def test_mkdtemp_setup_teardown(
self, tempfile_mkdtemp, dirutil_safe_rmtree, os_getpid, atexit_register
):
def faux_cleaner():
pass
DIR1, DIR2 = "fake_dir1__does_not_exist", "fake_dir2__does_not_exist"
# Make sure other "pids" are not cleaned.
dirutil._MKDTEMP_DIRS["fluffypants"].add("yoyo")
tempfile_mkdtemp.side_effect = (DIR1, DIR2)
os_getpid.return_value = "unicorn"
try:
assert DIR1 == dirutil.safe_mkdtemp(dir="1", cleaner=faux_cleaner)
assert DIR2 == dirutil.safe_mkdtemp(dir="2", cleaner=faux_cleaner)
assert "unicorn" in dirutil._MKDTEMP_DIRS
assert {DIR1, DIR2} == dirutil._MKDTEMP_DIRS["unicorn"]
dirutil._mkdtemp_atexit_cleaner()
assert "unicorn" not in dirutil._MKDTEMP_DIRS
assert {"yoyo"} == dirutil._MKDTEMP_DIRS["fluffypants"]
finally:
dirutil._MKDTEMP_DIRS.pop("unicorn", None)
dirutil._MKDTEMP_DIRS.pop("fluffypants", None)
dirutil._mkdtemp_unregister_cleaner()
atexit_register.assert_called_once_with(faux_cleaner)
assert os_getpid.called
assert [
unittest.mock.call(dir="1"),
unittest.mock.call(dir="2"),
] == tempfile_mkdtemp.mock_calls
assert sorted([unittest.mock.call(DIR1), unittest.mock.call(DIR2)]) == sorted(
dirutil_safe_rmtree.mock_calls
)
def test_safe_walk(self) -> None:
"""Test that directory names are correctly represented as unicode strings."""
# This test is unnecessary in Python 3, since all strings are unicode and there is no
# separate unicode constructor.
with temporary_dir() as tmpdir:
safe_mkdir(os.path.join(tmpdir, "中文"))
for _, dirs, _ in dirutil.safe_walk(tmpdir.encode()):
assert all(isinstance(dirname, str) for dirname in dirs)
@contextmanager
def tree(self) -> Iterator[tuple[str, str]]:
# root/
# a/
# b/
# 1
# 2
# 2 -> root/a/b/2
# b -> root/a/b
with temporary_dir() as root:
with safe_open(os.path.join(root, "a", "b", "1"), "wb") as fp:
fp.write(b"1")
touch(os.path.join(root, "a", "b", "2"))
os.symlink(os.path.join(root, "a", "b", "2"), os.path.join(root, "a", "2"))
os.symlink(os.path.join(root, "a", "b"), os.path.join(root, "b"))
with temporary_dir() as dst:
yield root, dst
@dataclass(frozen=True)
class Dir:
path: str
@dataclass(frozen=True)
class File:
path: str
contents: str
@classmethod
def empty(cls, path: str) -> TestDirutilTest.File:
return cls(path, contents="")
@classmethod
def read(cls, root: str, relpath: str) -> TestDirutilTest.File:
with open(os.path.join(root, relpath)) as fp:
return cls(relpath, fp.read())
@dataclass(frozen=True)
class Symlink:
path: str
def assert_tree(self, root: str, *expected: Dir | File | Symlink):
def collect_tree() -> (
Iterator[TestDirutilTest.Dir | TestDirutilTest.File | TestDirutilTest.Symlink]
):
for path, dirnames, filenames in os.walk(root, followlinks=False):
relpath = os.path.relpath(path, root)
if relpath == os.curdir:
relpath = ""
for dirname in dirnames:
dirpath = os.path.join(relpath, dirname)
if os.path.islink(os.path.join(path, dirname)):
yield self.Symlink(dirpath)
else:
yield self.Dir(dirpath)
for filename in filenames:
filepath = os.path.join(relpath, filename)
if os.path.islink(os.path.join(path, filename)):
yield self.Symlink(filepath)
else:
yield self.File.read(root, filepath)
assert frozenset(expected) == frozenset(collect_tree())
def test_relative_symlink(self) -> None:
with temporary_dir() as tmpdir_1: # source and link in same dir
source = os.path.join(tmpdir_1, "source")
link = os.path.join(tmpdir_1, "link")
rel_path = os.path.relpath(source, os.path.dirname(link))
relative_symlink(source, link)
assert os.path.islink(link)
assert rel_path == os.readlink(link)
def test_relative_symlink_source_parent(self) -> None:
with temporary_dir() as tmpdir_1: # source in parent dir of link
child = os.path.join(tmpdir_1, "child")
os.mkdir(child)
source = os.path.join(tmpdir_1, "source")
link = os.path.join(child, "link")
relative_symlink(source, link)
rel_path = os.path.relpath(source, os.path.dirname(link))
assert os.path.islink(link)
assert rel_path == os.readlink(link)
def test_relative_symlink_link_parent(self) -> None:
with temporary_dir() as tmpdir_1: # link in parent dir of source
child = os.path.join(tmpdir_1, "child")
source = os.path.join(child, "source")
link = os.path.join(tmpdir_1, "link")
relative_symlink(source, link)
rel_path = os.path.relpath(source, os.path.dirname(link))
assert os.path.islink(link)
assert rel_path == os.readlink(link)
def test_relative_symlink_same_paths(self) -> None:
with temporary_dir() as tmpdir_1: # source is link
source = os.path.join(tmpdir_1, "source")
with pytest.raises(ValueError, match=r"Path for link is identical to source"):
relative_symlink(source, source)
def test_relative_symlink_bad_source(self) -> None:
with temporary_dir() as tmpdir_1: # source is not absolute
source = os.path.join("foo", "bar")
link = os.path.join(tmpdir_1, "link")
with pytest.raises(ValueError, match=r"Path for source.*absolute"):
relative_symlink(source, link)
def test_relative_symlink_bad_link(self) -> None:
with temporary_dir() as tmpdir_1: # link is not absolute
source = os.path.join(tmpdir_1, "source")
link = os.path.join("foo", "bar")
with pytest.raises(ValueError, match=r"Path for link.*absolute"):
relative_symlink(source, link)
def test_relative_symlink_overwrite_existing_file(self) -> None:
# Succeeds, since os.unlink can be safely called on files that aren't symlinks.
with temporary_dir() as tmpdir_1: # source and link in same dir
source = os.path.join(tmpdir_1, "source")
link_path = os.path.join(tmpdir_1, "link")
touch(link_path)
relative_symlink(source, link_path)
def test_relative_symlink_exception_on_existing_dir(self) -> None:
# This was historically an uncaught exception; the tested behavior is to catch the error.
with temporary_dir() as tmpdir_1:
source = os.path.join(tmpdir_1, "source")
link_path = os.path.join(tmpdir_1, "link")
safe_mkdir(link_path)
with pytest.raises(
ValueError, match=r"Path for link.*overwrite an existing directory*"
):
relative_symlink(source, link_path)
def test_rm_rf_file(self, file_name="./foo") -> None:
with temporary_dir() as td, pushd(td):
touch(file_name)
assert os.path.isfile(file_name)
rm_rf(file_name)
assert not os.path.exists(file_name)
def test_rm_rf_dir(self, dir_name="./bar") -> None:
with temporary_dir() as td, pushd(td):
safe_mkdir(dir_name)
assert os.path.isdir(dir_name)
rm_rf(dir_name)
assert not os.path.exists(dir_name)
def test_rm_rf_nonexistent(self, file_name="./non_existent_file") -> None:
with temporary_dir() as td, pushd(td):
rm_rf(file_name)
def test_rm_rf_permission_error_raises(self, file_name="./perm_guarded_file") -> None:
with temporary_dir() as td, pushd(td), unittest.mock.patch(
"pants.util.dirutil.shutil.rmtree"
) as mock_rmtree, pytest.raises(OSError):
mock_rmtree.side_effect = OSError(errno.EACCES, os.strerror(errno.EACCES))
touch(file_name)
rm_rf(file_name)
def test_rm_rf_no_such_file_not_an_error(self, file_name="./vanishing_file") -> None:
with temporary_dir() as td, pushd(td), unittest.mock.patch(
"pants.util.dirutil.shutil.rmtree"
) as mock_rmtree:
mock_rmtree.side_effect = OSError(errno.ENOENT, os.strerror(errno.ENOENT))
touch(file_name)
rm_rf(file_name)
def assert_dump_and_read(self, test_content, dump_kwargs, read_kwargs):
with temporary_dir() as td:
test_filename = os.path.join(td, "test.out")
safe_file_dump(test_filename, test_content, **dump_kwargs)
assert read_file(test_filename, **read_kwargs) == test_content
def test_readwrite_file_binary(self) -> None:
self.assert_dump_and_read(b"333", {"mode": "wb"}, {"binary_mode": True})
with pytest.raises(Exception):
# File is not opened as binary.
self.assert_dump_and_read(b"333", {"mode": "w"}, {"binary_mode": True})
def test_readwrite_file_unicode(self) -> None:
self.assert_dump_and_read("✓", {"mode": "w"}, {"binary_mode": False})
with pytest.raises(Exception):
# File is opened as binary.
self.assert_dump_and_read("✓", {"mode": "wb"}, {"binary_mode": True})
def test_safe_concurrent_creation(self) -> None:
with temporary_dir() as td:
expected_file = os.path.join(td, "expected_file")
with safe_concurrent_creation(expected_file) as tmp_expected_file:
os.mkdir(tmp_expected_file)
assert os.path.exists(tmp_expected_file)
assert not os.path.exists(expected_file)
assert os.path.exists(expected_file)
def test_safe_concurrent_creation_noop(self) -> None:
with temporary_dir() as td:
expected_file = os.path.join(td, "parent_dir", "expected_file")
# Ensure safe_concurrent_creation() doesn't bomb if we don't write the expected files.
with safe_concurrent_creation(expected_file):
pass
assert not os.path.exists(expected_file)
assert os.path.exists(os.path.dirname(expected_file))
def test_safe_concurrent_creation_exception_handling(self) -> None:
with temporary_dir() as td:
expected_file = os.path.join(td, "expected_file")
with pytest.raises(ZeroDivisionError):
with safe_concurrent_creation(expected_file) as safe_path:
os.mkdir(safe_path)
assert os.path.exists(safe_path)
raise ZeroDivisionError("zomg")
assert not os.path.exists(safe_path)
assert not os.path.exists(expected_file)
def test_safe_rmtree_link(self):
with temporary_dir() as td:
real = os.path.join(td, "real")
link = os.path.join(td, "link")
os.mkdir(real)
os.symlink(real, link)
assert os.path.exists(real)
assert os.path.exists(link)
safe_rmtree(link)
assert os.path.exists(real)
assert not os.path.exists(link)
def test_group_by_dir(self) -> None:
paths = {
"foo/bar/baz1.ext",
"foo/bar/baz1_test.ext",
"foo/bar/qux/quux1.ext",
"foo/__init__.ext",
"foo/bar/__init__.ext",
"foo/bar/baz2.ext",
"foo/bar1.ext",
"foo1.ext",
"__init__.ext",
}
assert {
"": {"__init__.ext", "foo1.ext"},
"foo": {"__init__.ext", "bar1.ext"},
"foo/bar": {"__init__.ext", "baz1.ext", "baz1_test.ext", "baz2.ext"},
"foo/bar/qux": {"quux1.ext"},
} == group_by_dir(paths)
class AbsoluteSymlinkTest(unittest.TestCase):
def setUp(self) -> None:
self.td = safe_mkdtemp()
self.addCleanup(safe_rmtree, self.td)
self.source = os.path.join(self.td, "source")
self.link = os.path.join(self.td, "link")
def _create_and_check_link(self, source: str, link: str) -> None:
absolute_symlink(source, link)
assert os.path.islink(link)
assert source == os.readlink(link)
def test_link(self) -> None:
# Check if parent dirs will be created for the link
link = os.path.join(self.td, "a", "b", "c", "self.link")
self._create_and_check_link(self.source, link)
def test_overwrite_link_link(self) -> None:
# Do it twice, to make sure we can overwrite existing link
self._create_and_check_link(self.source, self.link)
self._create_and_check_link(self.source, self.link)
def test_overwrite_link_file(self) -> None:
with open(self.source, "w") as fp:
fp.write("evidence")
# Do it twice, to make sure we can overwrite existing link
self._create_and_check_link(self.source, self.link)
self._create_and_check_link(self.source, self.link)
# The link should have been deleted (over-written), not the file it pointed to.
with open(self.source) as fp:
assert "evidence" == fp.read()
def test_overwrite_link_dir(self) -> None:
nested_dir = os.path.join(self.source, "a", "b", "c")
os.makedirs(nested_dir)
# Do it twice, to make sure we can overwrite existing link
self._create_and_check_link(self.source, self.link)
self._create_and_check_link(self.source, self.link)
# The link should have been deleted (over-written), not the dir it pointed to.
assert os.path.isdir(nested_dir)
def test_overwrite_file(self) -> None:
touch(self.link)
self._create_and_check_link(self.source, self.link)
def test_overwrite_dir(self) -> None:
os.makedirs(os.path.join(self.link, "a", "b", "c"))
self._create_and_check_link(self.source, self.link)
|
from .example_net import ExampleNet
from .attention_net import AttenNet
from .best_net import BestNet
|
def list_all_run_instances(intent_request):
# Assumption: an Amazon Lex-style event where the intent name lives under 'currentIntent'.
intent_name = intent_request.get('currentIntent', {}).get('name')
if intent_name == 'list_all_instances':
return list_all_instances(intent_request)
|
from .eod_data import EOD, Future_EOD, Charting, Technical
from .fundamental import Fundamental
from .index import Index, IndexSpecific
from .news import News
from .peers import Peers
from .sec import SEC
from .weekly import Weekly
from .sectors import Sectors
from .settings import Settings
from .ticker import Ticker
from .nostradamus_settings import Nostradamus_Settings
from .stats import Stats
|
from matplotlib import pyplot as plt
import numpy as np
n = np.linspace(0,199,200)
#User-input x(n)
function_xn = input('Enter a function x(n): ') #NOTE: Test input is np.sin(((3*n*np.pi)/100))
def x(n):
fxn_x = eval(function_xn)
return fxn_x
#piecewise function y(n)
xn = x(n)
y = np.zeros(len(n))
for a in range(len(n)):
if a == 0:
# forward one-sided difference at the left endpoint
y[a] = ((-1.5)*xn[a]) + (2*xn[a+1]) - (0.5*xn[a+2])
elif a < len(n) - 1:
# central difference for interior samples
y[a] = (0.5*xn[a+1]) - (0.5*xn[a-1])
else:
# backward one-sided difference at the right endpoint
y[a] = (1.5*xn[a]) - (2*xn[a-1]) + (0.5*xn[a-2])
#Graph of x(n) and y(n)
plt.plot(x(n),'-', c = 'tab:purple', label = 'x(n)')
plt.plot(y, '-', c = 'tab:pink', label = 'y(n)')
plt.legend()
plt.title('Graphs of x(n) and y(n)')
plt.xlabel('x-axis')
plt.ylabel('y-axis')
plt.grid()
plt.show()
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
from collections import Counter
total=0
s=[]
number_of_shoes = input()
all_shoes_size = Counter(list(input().split()))
num_customers = input()
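# For each customer line (size, price): sell the pair if that size is still in stock,
# add the price to the total, and decrement the inventory Counter.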
for n in range(int(num_customers)):
s.append((input().split()))
if all_shoes_size[s[n][0]]>0:
total +=int(s[n][1])
all_shoes_size[s[n][0]] = all_shoes_size[s[n][0]]-1
print(total)
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import mics
import pandas as pd
plt.rcParams.update({'font.size':8})
fig = plt.figure('axes',figsize=(3.0,2.0))
ax = fig.add_subplot(111)
ax.set_xlabel('Time (ns)')
ax.set_ylabel('Total Energy (kcal/mol)')
timesteps = [1,2,3,4]
color = ['blue', 'green', 'red', 'black' ]
ax.set_xlim(-0.1,6.1)
ax.set_ylim(-6480,-6460)
xy = [[(0.8, 0.07),(0.8, 0.42),(0.8, 0.63),(0.8, 0.9)], \
[(0.87, 0.035),(0.72, 0.2),(0.6, 0.2),(0.47, 0.8)]]
yticks = [-6480,-6475,-6470,-6465,-6460]
ax.set_yticks(yticks)
captions = [r'$h=1$ fs',r'$h=2$ fs',r'$h=3$ fs',r'$h=4$ fs']
color_c = ['black', 'red', 'green', 'blue' ]
k = 0
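# Plot the TotEng (solid) and Hs (dotted) columns against time for each timestep,
# largest h first, and annotate each pair of curves with its step size.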
for i in reversed(timesteps):
df = pd.read_csv('NVE_' + str(i) + 'fs.csv')
ax.plot(df['Step']*i*1e-6-0.9, df['TotEng'], color = color[k],label=r'$\mathcal{H}$')
ax.plot(df['Step']*i*1e-6-0.9, df['Hs'],linestyle = 'dotted', color = color[k],label=r'$\widetilde{\mathcal{H}}$')
ax.annotate(captions[k], xy=(0, 0) , xycoords= 'axes fraction', xytext=xy[0][k], \
textcoords='axes fraction', color=color_c[k])
k = k + 1
fig.savefig('NVE.eps', format='eps', dpi=600, bbox_inches='tight')
|
# -*- coding: utf-8 -*-
from collections import deque
class Solution:
def isValid(self, s):
stack = deque()
for c in s:
if c == "(":
stack.append(")")
elif c == "[":
stack.append("]")
elif c == "{":
stack.append("}")
elif c == ")" or c == "]" or c == "}":
try:
top = stack.pop()
if top != c:
return False
except IndexError:
return False
return len(stack) == 0
if __name__ == "__main__":
solution = Solution()
assert solution.isValid("()")
assert solution.isValid("()[]{}")
assert not solution.isValid("(]")
assert not solution.isValid("([)]")
assert solution.isValid("{[]}")
assert not solution.isValid("]")
assert not solution.isValid("[")
|
from django.contrib import admin
from .models import canditates,rounds_table,role_table
admin.site.register(canditates)
admin.site.register(role_table)
admin.site.register(rounds_table)
|
"""Mocks used for testing."""
import httmock
# Modoboa API mocks
@httmock.urlmatch(
netloc=r"api\.modoboa\.org$", path=r"^/1/instances/search/", method="post")
def modo_api_instance_search(url, request):
"""Return empty response."""
return {"status_code": 404}
@httmock.urlmatch(
netloc=r"api\.modoboa\.org$", path=r"^/1/instances/", method="post")
def modo_api_instance_create(url, request):
"""Simulate successful creation."""
return {
"status_code": 201,
"content": {"pk": 100}
}
@httmock.urlmatch(
netloc=r"api\.modoboa\.org$", path=r"^/1/instances/.+/", method="put")
def modo_api_instance_update(url, request):
"""Simulate successful update."""
return {"status_code": 200}
@httmock.urlmatch(
netloc=r"api\.modoboa\.org$", path=r"^/1/versions/", method="get")
def modo_api_versions(url, request):
"""Simulate versions check."""
return {
"status_code": 200,
"content": [
{"name": "modoboa", "version": "9.0.0", "url": ""},
]
}
@httmock.urlmatch(
netloc=r"api\.modoboa\.org$", path=r"^/1/versions/", method="get")
def modo_api_versions_no_update(url, request):
"""Simulate versions check."""
return {
"status_code": 200,
"content": [
{"name": "modoboa", "version": "0.0.0", "url": ""},
]
}
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
from flask import Blueprint, render_template, g
from scriptfan.models import *
postapp = Blueprint("post", __name__, url_prefix="/post")
@postapp.route('/')
def index():
g.posts = Post.objects.all()
return render_template('post/index.html')
|
USER_TOKENS = 'user_tokens'
ADMIN_TOKENS = 'admin_tokens'
TOKEN_VALUE = 'token_value'
USERNAME = 'username'
NAME = 'name'
EMAIL = 'email'
TOKENS = 'tokens'
UNKNOWN = 'unknown'
HOST = 'host'
PORT = 'port'
USE_SSL = 'use_ssl'
USE_WSGI = 'use_wsgi'
VALIDATE_SSL = 'validate_ssl'
USE_UWSGI = 'use_uwsgi'
CERT_PEM = 'cert_pem'
KEY_PEM = 'key_pem'
USER = 'user'
PASSWORD = 'password'
DB = 'db'
DATABASE = 'database'
COLLECTION = 'collection'
TABLE = 'table'
USING_SQLALCHEMY = 'using_postgres'
SQLALCHEMY_HOST = 'sqlalchemy_host'
SQLALCHEMY_PORT = 'sqlalchemy_port'
SQLALCHEMY_USER = 'sqlalchemy_user'
SQLALCHEMY_PASS = 'sqlalchemy_password'
SQLALCHEMY_USE_SSL = 'sqlalchemy_use_ssl'
SQLALCHEMY_DB = 'sqlalchemy_db'
SQLALCHEMY_ORMS = 'sqlalchemy_orms'
TLS_INSECURE = 'tlsInsecure'
SSL_KEYFILE = 'ssl_keyfile'
SSL_CERTFILE = 'ssl_certfile'
DEFAULT_ENGINE_SETTINGS = 'default_engine_settings'
ODMS = 'odms'
ORMS = 'orms'
ORM_CLASS = 'odm_class'
ODM_CLASS = 'odm_class'
CLASS = 'class'
CONTROLLER = 'controller'
RESOURCE = 'resource'
# ORM_CLASS = 'orm_class'
# HOST = 'host'
# PORT = 'port'
# USE_SSL = 'use_ssl'
# USERNAME = 'username'
# PASSWORD = 'password'
CERT = 'cert'
KEY = 'key'
CONNECTION = 'connection'
dialect = 'dialect'
USING_MONGO = 'using_mongo'
MONGO_HOST = 'mongo_host'
MONGO_PORT = 'mongo_port'
MONGO_USER = 'mongo_user'
MONGO_PASS = 'mongo_password'
MONGO_USE_SSL = 'mongo_use_ssl'
MONGO_DB = 'mongo_db'
MONGO_ODMS = 'mongo_odms'
MONGO_CONNECTION = 'mongo_connection'
MONGO_DATABASE = 'mongo_database'
MONGO_COLLECTION = 'mongo_collection'
VIEWS = 'views'
META = 'Meta'
TABLE__ = '__table__'
FULLNAME = 'fullname'
DESCRIPTION = 'description'
ENGINE = 'engine'
REST_SERVICE_BLOCK = 'rest-service'
REST_SERVICE_CONFIGS = {
USE_SSL: False,
USE_UWSGI: False,
HOST: '127.0.0.1',
PORT: 8000,
VALIDATE_SSL: False,
CERT_PEM: None,
KEY_PEM: None,
VIEWS: list(),
# Mongo related
USING_MONGO: False,
MONGO_HOST: None,
MONGO_PORT: None,
MONGO_USER: None,
MONGO_PASS: None,
MONGO_USE_SSL: False,
MONGO_DB: None,
MONGO_ODMS: list(),
ODMS: {},
# Postgres related
USING_SQLALCHEMY: False,
SQLALCHEMY_HOST: None,
SQLALCHEMY_PORT: None,
SQLALCHEMY_USER: None,
SQLALCHEMY_PASS: None,
SQLALCHEMY_USE_SSL: False,
SQLALCHEMY_DB: None,
ORMS: {},
}
MONGODB_SETTINGS = 'MONGODB_SETTINGS'
MONGODB_HOST = 'MONGODB_HOST'
MONGODB_PORT = 'MONGODB_PORT'
MONGODB_USER = 'MONGODB_USERNAME'
MONGODB_PASS = 'MONGODB_PASSWORD'
MONGODB_DB = 'MONGODB_DB'
MAP_MONGO_TO_APP = {
MONGO_HOST: HOST,
MONGO_PORT: PORT,
MONGO_USER: USERNAME,
MONGO_PASS: PASSWORD,
# MONGO_DB: DB,
}
MAP_MONGO_TO_SETTINGS = {
MONGO_HOST: MONGODB_HOST,
MONGO_PORT: MONGODB_PORT,
MONGO_USER: MONGODB_USER,
MONGO_PASS: MONGODB_PASS,
MONGO_DB: MONGODB_DB,
}
SQLALCHEMYDB_SETTINGS = 'SQLALCHEMYDB_SETTINGS'
MAP_SQLALCHEMY_TO_APP = {
SQLALCHEMY_HOST: HOST,
SQLALCHEMY_PORT: PORT,
SQLALCHEMY_USER: USERNAME,
SQLALCHEMY_PASS: PASSWORD,
SQLALCHEMY_DB: DATABASE,
}
DIALECT_DRIVER = 'dialect_driver'
SQLALCHEMY_DIALECT = 'postgresql+psycopg2'
MONGODB_DIALECT = 'mongodb'
SQLALCHEMY_DATABASE_URI ='SQLALCHEMY_DATABASE_URI'
REQUIRED_DB_URI_FMT_KEYS = [DIALECT_DRIVER, USERNAME, PASSWORD, HOST, PORT]
DB_URI_FMT = '{dialect_driver}://{username}:{password}@{host}:{port}/{database}'
DB_URI_NO_DB_FMT = '{dialect_driver}://{username}:{password}@{host}:{port}'
ODM_DATABASE = 'odm_database'
ODM_COLLECTION = 'odm_collection'
ODM_CONNECTION = 'odm_connection'
ORM_DATABASE = 'orm_database'
ORM_COLLECTION = 'orm_collection'
ORM_CONNECTION = 'orm_connection'
ORM_TABLE = 'orm_table'
|
import time
import xlsxwriter
from pyrebase import pyrebase
import firebaseConfigFile
# connect firebase
firebase = pyrebase.initialize_app(firebaseConfigFile.firebaseConfig)
storage = firebase.storage()
db = firebase.database()
# create excel file for answers
workbook = xlsxwriter.Workbook('Answer.xlsx')
worksheet = workbook.add_worksheet("Answer")
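# For a single device: push questions to Firebase one at a time, wait until the device
# answers (its state entry returns to 0), and record every answer in that device's
# worksheet column.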
def writeFirebase(obj, device):
i = 0
j = 1
# write device names to excel
worksheet.write(0, deviceNum.index(device), str(device))
# if runstate is true,
while obj.runstate:
questionarray = obj.getQuestion(device)
if (db.child(f"state/{device}").get().val() == 0) & (len(questionarray) > i):
# get last answer from firebase
answer = db.child(f"answers/{device}").get().val()
# write answer to excel
worksheet.write(j, deviceNum.index(device), str(answer))
# write question to firebase
db.child(f"questions/{device}/question").set(questionarray[i][0])
db.child(f"questions/{device}/a").set(questionarray[i][1])
db.child(f"questions/{device}/b").set(questionarray[i][2])
db.child(f"questions/{device}/c").set(questionarray[i][3])
db.child(f"questions/{device}/d").set(questionarray[i][4])
db.child(f"questions/{device}/e").set(questionarray[i][5])
# load question image to firebase
storage.child(f"{device}/1.jpg").put(f"image/{questionarray[i][6]}")
# set question state
db.child(f"state/{device}").set(1)
i = i + 1
j = j + 1
else:
time.sleep(0.5)
answer = db.child(f"answers/{device}").get().val()
worksheet.write(j, deviceNum.index(device), str(answer))
workbook.close()
def getDevices():
# get all devices from firebase
dev = db.child("state/").get().val()
return list(dev.keys())
def clearDatabase():
# clear all database
db.child("state").remove()
db.child("questions").remove()
db.child("answers").remove()
deviceNum = getDevices()
|
# Config file with locations of various binaries, hostnames etc
#
# Change the SITE variable to run the experiments on different testbed setup
import argparse
import sys
import types
site_config_parser = argparse.ArgumentParser(description='Site config variables')
site_config_parser.add_argument('--var', dest='var',
help='Config variable to read', default='')
#############
SITE = 'Siva-MC'
config = {}
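# Each SITE branch below fills the same config keys (binary paths, hosts, NIC details,
# experiment sweeps) for a different testbed setup.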
if SITE == 'Vimal':
config['RL_MODULE_NAME'] = ''
config['RL_MODULE'] = ''
config['NETPERF_DIR'] = '/root/vimal/exports/netperf'
config['SHELL_PROMPT'] = '#'
config['UDP'] = '/root/vimal/rl-qfq/utils/udp'
config['TC'] = '/root/vimal/rl-qfq/iproute2/tc/tc'
config['QFQ_PATH'] = '/root/vimal/rl-qfq/sch_qfq.ko'
config['CLASS_RATE'] = '/root/vimal/rl-qfq/utils/class-rate.py'
# Server/client nodes
config['DEFAULT_HOSTS'] = ['e2', 'e1']
# Interface details for each node
config['DEFAULT_DEV'] = { 'e2' : 'eth2', 'e1' : 'eth2' }
# NIC details
config['NIC_VENDOR'] = 'Emulex'
config['NIC_HW_QUEUES'] = 4
# Taskset CPU for UDP program
config['UDP_CPU'] = 2
config['NUM_CPUS'] = 8
config['EXCLUDE_CPUS'] = []
'''
CPU numbering on lancelots:
(0 4) (Core 0, two hyperthreads)
(1 5)
(2 6)
(3 7)
So, we set TX interrupt on CPU 0
RX-0 on CPU 1
RX-1 on CPU 2
RX-2 on CPU 3
RX-3 on CPU 5
RX-4 on CPU 6
'''
config['INTR_MAPPING'] = [0, 1, 3, 5, 6]
# Sniffer host with Myri10G sniffer
config['SNIFFER_HOST'] = ''
config['SNIFFER'] = ''
config['SNIFFER_TMPDIR'] = ''
config['SNIFFER_CPU'] = 2
config['SNIFFER_DELAY'] = 15 # Seconds to delay sniffer initially
config['SNIFFER_DURATION'] = 10 # Seconds to sniff traffic
# Experiment script configuration
config['EXPT_RATES'] = '1000 3000 5000 7000 9000'
config['EXPT_NCLASSES'] = '1 8 16 512 2048' # Number of traffic classes
config['EXPT_RL'] = 'none htb hwrl'
config['EXPT_RUN'] = '1 2 3'
# tmp directory for plotting sniffer graphs
config['PLOT_TMPDIR'] = '/tmp/'
elif SITE == 'Siva':
config['RL_MODULE_NAME'] = ''
config['RL_MODULE'] = ''
config['NETPERF_DIR'] = '/home/ssradhak/src/software/netperf/bin'
config['SHELL_PROMPT'] = '$'
config['UDP'] = '/home/ssradhak/src/rate_limiting/qfq-rl-eval/utils/udp'
config['TC'] = '/home/ssradhak/src/rate_limiting/iproute2/tc/tc'
config['QFQ_PATH'] = '/home/ssradhak/src/rate_limiting/qfq-rl/sch_qfq.ko'
config['CLASS_RATE'] = '/home/ssradhak/src/rate_limiting/qfq-rl-eval/utils/class-rate.py'
config['TRAFGEN'] = '/home/ssradhak/src/rate_limiting/trafgen/trafgen'
config['PLOT_SCRIPTS_DIR'] = '/home/ssradhak/src/rate_limiting/qfq-rl-eval/plot'
# Server/client nodes
config['DEFAULT_HOSTS'] = ['192.168.2.80', '192.168.2.64']
# Interface details for each node
config['DEFAULT_DEV'] = { '192.168.2.64' : 'eth1',
'192.168.2.80' : 'eth2' }
# NIC details
config['NIC_VENDOR'] = 'Intel'
config['NIC_HW_QUEUES'] = 16
# Mellanox NIC QOS scripts
config['TC_WRAP'] = '/home/ssradhak/src/rate_limiting/mellanox/QoS_upstream/tc_wrap.py'
config['MLNX_QOS'] = '/home/ssradhak/src/rate_limiting/mellanox/QoS_upstream/mlnx_qos'
# Taskset CPU for single UDP program
config['UDP_CPU'] = 2
config['NUM_CPUS'] = 16
config['EXCLUDE_CPUS'] = []
'''
CPU numbering on SEED testbed (dcswitch81):
(0 8) (Socket 0, Core 0, two hyperthreads)
(4 12) (Socket 0, Core 1, two hyperthreads)
(2 10) (Socket 0, Core 2, two hyperthreads)
(6 14) (Socket 0, Core 3, two hyperthreads)
(1 9) (Socket 1, Core 0, two hyperthreads)
(5 13) (Socket 1, Core 1, two hyperthreads)
(3 11) (Socket 1, Core 2, two hyperthreads)
(7 15) (Socket 1, Core 3, two hyperthreads)
'''
config['INTR_MAPPING'] = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
# Sniffer host with Myri10G sniffer
config['SNIFFER_HOST'] = ''
config['SNIFFER'] = '/home/ssradhak/src/rate_limiting/sniffer/tcpdump_tool/snf_simple_tcpdump'
config['SNIFFER_TMPDIR'] = '/mnt/disks/cciss/c0d0p1/ssradhak/sniffer/'
config['SNIFFER_CPU'] = 2
config['SNIFFER_DELAY'] = 25 # Seconds to delay sniffer initially
config['SNIFFER_DURATION'] = 10 # Seconds to sniff traffic
# Experiment script configuration
config['EXPT_RATES'] = '1000 5000 9000'
config['EXPT_NCLASSES'] = '8 16 32 64 256 512' # Number of traffic classes
config['EXPT_RL'] = 'htb hwrl'
config['EXPT_RUN'] = '1 2 3'
# tmp directory for plotting sniffer graphs
#config['PLOT_TMPDIR'] = '/home/ssradhak/tmp/plot/'
config['PLOT_TMPDIR'] = '/mnt/disks/cciss/c1d1p1/ssradhak/tmp/plot/'
elif SITE == 'Siva-MC':
config['RL_MODULE_NAME'] = ''
config['RL_MODULE'] = ''
config['SHELL_PROMPT'] = '$'
config['TC'] = '/home/ssradhak/src/rate_limiting/iproute2/tc/tc'
config['QFQ_PATH'] = '/home/ssradhak/src/rate_limiting/qfq-rl/sch_qfq.ko'
config['EYEQ_PATH'] = '/home/ssradhak/src/rate_limiting/eyeq++/sch_eyeq.ko'
config['TRAFGEN'] = '/home/ssradhak/src/rate_limiting/trafgen/trafgen'
config['PLOT_SCRIPTS_DIR'] = '/home/ssradhak/src/rate_limiting/qfq-rl-eval/plot'
# Server/client nodes
config['DEFAULT_MC_SERVERS'] = ['192.168.2.64']
config['DEFAULT_MC_CLIENTS'] = ['192.168.2.80',
'192.168.2.63',
#'192.168.2.65',
'192.168.2.67',
'192.168.2.108',
'192.168.2.109',
'192.168.2.110',
'192.168.2.107']
# Interface details for each node
config['DEFAULT_DEV'] = { '192.168.2.64' : 'eth1',
'192.168.2.80' : 'eth2',
'192.168.2.63' : 'eth1',
#'192.168.2.65' : 'eth1',
'192.168.2.67' : 'eth1',
'192.168.2.108' : 'eth1',
'192.168.2.109' : 'eth1',
'192.168.2.110' : 'eth1',
'192.168.2.107' : 'eth1' }
# NIC details
config['NIC_VENDOR'] = 'Intel'
config['NIC_HW_QUEUES'] = 16
# Mellanox NIC QOS scripts
config['TC_WRAP'] = '/home/ssradhak/src/rate_limiting/mellanox/QoS_upstream/tc_wrap.py'
config['MLNX_QOS'] = '/home/ssradhak/src/rate_limiting/mellanox/QoS_upstream/mlnx_qos'
# CPUs available for tenants
config['NUM_CPUS'] = 16
config['EXCLUDE_CPUS'] = [2, 10, 11,13]
#config['EXCLUDE_CPUS'] = [2, 10]
# Sniffer host with Myri10G sniffer
config['SNIFFER_HOST'] = ''
config['SNIFFER'] = '/home/ssradhak/src/rate_limiting/sniffer/tcpdump_tool/snf_simple_tcpdump'
config['SNIFFER_TMPDIR'] = '/mnt/disks/cciss/c0d0p1/ssradhak/sniffer/'
config['SNIFFER_CPU'] = 2
config['SNIFFER_DELAY'] = 25 # Seconds to delay sniffer initially
config['SNIFFER_DURATION'] = 10 # Seconds to sniff traffic
# Experiment script configuration
config['EXPT_RATES'] = '1000 5000 9000'
config['EXPT_RL'] = 'htb hwrl'
config['EXPT_RUN'] = '1 2 3'
# tmp directory for plotting sniffer graphs
config['PLOT_TMPDIR'] = '/mnt/disks/cciss/c1d1p1/ssradhak/tmp/plot/'
##########################################################################
# Use this as a script that returns value of a variable to be used in bash
# scripts etc.
##########################################################################
def main(argv):
# Parse flags
args = site_config_parser.parse_args()
if args.var == '' or args.var not in config:
return
print config[args.var]
if __name__ == '__main__':
main(sys.argv)
|
class Solution():
def addtion(self, nums, t):
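# One-pass hash map: remember each seen value's index and, for every value,
# check whether its complement (t - value) has already been seen.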
a = dict()
for i in range(len(nums)):
if (t - nums[i]) in a:
return i, a[(t-nums[i])]
else:
a[nums[i]] = i
nums = [1, 3, 5, 6]
t = 11
nums1 = range(10)
solu = Solution()
print(solu.addtion(nums, t))
|
"""
Goal:
* List open MR.
Todos:
* Make project and group arguments exclusive.
How to:
* Get help
- python list_mrs.py -h
* The Private Token can be given as environment variable GITLAB_PRIVATE_TOKEN
- I read the password using pass (cli password manager)
- GITLAB_PRIVATE_TOKEN=$(pass show work/CSS/gitlab/private_token) python list_mrs.py --url <gitlab_url> --group <group_id> --project=<project_id>
* The Private Token can be given as argument (-t, --token)
- python list_mrs.py --token $(pass show work/CSS/gitlab/private_token) --url <gitlab_url> --group <group_id> --project <gitlab_project_id>
* If the Private Token is set both ways, GITLAB_PRIVATE_TOKEN has precedence.
* The gitlab project id can be given as environment variable GITLAB_PROJECT_ID
* The gitlab project id can be given as argument (-p, --project)
* If the gitlab project id is set both ways, GITLAB_PROJECT_ID has precedence.
* The url can be given as argument (-u, --url)
Attention:
* For now, to make '--group <group_id>' work, set '--project=""'.
"""
import requests
import os
import sys
import logging
from get_empty_mrs import empty_mrs
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
STATE_DEFAULT = "all"
PRIVATE_TOKEN = os.environ.get("GITLAB_PRIVATE_TOKEN", None)
PROJECT_ID = os.environ.get("GITLAB_PROJECT_ID", None)
GROUP_ID = os.environ.get("GITLAB_GROUP_ID", None)
URL = os.environ.get("GITLAB_URL", None)
GITHUB_API_ENDPOINT = "/api/v4"
ISSUES_ENDPOINT = "/issues"
PROJECT_ENDPOINT = "/projects" + "/{project_id}"
GROUP_ENDPOINT = "/groups" + "/{group_id}"
MR_ENDPOINT = "/merge_requests"
TAGS_ENDPOINT = "/repository/tags"
PROJECT_TAGS_ENDPOINT = f"{PROJECT_ENDPOINT}" + f"{TAGS_ENDPOINT}"
CAN_BE_MERGED = "can_be_merged"
CANNOT_BE_MERGED = "cannot_be_merged"
def list_mrs(
url=URL, project_id=PROJECT_ID, group_id=GROUP_ID, state=STATE_DEFAULT, headers=None
):
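# Query the GitLab v4 merge_requests endpoint for a project (or a group when no project
# id is given), filtered by state, and return a list of per-MR summary dicts.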
url = url + GITHUB_API_ENDPOINT
endpoint = ""
if project_id:
endpoint = PROJECT_ENDPOINT.format(project_id=project_id)
elif group_id:
endpoint = GROUP_ENDPOINT.format(group_id=group_id)
complete_url = url + endpoint + MR_ENDPOINT + f"?state={state}"
response = requests.get(url=complete_url, headers=headers)
if response.status_code not in [200, 201]:
return {
"error": "Cannot get merge request.",
"reason": "Received status code {response.status_code} with {response.text}.",
"project_id": project_id,
"url": complete_url,
}
sys.exit(1)
json_response = response.json()
logger.debug(json_response)
mrs = list()
for mr in json_response:
iid = mr.get("iid", None)
merge_status = mr.get("merge_status", None)
has_conflicts = mr.get("has_conflicts", None)
web_url = mr.get("web_url", None)
project_id = mr.get("project_id", None)
# user_info = mr.get("user", None)
# assignee_can_merge = None
# if user_info:
# assignee_can_merge = user_info.get("can_merge", None)
mrs.append(
{
"iid": iid,
# "assignee_can_merge": assignee_can_merge,
"merge_status": merge_status,
"has_conflicts": has_conflicts,
"web_url": web_url,
"project_id": project_id,
"group_id": group_id,
# "assignee_id": assignee_id,
# "source_branch": source_branch,
# "target_branch": target_branch,
}
)
return mrs
def main():
import argparse
import json
from _version import __version__
parser = argparse.ArgumentParser(
description="List open MR.",
epilog="python list_mrs.py --token $(pass show work/CSS/gitlab/private_token) --url <gitlab_url> --project <gitlab_project_id>",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--version",
action="version",
version="%(prog)s {version}".format(version=__version__),
)
parser.add_argument(
"-u",
"--url",
required=True,
default="https://example.gitlab.com",
help="Gitlab host/url/server.",
)
parser.add_argument(
"-p", "--project", required=True, default="-1", help="Gitlab project id."
)
parser.add_argument(
"-g", "--group", required=True, default="-1", help="Gitlab group id."
)
parser.add_argument(
"-s", "--state", default=STATE_DEFAULT, help="State of MRs to be listed."
)
parser.add_argument(
"-t",
"--token",
nargs="?",
help="Private Token to access gitlab API. If not given as argument, set GITLAB_PRIVATE_TOKEN.",
)
parser.add_argument("--debug", action="store_true", help="Show debug info.")
args = parser.parse_args()
if args.debug:
logger.setLevel(logging.DEBUG)
logging.getLogger("EMPTY_MRS").setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
logging.getLogger("EMPTY_MRS").setLevel(logging.INFO)
private_token = args.token
project_id = args.project
group_id = args.group
state = args.state
url = args.url
headers = {
"PRIVATE-TOKEN": private_token,
"Content-Type": "application/json",
}
listed_mrs = list_mrs(
url=url, project_id=project_id, group_id=group_id, state=state, headers=headers
)
print(*listed_mrs, sep="\n")
# for k, v in tags.items():
# print(k)
# print(" " + "\n ".join(t for t in v))
for mr in listed_mrs:
# Get not mergeable MRs with conflicts.
# This seems to contains empty MRs.
result = empty_mrs(url=url, mr=mr, headers=headers)
if result:
print(json.dumps(result, indent=4))
if __name__ == "__main__":
sys.exit(main())
|
"""
run_models_.py
Purpose: Predict gene expression given graph structure and node features
Usage: python ./run_models_.py [-c <str>] [-rf <int>] [-me <int>] [-lr <float>]
[-cn <int] [-gs <int>] [-ln <int>] [-ls <int>]
Arguments:
'-c', '--cell_line', default='E116', type=str
'-rf', '--regression_flag', default=1 (1 = regression; 0 = classification), type=int
'-me', '--max_epoch', default=1000,type=int)
'-lr', '--learning_rate', default=1e-4, type=float)
'-cn', '--num_graph_conv_layers', default=2, type=int)
'-gs', '--graph_conv_layer_size', default=256, type=int)
'-ln', '--num_lin_layers', default=3, type=int)
'-ls', '--lin_hidden_layer_size', default=256, type=int)
Processed inputs:
In ./data/cell_line subdirectory:
./hic_sparse.npz: Concatenated Hi-C matrix in sparse CSR format
./np_nodes_lab_genes_reg[rf].npy: Numpy array stored in binary format, where
rf denotes regression flag (rf = 1 for regression, 0 for classification);
2-column array that stores IDs of nodes corresponding to genes
and the node label (expression level)
./np_hmods_norm_chip_10000bp.npy: Numpy array stored in binary format;
(F+1)-column array where the 0th column contains node IDs
and columns 1..F contain feature values, where F = total number of features
./df_genes_reg[rf].pkl: Pandas dataframe stored in .pkl format, where
rf denotes regression flag (rf = 1 for regression, 0 for classification);
5-column dataframe, where columns = [ENSEMBL ID,
gene name abbreviation, node ID, expression level, connected status]
*Note: Users can prepare these files or use process_inputs.py script provided
Outputs:
In ./data/cell_line/saved_runs subdirectory:
model_[date_and_time].pt: Model state dictionary stored in .pt (PyTorch) format
model_predictions_[date_and_time].csv: Predictions for each gene with the following columns:
Classification: [Dataset, Node ID, ENSEMBL ID,
gene name abbreviation, true label, predicted label, classification [TP/TN/FP/FN]]
Regression: [Dataset, Node ID, ENSEMBL ID,
gene name abbreviation, true expression, predicted expression]
*Note: Expression values are obtained by taking the base-10 logarithm
of the RNA-seq counts and adding a pseudocount of 1 prior to taking the logarithm
model_[date_and_time]_info.txt: Text file containing summary of model
evaluation metrics as well as hyperparameter settings
"""
import os
import argparse
import time
from datetime import datetime, date
import random
import numpy as np
from scipy.sparse import load_npz
from scipy.stats import pearsonr
from sklearn.metrics import roc_auc_score, precision_recall_curve, auc
import pandas as pd
import torch
import torch_geometric
from model_classes_ import GCN_classification, GCN_regression
def train_model_classification(model, graph, max_epoch, learning_rate, targetNode_mask, train_idx, valid_idx, optimizer):
'''
Trains model for classification task
Parameters
----------
model [GCN_classification]: Instantiation of model class
graph [PyG Data class]: PyTorch Geometric Data object representing the graph
max_epoch [int]: Maximum number of training epochs
learning_rate [float]: Learning rate
targetNode_mask [tensor]: Subgraph mask for training nodes
train_idx [array]: Node IDs corresponding to training set
valid_idx [array]: Node IDs corresponding to validation set
optimizer [PyTorch optimizer class]: PyTorch optimization algorithm
Returns
-------
train_loss_vec [array]: Training loss for each epoch
train_AUROC_vec [array]: Training AUROC score for each epoch
valid_loss_vec [array]: Validation loss for each epoch
valid_AUROC_vec [array]: Validation AUROC score for each epoch
'''
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
graph = graph.to(device)
optimizer = optimizer
train_labels = to_cpu_npy(graph.y[targetNode_mask[train_idx]])
valid_labels = to_cpu_npy(graph.y[targetNode_mask[valid_idx]])
train_loss_list = []
train_AUROC_vec = np.zeros(np.shape(np.arange(max_epoch)))
valid_loss_list = []
valid_AUROC_vec = np.zeros(np.shape(np.arange(max_epoch)))
model.train()
train_status = True
print('\n')
for e in list(range(max_epoch)):
if e%100 == 0:
print("Epoch", str(e), 'out of', str(max_epoch))
model.train()
train_status = True
optimizer.zero_grad()
### Only trains on nodes with genes due to masking
forward_scores = model(graph.x.float(), graph.edge_index, train_status)[targetNode_mask]
train_scores = forward_scores[train_idx]
train_loss = model.loss(train_scores, torch.LongTensor(train_labels).to(device))
train_softmax, _ = model.calc_softmax_pred(train_scores)
train_loss.backward()
optimizer.step()
### Calculate training and validation loss, AUROC scores
model.eval()
valid_scores = forward_scores[valid_idx]
valid_loss = model.loss(valid_scores, torch.LongTensor(valid_labels).to(device))
valid_softmax, _ = model.calc_softmax_pred(valid_scores)
train_loss_list.append(train_loss.item())
train_softmax = to_cpu_npy(train_softmax)
train_AUROC = roc_auc_score(train_labels, train_softmax[:,1], average="micro")
valid_loss_list.append(valid_loss.item())
valid_softmax = to_cpu_npy(valid_softmax)
valid_AUROC = roc_auc_score(valid_labels, valid_softmax[:,1], average="micro")
train_AUROC_vec[e] = train_AUROC
valid_AUROC_vec[e] = valid_AUROC
train_loss_vec = np.reshape(np.array(train_loss_list), (-1, 1))
valid_loss_vec = np.reshape(np.array(valid_loss_list), (-1, 1))
return train_loss_vec, train_AUROC_vec, valid_loss_vec, valid_AUROC_vec
def eval_model_classification(model, graph, targetNode_mask, train_idx, valid_idx, test_idx):
'''
Runs fully trained classification model and compute evaluation statistics
Parameters
----------
model [GCN_classification]: Instantiation of model class
graph [PyG Data class]: PyTorch Geometric Data object representing the graph
targetNode_mask [tensor]: Mask ensuring model only trains on nodes with genes
train_idx [array]: Node IDs corresponding to training set;
analogous for valid_idx and test_idx
Returns
-------
test_AUROC [float]: Test set AUROC score;
analogous for train_AUROC (training set) and valid_AUPR (validation set)
test_AUPR [float]: Test set AUPR score
analogous for train_AUPR (training set) and valid_AUPR (validation set)
test_pred [array]: Test set predictions;
analogous for train_pred (training set) and valid_pred (validation set)
test_labels [array]: Test set labels;
analogous for train_labels (training set) and valid_labels (validation set)
'''
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
graph = graph.to(device)
test_labels = to_cpu_npy(graph.y[targetNode_mask[test_idx]])
model.eval()
train_status=False
forward_scores = model(graph.x.float(), graph.edge_index, train_status)[targetNode_mask]
test_scores = forward_scores[test_idx]
test_softmax, test_pred = model.calc_softmax_pred(test_scores)
test_softmax = to_cpu_npy(test_softmax)
test_pred = to_cpu_npy(test_pred)
test_AUROC = roc_auc_score(test_labels, test_softmax[:,1], average="micro")
test_precision, test_recall, thresholds = precision_recall_curve(test_labels, test_softmax[:,1])
test_AUPR = auc(test_recall, test_precision)
# test_F1 = f1_score(test_labels, test_pred, average="micro")
train_scores = forward_scores[train_idx]
train_labels = to_cpu_npy(graph.y[targetNode_mask[train_idx]])
train_softmax, train_pred = model.calc_softmax_pred(train_scores)
train_pred = to_cpu_npy(train_pred)
train_softmax = to_cpu_npy(train_softmax)
train_precision, train_recall, thresholds = precision_recall_curve(train_labels, train_softmax[:,1])
train_AUPR = auc(train_recall, train_precision)
# train_F1 = f1_score(train_labels, train_pred, average="micro")
valid_scores = forward_scores[valid_idx]
valid_labels = to_cpu_npy(graph.y[targetNode_mask[valid_idx]])
valid_softmax, valid_pred = model.calc_softmax_pred(valid_scores)
valid_pred = to_cpu_npy(valid_pred)
valid_softmax = to_cpu_npy(valid_softmax)
valid_precision, valid_recall, thresholds = precision_recall_curve(valid_labels, valid_softmax[:,1])
valid_AUPR = auc(valid_recall, valid_precision)
# valid_F1 = f1_score(valid_labels, valid_pred, average="micro")
return test_AUROC, test_AUPR, test_pred, test_labels, train_AUPR, train_pred, train_labels, \
valid_AUPR, valid_pred, valid_labels
def train_model_regression(model, graph, max_epoch, learning_rate, targetNode_mask, train_idx, valid_idx, optimizer):
'''
Trains model for regression task
Parameters
----------
model [GCN_classification]: Instantiation of model class
graph [PyG Data class]: PyTorch Geometric Data object representing the graph
max_epoch [int]: Maximum number of training epochs
learning_rate [float]: Learning rate
targetNode_mask [tensor]: Subgraph mask for training nodes
train_idx [array]: Node IDs corresponding to training set
valid_idx [array]: Node IDs corresponding to validation set
optimizer [PyTorch optimizer class]: PyTorch optimization algorithm
Returns
-------
train_loss_vec [array]: Training loss for each epoch;
analogous for valid_loss_vec (validation set)
train_pearson_vec [array]: Training PCC for each epoch;
analogous for valid_pearson_vec (validation set)
'''
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
graph = graph.to(device)
optimizer = optimizer
train_labels = to_cpu_npy(graph.y[targetNode_mask[train_idx]])
valid_labels = to_cpu_npy(graph.y[targetNode_mask[valid_idx]])
train_loss_list = []
train_pearson_vec = np.zeros(np.shape(np.arange(max_epoch)))
valid_loss_list = []
valid_pearson_vec = np.zeros(np.shape(np.arange(max_epoch)))
model.train()
train_status = True
print('\n')
for e in list(range(max_epoch)):
if e%100 == 0:
print("Epoch", str(e), 'out of', str(max_epoch))
model.train()
train_status = True
optimizer.zero_grad()
### Only trains on nodes with genes due to masking
forward_scores = model(graph.x.float(), graph.edge_index, train_status)[targetNode_mask]
train_scores = forward_scores[train_idx]
train_loss = model.loss(train_scores, torch.FloatTensor(train_labels).to(device))
train_loss.backward()
optimizer.step()
### Calculate training and validation loss, AUROC scores
model.eval()
train_scores = to_cpu_npy(train_scores)
train_pearson = calc_pearson(train_scores, train_labels)
train_loss_list.append(train_loss.item())
valid_scores = forward_scores[valid_idx]
valid_loss = model.loss(valid_scores, torch.FloatTensor(valid_labels).to(device))
valid_scores = to_cpu_npy(valid_scores)
valid_pearson = calc_pearson(valid_scores, valid_labels)
valid_loss_list.append(valid_loss.item())
train_pearson_vec[e] = train_pearson
valid_pearson_vec[e] = valid_pearson
train_loss_vec = np.reshape(np.array(train_loss_list), (-1, 1))
valid_loss_vec = np.reshape(np.array(valid_loss_list), (-1, 1))
return train_loss_vec, train_pearson_vec, valid_loss_vec, valid_pearson_vec
def eval_model_regression(model, graph, targetNode_mask, train_idx, valid_idx, test_idx):
'''
Runs fully trained regression model and compute evaluation statistics
Parameters
----------
model [GCN_classification]: Instantiation of model class
graph [PyG Data class]: PyTorch Geometric Data object representing the graph
targetNode_mask [tensor]: Mask ensuring model only trains on nodes with genes
train_idx [array]: Node IDs corresponding to training set;
analogous for valid_idx and test_idx
Returns
-------
test_pearson [float]: PCC for test set;
analogous for train_pearson (training set) and valid_pearson (validation set)
test_pred [array]: Test set predictions;
analogous for train_pred (training set) and valid_pred (validation set)
test_labels [array]: Test set labels (expression values);
analogous for train_labels (training set) and valid_labels (validation set)
'''
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
graph = graph.to(device)
model.eval()
train_status=False
forward_scores = model(graph.x.float(), graph.edge_index, train_status)[targetNode_mask]
test_scores = forward_scores[test_idx]
test_pred = to_cpu_npy(test_scores)
test_labels = to_cpu_npy(graph.y[targetNode_mask[test_idx]])
test_pearson = calc_pearson(test_pred, test_labels)
train_scores = forward_scores[train_idx]
train_pred = to_cpu_npy(train_scores)
train_labels = to_cpu_npy(graph.y[targetNode_mask[train_idx]])
train_pearson = calc_pearson(train_pred, train_labels)
valid_scores = forward_scores[valid_idx]
valid_pred = to_cpu_npy(valid_scores)
valid_labels = to_cpu_npy(graph.y[targetNode_mask[valid_idx]])
valid_pearson = calc_pearson(valid_pred, valid_labels)
return test_pearson, test_pred, test_labels, train_pearson, train_pred, train_labels, \
valid_pearson, valid_pred, valid_labels
def calc_pearson(scores, targets):
'''
Calculates Pearson correlation coefficient (PCC) between predicted \
expression levels and true expression levels
Parameters
----------
scores [array]: Predicted expression levels
targets [array]: True expression levels
Returns
-------
pcc [float]: Pearson correlation coefficient
'''
pcc, _ = pearsonr(scores, targets)
return pcc
def to_cpu_npy(x):
'''
Simple helper function to transfer GPU tensors to CPU numpy matrices
Parameters
----------
x [tensor]: PyTorch tensor stored on GPU
Returns
-------
new_x [array]: Numpy array stored on CPU
'''
new_x = x.cpu().detach().numpy()
return new_x
###Set hyperparameters and miscellaneous options
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--cell_line', default='E116', type=str)
parser.add_argument('-rf', '--regression_flag', default=1, type=int)
parser.add_argument('-me', '--max_epoch', default=1000,type=int)
parser.add_argument('-lr', '--learning_rate', default=1e-4, type=float)
parser.add_argument('-es', '--embed_layer_size', default=5, type=int)
parser.add_argument('-cn', '--num_graph_conv_layers', default=2, type=int)
parser.add_argument('-gs', '--graph_conv_layer_size', default=256, type=int)
parser.add_argument('-ln', '--num_lin_layers', default=3, type=int)
parser.add_argument('-ls', '--lin_hidden_layer_size', default=256, type=int)
parser.add_argument('-rs', '--random_seed', default=0, type=int)
args = parser.parse_args()
cell_line = args.cell_line
regression_flag = args.regression_flag
max_epoch = args.max_epoch
learning_rate = args.learning_rate
num_graph_conv_layers = args.num_graph_conv_layers
graph_conv_embed_size = args.graph_conv_layer_size
num_lin_layers = args.num_lin_layers
lin_hidden_size = args.lin_hidden_layer_size
random_seed = args.random_seed
chip_res = 10000
hic_res = 10000
num_hm = 6
num_feat = int((hic_res/chip_res)*num_hm)
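# Features per node = histone marks per ChIP-seq bin times ChIP bins per Hi-C bin
# (hic_res/chip_res = 1 here, so num_feat = 6).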
if regression_flag == 0:
num_classes = 2
task = 'Classification'
else:
num_classes = 1
task = 'Regression'
# random_seed = random.randint(0,10000)
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
###Initialize start time
start_time = time.time()
today = date.today()
mdy = today.strftime("%Y-%m-%d")
clock = datetime.now()
hms = clock.strftime("%H-%M-%S")
hm = clock.strftime("%Hh-%Mm")
hm_colon = clock.strftime("%H:%M")
date_and_time = mdy + '-at-' + hms
###Test for GPU availability
cuda_flag = torch.cuda.is_available()
if cuda_flag:
dev = "cuda"
else:
dev = "cpu"
device = torch.device(dev)
###Load input files
base_path = os.getcwd()
save_dir = os.path.join(base_path, 'data', cell_line, 'saved_runs')
hic_sparse_mat_file = os.path.join(base_path, 'data', cell_line, 'hic_sparse.npz')
np_nodes_lab_genes_file = os.path.join(base_path, 'data', cell_line, \
'np_nodes_lab_genes_reg' + str(regression_flag) + '.npy')
np_hmods_norm_all_file = os.path.join(base_path, 'data', cell_line, \
'np_hmods_norm_chip_' + str(chip_res) + 'bp.npy')
df_genes_file = os.path.join(base_path, 'data', cell_line, 'df_genes_reg' + str(regression_flag) + '.pkl')
df_genes = pd.read_pickle(df_genes_file)
###Print model specifications
print(os.path.basename(__file__))
print('Model date and time:')
print(date_and_time, '\n\n')
print('Cell line:', cell_line)
print('Task:', task)
print('ChIP-seq resolution:', str(chip_res))
print('\n')
print('Training set: 70%')
print('Validation set: 15%')
print('Testing set: 15%')
print('\n')
print('Model hyperparameters: ')
print('Number of epochs:', max_epoch)
print('Learning rate:', learning_rate)
print('Number of graph convolutional layers:', str(num_graph_conv_layers))
print('Graph convolutional embedding size:', graph_conv_embed_size)
print('Number of linear layers:', str(num_lin_layers))
print('Linear hidden layer size:', lin_hidden_size)
###Define model inputs
mat = load_npz(hic_sparse_mat_file)
allNodes_hms = np.load(np_hmods_norm_all_file)
hms = allNodes_hms[:, 1:] #only includes features, not node ids
X = torch.tensor(hms).float().reshape(-1, num_feat)
allNodes = allNodes_hms[:, 0].astype(int)
geneNodes_labs = np.load(np_nodes_lab_genes_file)
geneNodes = geneNodes_labs[:, -2].astype(int)
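# Default every node's label to -1; only gene nodes receive real labels below, and
# training is restricted to them via targetNode_mask.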
allLabs = -1*np.ones(np.shape(allNodes))
targetNode_mask = torch.tensor(geneNodes).long()
if regression_flag == 0:
geneLabs = geneNodes_labs[:, -1].astype(int)
allLabs[geneNodes] = geneLabs
Y = torch.tensor(allLabs).long()
else:
geneLabs = geneNodes_labs[:, -1].astype(float)
allLabs[geneNodes] = geneLabs
Y = torch.tensor(allLabs).float()
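# Convert the sparse Hi-C contact matrix into PyG edge_index/edge_attr tensors and
# assemble the graph Data object.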
extract = torch_geometric.utils.from_scipy_sparse_matrix(mat)
data = torch_geometric.data.Data(edge_index = extract[0], edge_attr = extract[1], x = X, y = Y)
G = data
###Define convolutional and linear layer input/output sizes
graph_conv_layer_sizes = [num_feat] + \
[int(max(graph_conv_embed_size, lin_hidden_size)) \
for i in np.arange(1, num_graph_conv_layers, 1)] + [lin_hidden_size]
lin_hidden_sizes = [graph_conv_layer_sizes[-1]] + \
[int(max(lin_hidden_size, num_classes)) \
for i in np.arange(1, num_lin_layers, 1)] + [num_classes]
###Randomize node order and split into 70%/15%/15% training/validation/test sets
pred_idx_shuff = torch.randperm(targetNode_mask.shape[0])
fin_train = np.floor(0.7*pred_idx_shuff.shape[0]).astype(int)
fin_valid = np.floor(0.85*pred_idx_shuff.shape[0]).astype(int)
train_idx = pred_idx_shuff[:fin_train]
valid_idx = pred_idx_shuff[fin_train:fin_valid]
test_idx = pred_idx_shuff[fin_valid:]
train_gene_ID = targetNode_mask[train_idx].numpy()
valid_gene_ID = targetNode_mask[valid_idx].numpy()
test_gene_ID = targetNode_mask[test_idx].numpy()
###Instantiate neural network model, choose optimizer, and print model parameters
if regression_flag == 0:
model = GCN_classification(num_feat, num_graph_conv_layers, graph_conv_layer_sizes, num_lin_layers, lin_hidden_sizes, num_classes)
else:
model = GCN_regression(num_feat, num_graph_conv_layers, graph_conv_layer_sizes, num_lin_layers, lin_hidden_sizes, num_classes)
optimizer = torch.optim.Adam(filter(lambda p : p.requires_grad, model.parameters()),
lr = learning_rate)
print("\n"+"Model's state_dict:")
for param_tensor in model.state_dict():
print(param_tensor, "\t", model.state_dict()[param_tensor].size())
### For classification:
if regression_flag == 0:
### Train model
train_loss_vec, train_AUROC_vec, valid_loss_vec, valid_AUROC_vec = \
train_model_classification(model, G, max_epoch, learning_rate, targetNode_mask, train_idx, valid_idx, optimizer)
### Evaluate model
test_AUROC, test_AUPR, test_pred, test_labels, train_AUPR, train_pred, train_labels, \
valid_AUPR, valid_pred, valid_labels = \
eval_model_classification(model, G, targetNode_mask, train_idx, valid_idx, test_idx)
### Save metrics and node predictions
train_metrics = [train_gene_ID, train_pred, train_labels, train_AUROC_vec, train_AUPR, train_loss_vec]
np.save(os.path.join(save_dir, 'model_' + date_and_time + '_train_metrics' + '.npy'), train_metrics)
valid_metrics = [valid_gene_ID, valid_pred, valid_labels, valid_AUROC_vec, valid_AUPR, valid_loss_vec]
np.save(os.path.join(save_dir, 'model_' + date_and_time + '_valid_metrics' + '.npy'), valid_metrics)
test_metrics = [test_gene_ID, test_pred, test_labels, test_AUROC, test_AUPR, ['na']]
np.save(os.path.join(save_dir, 'model_' + date_and_time + '_test_metrics' + '.npy'), test_metrics)
dataset_list = [train_metrics, valid_metrics, test_metrics]
df_full_metrics = pd.DataFrame(columns=['Dataset','Node ID','True Label','Predicted Label','Classification'])
for d in np.arange(len(dataset_list)):
dataset_metrics = dataset_list[d]
partial_metrics = pd.DataFrame()
partial_metrics['Node ID'] = dataset_metrics[0]
partial_metrics['True Label'] = dataset_metrics[2]
partial_metrics['Predicted Label'] = dataset_metrics[1]
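        # Encode each (predicted, true) label pair as predicted*1 + true*2,
        # giving 0 = TN, 1 = FP, 2 = FN, 3 = TP (decoded just below)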
partial_metrics['Classification'] = dataset_metrics[1]*1 + dataset_metrics[2]*2
        partial_metrics['Classification'] = partial_metrics['Classification'].replace(
            {0: 'TN', 1: 'FP', 2: 'FN', 3: 'TP'})
if d == 0:
partial_metrics['Dataset'] = 'Training'
elif d == 1:
partial_metrics['Dataset'] = 'Validation'
elif d == 2:
partial_metrics['Dataset'] = 'Testing'
        df_full_metrics = pd.concat([df_full_metrics, partial_metrics], ignore_index=True)
df_gene_names = df_genes.iloc[:,:3]
df_gene_names = df_gene_names.rename(columns={"gene_catalog_name": "ENSEMBL_ID", "abbrev": "Abbreviation",
"hic_node_id" : 'Node ID'})
df_full_metrics = pd.merge(df_full_metrics, df_gene_names, how='inner', on='Node ID')
df_full_metrics = df_full_metrics[df_full_metrics.columns[[0,1,5,6,2,3,4]]]
### For regression:
elif regression_flag == 1:
### Train model
train_loss_vec, train_pearson_vec, valid_loss_vec, valid_pearson_vec = \
train_model_regression(model, G, max_epoch, learning_rate, targetNode_mask, train_idx, valid_idx, optimizer)
### Evaluate model
test_pearson, test_pred, test_labels, train_pearson, train_pred, train_labels, \
valid_pearson, valid_pred, valid_labels = \
eval_model_regression(model, G, targetNode_mask, train_idx, valid_idx, test_idx)
### Save metrics and node predictions
train_metrics = [train_gene_ID, train_pred, train_labels, train_pearson_vec, train_loss_vec]
np.save(os.path.join(save_dir, 'model_' + date_and_time + '_train_metrics' + '.npy'), train_metrics)
valid_metrics = [valid_gene_ID, valid_pred, valid_labels, valid_pearson_vec, valid_loss_vec]
np.save(os.path.join(save_dir, 'model_' + date_and_time + '_valid_metrics' + '.npy'), valid_metrics)
test_metrics = [test_gene_ID, test_pred, test_labels, test_pearson, ['na']]
np.save(os.path.join(save_dir, 'model_' + date_and_time + '_test_metrics' + '.npy'), test_metrics)
dataset_list = [train_metrics, valid_metrics, test_metrics]
df_full_metrics = pd.DataFrame(columns=['Dataset','Node ID','True Label','Predicted Label'])
for d in np.arange(len(dataset_list)):
dataset_metrics = dataset_list[d]
partial_metrics = pd.DataFrame()
partial_metrics['Node ID'] = dataset_metrics[0]
partial_metrics['True Label'] = dataset_metrics[2]
partial_metrics['Predicted Label'] = dataset_metrics[1]
if d == 0:
partial_metrics['Dataset'] = 'Training'
elif d == 1:
partial_metrics['Dataset'] = 'Validation'
elif d == 2:
partial_metrics['Dataset'] = 'Testing'
        df_full_metrics = pd.concat([df_full_metrics, partial_metrics], ignore_index=True)
df_gene_names = df_genes.iloc[:,:3]
df_gene_names = df_gene_names.rename(columns={"gene_catalog_name": "ENSEMBL_ID", "abbrev": "Abbreviation",
"hic_node_id" : 'Node ID'})
df_full_metrics = pd.merge(df_full_metrics, df_gene_names, how='inner', on='Node ID')
df_full_metrics = df_full_metrics[df_full_metrics.columns[[0,1,4,5,2,3]]]
### Print elapsed time and performance
elapsed = (time.time() - start_time)
elapsed_h = int(elapsed//3600)
elapsed_m = int((elapsed - elapsed_h*3600)//60)
elapsed_s = int(elapsed - elapsed_h*3600 - elapsed_m*60)
print('Elapsed time: {0:02d}:{1:02d}:{2:02d}'.format(elapsed_h, elapsed_m, elapsed_s))
print('\nPerformance:')
if regression_flag == 0:
print('Test AUROC:', test_AUROC, '\n')
print('Test AUPR:', test_AUPR, '\n\n')
elif regression_flag == 1:
print('Test pearson:', test_pearson, '\n')
### Save trained model parameters, model predictions CSV file, model performance/information
model_path = os.path.join(save_dir, 'model_' + date_and_time + '.pt')
torch.save(model.state_dict(), model_path)
df_full_metrics_filename = os.path.join(save_dir, 'model_predictions_' + date_and_time + '.csv')
df_full_metrics.to_csv(df_full_metrics_filename, index=False)
model_info_filename = os.path.join(save_dir,'model_' + date_and_time + '_info.txt')
f = open(model_info_filename, 'w')
f.write('File name: ' + os.path.basename(__file__) + '\n')
f.write('Model reference date and time: ' + date_and_time + '\n\n')
f.write('Start date: ' + mdy + '\n')
f.write('Start time: ' + hm_colon + '\n')
f.write('Total time: {0:02d}:{1:02d}:{2:02d}'.format(elapsed_h, elapsed_m, elapsed_s))
f.write('\n\n')
f.write('Task: ' + task + '\n')
f.write('Cell line: ' + cell_line + '\n')
f.write('Dataset split:\n')
f.write('Training set: 70%' + '\n')
f.write('Validation set: 15%' + '\n')
f.write('Testing set: 15%' + '\n\n')
f.write('Performance:\n')
if regression_flag == 0:
f.write('Test AUROC: ' + str(test_AUROC) + '\n')
f.write('Test AUPR: ' + str(test_AUPR) + '\n\n')
elif regression_flag == 1:
f.write('Test PCC: ' + str(test_pearson) + '\n\n')
f.write('Hyperparameters:\n')
f.write('Number of epochs: ' + str(max_epoch) + '\n')
f.write('Learning rate: ' + str(learning_rate) + '\n')
f.write('Number of graph convolutional layers: ' + str(num_graph_conv_layers) + '\n')
f.write('Graph convolutional layer size: ' + str(graph_conv_embed_size) + '\n')
f.write('Number of linear layers: ' + str(num_lin_layers) + '\n')
f.write('Linear hidden layer size: ' + str(lin_hidden_size) + '\n\n')
f.write('Model\'s state_dict:\n')
for param_tensor in model.state_dict():
f.write(str(param_tensor) + "\t" + str(model.state_dict()[param_tensor].size()) + '\n')
f.close()
|
class Animal: # common behavior shared by all animals
def __init__(self, weight, sound):
self.weight = weight
self.sound = sound
def sleep(self):
print("코 잔다")
def speak(self):
print(self.sound)
def eat(self):
print("먹는다")
def show(self):
print("동물 : %.2fkg"%self.weight)
class Cat(Animal):
def eat(self):
print("츄르를 먹는다")
def show(self):
print("고양이 : %.2fkg"%self.weight)
class Dog(Animal):
def eat(self):
print("개껌을 먹는다")
def show(self):
print("개 : %.2fkg"%self.weight)
jinwoo = Animal(66, "Jinwoo Jinwoo")
jinwoo.sleep()
jinwoo.eat()
jinwoo.speak()
jinwoo.show()
print()
cheeze = Cat(3.5, "Meow meow")
cheeze.sleep()
cheeze.eat()
cheeze.speak()
cheeze.show()
print()
dangdang = Dog(6.5, "Woof woof")
dangdang.sleep()
dangdang.eat()
dangdang.speak()
dangdang.show()
print()
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the BigqueryDao."""
import json
from tests.unittest_utils import ForsetiTestCase
import mock
import unittest
from MySQLdb import DataError
from google.cloud.security.common.data_access import _db_connector
from google.cloud.security.common.data_access import errors
from google.cloud.security.common.data_access import bigquery_dao
from google.cloud.security.common.data_access.sql_queries import select_data
class BigqueryDaoTest(ForsetiTestCase):
"""Tests for the BigqueryDao."""
@mock.patch.object(_db_connector.DbConnector, '__init__', autospec=True)
def setUp(self, mock_db_connector):
mock_db_connector.return_value = None
self.bigquery_dao = bigquery_dao.BigqueryDao()
self.resource_name = 'bigquery_datasets'
self.fake_timestamp = '12345'
def test_get_bigquery_acls(self):
"""Test get_bigquery_acls()."""
conn_mock = mock.MagicMock()
cursor_mock = mock.MagicMock()
fetch_mock = mock.MagicMock()
self.bigquery_dao.conn = conn_mock
self.bigquery_dao.conn.cursor.return_value = cursor_mock
cursor_mock.fetchall.return_value = fetch_mock
fake_query_acls = select_data.BIGQUERY_ACLS.format(
self.fake_timestamp)
self.bigquery_dao.get_bigquery_acls(
self.resource_name,
self.fake_timestamp)
cursor_mock.execute.assert_called_once_with(fake_query_acls, None)
cursor_mock.fetchall.assert_called_once_with()
if __name__ == '__main__':
unittest.main()
|
"""
Problem Statement
Shashank likes strings in which consecutive characters are different. For
example, he likes ABABA, while he doesn't like ABAA. Given a string containing
characters A and B only, he wants to change it into a string he likes. To do
this, he is allowed to delete the characters in the string.
Your task is to find the minimum number of required deletions.
Input Format
The first line contains an integer T i.e. the number of test cases.
Next T lines contain a string each.
Output Format
Print minimum number of required steps for each test case.
Constraints
1 ≤ T ≤ 10
1 ≤ length of string ≤ 10^5
Sample Input
5
AAAA
BBBBB
ABABABAB
BABABA
AAABBB
Sample Output
3
4
0
0
4
Explanation
AAAA⟹A, 3 deletions
BBBBB⟹B, 4 deletions
ABABABAB⟹ABABABAB, 0 deletions
BABABA⟹BABABA, 0 deletions
AAABBB⟹AB, 4 deletions
"""
from datetime import datetime
def timedcall(fn):
"""
Call function, print the time taken and return result.
"""
def wrapper(*args):
t0 = datetime.now()
result = fn(*args)
t1 = datetime.now() - t0
print("Time taken:\t{0}".format(t1))
return result
return wrapper
def kill_duplicates(startpos, data_list):
    """Collapse the run of identical characters beginning at startpos down to a
    single character, deleting the duplicates in place, and return how many
    characters were removed."""
    dist = 0
    l = len(data_list)
    while (startpos+dist+1 < l) and (data_list[startpos] == data_list[startpos+dist+1]):
        dist += 1
    del data_list[startpos:startpos+dist]
    return dist
@timedcall
def calc_alternate_distance(s):
if len(s) <= 1:
return 0
slist = list(s)
idx = 0
dist = 0
while idx < len(slist)-1:
dist += kill_duplicates(idx, slist)
idx += 1
return dist
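# A hedged alternative sketch (not part of the original solution): every adjacent
# pair of equal characters costs exactly one deletion, so the same answer can be
# computed in a single pass without mutating a list.
@timedcall
def calc_alternate_distance_onepass(s):
    """Count deletions needed so that no two consecutive characters are equal."""
    return sum(1 for a, b in zip(s, s[1:]) if a == b)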
# ---------------------------------------------------------------------------- #
'''
T = int(input()) # number of test cases
tests = []
for i in range(T):
N = input() # number of cycles in test scenario
tests.append(N)
'''
tests = ['AAAA', 'BBBBB', 'ABABABAB', 'BABABA', 'AAABBB']
test = ""
for i in range(50000):
test += "BBBCC"
tests.append(test)
start_time = datetime.now()
for i in tests:
print(calc_alternate_distance(i))
time_taken = datetime.now() - start_time
print("--->\tAll Time taken: {0}".format(time_taken))
|
"""Custom topology example
Two directly connected switches plus a host for each switch:
host --- switch --- switch --- host
Adding the 'topos' dict with a key/value pair to generate our newly defined
topology enables one to pass in '--topo=mytopo' from the command line.
"""
from mininet.topo import Topo
class MyTopo( Topo ):
"Simple topology example."
def __init__( self ):
"Create custom topo."
# Initialize topology
Topo.__init__( self )
# Add 12 switches
switch1 = self.addSwitch('s1')
switch2 = self.addSwitch('s2')
switch3 = self.addSwitch('s3')
switch4 = self.addSwitch('s4')
switch5 = self.addSwitch('s5')
switch6 = self.addSwitch('s6')
switch7 = self.addSwitch('s7')
switch8 = self.addSwitch('s8')
switch9 = self.addSwitch('s9')
switch10 = self.addSwitch('s10')
switch11 = self.addSwitch('s11')
switch12 = self.addSwitch('s12')
# Add 12 hosts
# Each host connect to the corresponding switch
h1 = self.addHost('h1')
h2 = self.addHost('h2')
h3 = self.addHost('h3')
h4 = self.addHost('h4')
h5 = self.addHost('h5')
h6 = self.addHost('h6')
h7 = self.addHost('h7')
h8 = self.addHost('h8')
h9 = self.addHost('h9')
h10 = self.addHost('h10')
h11 = self.addHost('h11')
h12 = self.addHost('h12')
# Add links between switches, followed Abilene Topology
self.addLink(switch1, switch2)
self.addLink(switch2, switch5)
self.addLink(switch2, switch6)
self.addLink(switch2, switch12)
self.addLink(switch9, switch12)
self.addLink(switch9, switch3)
self.addLink(switch3, switch6)
self.addLink(switch6, switch7)
self.addLink(switch7, switch4)
self.addLink(switch4, switch11)
self.addLink(switch11, switch10)
self.addLink(switch10, switch8)
self.addLink(switch8, switch5)
self.addLink(switch7, switch5)
self.addLink(switch4, switch10)
# Add links between host and switch
self.addLink(h1, switch1)
self.addLink(h2, switch2)
self.addLink(h3, switch3)
self.addLink(h4, switch4)
self.addLink(h5, switch5)
self.addLink(h6, switch6)
self.addLink(h7, switch7)
self.addLink(h8, switch8)
self.addLink(h9, switch9)
self.addLink(h10, switch10)
self.addLink(h11, switch11)
self.addLink(h12, switch12)
topos = { 'mytopo': ( lambda: MyTopo() ) }
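# Example usage (assuming this file were saved as, e.g., abilene_topo.py):
#   sudo mn --custom abilene_topo.py --topo mytopo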
|
""" Generates random dice rolls, nothing special just a randint wrapper """
from random import randint
from typing import Optional
# Can't roll less than 1 die
MIN_DICE_COUNT = 1
# 10 is a pretty sensible maximum per turn
MAX_DICE_COUNT = 10
# Cannot have a 1 sided die
MIN_DICE_FACE_COUNT = 2
# Largest you'd ever want to roll is a d100
MAX_DICE_FACE_COUNT = 100
#Emulate rolling a set of dice
def rollDice(dice_num : int, face_count : int) -> Optional[int]:
#Sanity checking, no more than 10 d100 dice
if dice_num > MAX_DICE_COUNT or face_count > MAX_DICE_FACE_COUNT or dice_num < MIN_DICE_COUNT or face_count < MIN_DICE_FACE_COUNT:
return None
total = 0
for die in range(0, dice_num):
#1 to face_count random ints
result = randint(1, face_count)
total += result
return total
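# Illustrative usage (hypothetical driver block, not part of the original module):
if __name__ == "__main__":
    # Roll two six-sided dice; prints a total between 2 and 12
    print(rollDice(2, 6))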
|