import json

def readJsonFile(fileName):
    # Read and parse a JSON file, closing the handle when done.
    with open(fileName) as jsonFile:
        return json.load(jsonFile)

def writeJsonFile(fileName, data):
    # Write data out as JSON with readable indentation.
    with open(fileName, 'w+') as jsonFile:
        json.dump(data, jsonFile, indent=4)
|
#!/usr/bin/env python
import sys, getopt
import glob
import math
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import gridspec
from matplotlib.patches import Ellipse
import mpl_toolkits.axisartist as AA
from mpl_toolkits.axes_grid1 import host_subplot
from scipy.interpolate import interp1d
from scipy.interpolate import splev, splrep
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy import integrate
import os, os.path
import scipy.optimize
import re
import logging
from astropy.io import fits
from datetime import datetime
import warnings;warnings.filterwarnings('ignore')
def usage(__cmd__):
    print 'USAGE:'
    print '  '+os.path.basename(__cmd__)+' [-uh] [--imgout] <MODE> <TAB>'
    print 'PARAMETERS:'
    print '  MODE     Analysis mode (calc/corr).'
    print '  EVTFITS  Events FITS file.'
    print '  TAB      Data table of day vs. phi.'
    print '  PORB     Orbital period (day).'
    print '  ECC      Eccentricity of the orbit.'
    print '  ASINI    Projected semi-major axis (lt-s).'
    print '  OMG      Longitude of periastron (degree).'
    print '  SPCONJ   Superior conjunction in MJD (day).'
    print '  PERAST   Periastron passage in MJD (day).'
    print 'The following shows which parameters must be given for each analysis mode.'
    print '  To calculate the day-phi table:        <MODE>=calc'
    print '    <TAB> <PORB> <ECC>'
    print '  To plot the day-phi table:             <MODE>=tplt'
    print '    <TAB> <PORB>'
    print '  To conduct binary orbital correction:  <MODE>=corr'
    print '    <TAB> <PORB> <ASINI> <ECC> <OMG> <SPCONJ|PERAST> <EVTFITS>'
    sys.exit()
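# Example invocations (the script name and all parameter values below are
# hypothetical, shown only to illustrate the option syntax the parser accepts):
#   python borbcor.py --mode=calc --tab=binary_tab.csv --porb=100.0 --ecc=0.5
#   python borbcor.py --mode=corr --tab=binary_tab.csv --porb=100.0 --ecc=0.5 \
#       --asini=500.0 --omg=90.0 --perast=55000.0 --evtfits=source.evt --imgout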
def setparam(__input__):
try:
opts, args = getopt.getopt(__input__[1:], "hu",
[ "help", "imgout",
"PORB=","porb=","ECC=","ecc=",
"SPCONJ=","SPCONJ=","PERAST=","perast=",
"ASINI=","asini=","OMG=","omg=",
"TAB=","tab=","EVTFITS=","evtfits=",
"mode=","MODE=" ])
except getopt.GetoptError, err:
print str(err) # will print something like "option -a not recognized"
sys.exit()
#print 'opts',opts ##DEBUG
#print 'args',args ##DEBUG
cmd = __input__[0]
mode = None
porb = None
ecc = None
spconj = None
perast = None
asin = None
omg = None
tab = None
evt = None
imgout = False
for o, a in opts:
if o in ("-u","-h","--help"):
usage(cmd)
elif o in ("--imgout"):
imgout = True
elif o in ("--mode","--MODE"):
mode = a
elif o in ("--porb","--PORB"):
porb = a
elif o in ("--ecc","--ECC"):
ecc = a
elif o in ("--spconj","--SPCONJ"):
spconj = a
elif o in ("--perast","--PERAST"):
perast = a
elif o in ("--asini","--ASINI"):
asin = a
elif o in ("--omg","--OMG"):
omg = a
elif o in ("--evtfits","--EVTFITS"):
evt = a
elif o in ("--tab","--TAB"):
tab = a
else:
assert False, "unhandled option"
# ...
return cmd,mode,porb,ecc,spconj,perast,asin,omg,tab,evt,imgout
class correct:
def __init__(self,__porb__,__ecc__,__spconj__,__perast__,__asin__,__omg__,__tab__,__evt__,__imgout__):
self.num = 5000
self.dpi = 200
self.imgout = __imgout__
if __porb__ is not None:
self.porb = float(__porb__)
self.N = 2*np.pi/self.porb
if __ecc__ is not None:
self.ecc = float(__ecc__)
if __porb__ is not None and __ecc__ is not None:
self.const = ((1-self.ecc**2)**1.5)/self.N
if __spconj__ is not None:
self.spconj = float(__spconj__)
if __perast__ is not None:
self.perast = float(__perast__)
self.spconj = None
if __asin__ is not None:
self.asin = float(__asin__)
if __asin__ is not None and __ecc__ is not None:
self.b = self.asin*np.sqrt(1-self.ecc**2)
self.c = self.asin*self.ecc
if __omg__ is not None:
self.omg = float(__omg__)
if __tab__ is not None:
self.tabfile = str(__tab__)
if __evt__ is not None:
self.evt = str(__evt__)
self.tmpdf = pd.DataFrame({'ang(radian)':[],
'tim(day)':[],
'phi(phase)':[] })
##--------------------------------------------------------------------------
## make tim(day)-phi(rad) table
##--------------------------------------------------------------------------
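    ## The table is built by numerically integrating Kepler's second law:
    ##   t(phi) = (1 - e^2)^(3/2) / N * integral_0^phi dtheta / (1 + e*cos(theta))^2
    ## where N = 2*pi/Porb is the mean motion, phi is the true anomaly measured from
    ## periastron, and t comes out in days. infunc() is the integrand and integrate()
    ## evaluates t(phi) with scipy's adaptive quadrature.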
def infunc(self,phi):
y = (1.0+self.ecc*np.cos(phi))**(-2)
return y
def integrate(self,phi):
t = self.const*integrate.quad(self.infunc, 0.0, phi, limit=1000000)[0]
return t
def calc_tab(self):
phista=0.0*np.pi
phistp=4.0*np.pi
#print 'phi start %s'%(str(phista)) #DEBUG
#print 'phi stop %s'%(str(phistp)) #DEBUG
dp=(phistp-phista)/self.num
self.tmpdf['ang(radian)'] = phista+np.arange(0,self.num,1)*dp
for i in np.arange(0,self.num,1):
self.tmpdf['tim(day)'][i] = self.integrate(self.tmpdf['ang(radian)'][i])
self.tmpdf['phi(phase)'] = self.tmpdf['tim(day)']/self.porb
self.tmpdf.to_csv(self.tabfile)
print '%s was generated' %(str(self.tabfile))
#print self.tmpdf
##--------------------------------------------------------------------------
## plot tim(day)-phi(rad) table
##--------------------------------------------------------------------------
def read_tab(self):
if os.path.exists(self.tabfile) is True:
self.table = pd.read_csv(self.tabfile,
header = 0,
names=['ang','phi','day']
)
#print self.table
else:
print '%s does not exist!' %(str(self.tabfile))
sys.exit()
def phi2ang(self,p):
ang = interp1d(self.table['phi'], self.table['ang'])
ang_supplement = ang(p)
return ang_supplement
def deg2phi(self,deg):
ang = np.radians(float(deg))
phi = interp1d(self.table['ang'], self.table['phi'])
phi_supplement = phi(ang)
return float(phi_supplement)
def plt_tab(self):
tmp_day = np.arange(0.0, self.porb*1, 1)
tmp_phi = tmp_day/self.porb
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_axes((0.1, 0.1, 0.85, 0.8))
ax2 = ax1.twiny()
ax1.plot(self.table['day'],self.table['ang'],
label="ellipsoidal",color="r",
ls='-',lw=1,zorder=2)
ax1.plot(self.table['day'],self.table['day']*self.N,
label="circular",color="green",
ls='--',lw=1,zorder=1)
ax1.scatter(tmp_day,self.phi2ang(tmp_phi),
label="interpolation",color="b",
marker='o',s=5,zorder=3)
x1ticks = np.arange(0.0, round(self.porb)*2, round(round(self.porb)*0.1,1))
x2ticks = np.arange(0.0, 1.1, 0.1)
y1ticks = np.arange(0.0, 2*np.pi+0.1, 0.25*np.pi)
xlim = [ -0.05, 1.05 ] #orbital phase
ylim = [ -0.1, 2*np.pi+0.1 ] #angle in radian
ax1.grid(True)
ax1.xaxis.set_ticks(x1ticks)
ax1.yaxis.set_ticks(y1ticks)
ax1.set_yticklabels(['0.0','','0.5','','1.0','','1.5','','2.0'])
ax1.set_xlim(xlim[0]*self.porb, xlim[1]*self.porb)
ax1.set_ylim(ylim[0], ylim[1])
ax2.xaxis.set_ticks(x2ticks)
ax2.set_xticklabels(['0.0','','0.2','','0.4','','0.6','','0.8','','1.0',''])
ax2.set_xlim(xlim[0], xlim[1])
ax2.set_ylim(ylim[0], ylim[1])
ax2.set_xlabel(r'orbital phase')
ax1.set_xlabel(r'day from periastron passing time')
ax1.set_ylabel(r'$\phi$($\pi$ radian)')
ax1.tick_params(axis='both',which='both',direction='in')
ax1.legend(fontsize=8.0, loc=4, scatterpoints=3, numpoints=1,
fancybox=True, framealpha=1.0)
if self.imgout is True:
outimg = self.tabfile.replace('tab','').replace('.csv','_tab.png')
plt.savefig(outimg, format='png', dpi=self.dpi , transparent=True)
print(outimg+' is generated')
else:
plt.pause(1.0)
plt.show()
##--------------------------------------------------------------------------
## plot orbital
##--------------------------------------------------------------------------
def orbital(self,phi):
r = self.asin*(1-self.ecc**2)/(1+self.ecc*np.cos(phi))
return r
def spconj2perast(self):
self.angspconj = 90.0-self.omg #in unit of degree
if self.angspconj < 0.0:
self.angspconj = self.angspconj + 360
self.phispconj = self.deg2phi(self.angspconj) #in unit of phase
self.perast = self.spconj-self.porb*self.phispconj
def plt_orb(self):
ms = 1; ma = '.'
sms = 3; sma = 'o'
lw = 1; ls = '-'; pls = '--'
hl = self.asin/15.; hw = self.asin/15.
fig = plt.figure(figsize=(8, 8))
fig.subplots_adjust(left=0.1, right=0.9,
top=0.9, bottom=0.1,
wspace=0.1, hspace=0.1)
ax = fig.add_subplot(111)
# the center of the ellipse
x = -1*self.c*np.cos(np.radians(self.omg))
y = -1*self.c*np.sin(np.radians(self.omg))
ax.add_artist(Ellipse(xy=[x,y],width=2.0*self.asin,height=2.0*self.b,
angle=self.omg,facecolor='none'))
# the focus
xf = [x+self.c*np.cos(np.radians(self.omg)),
x-self.c*np.cos(np.radians(self.omg))]
yf = [y+self.c*np.sin(np.radians(self.omg)),
y-self.c*np.sin(np.radians(self.omg))]
ax.plot(xf,yf,'xb', zorder=2)
# orbital phase
orb_lll = self.asin*(1-self.ecc**2)
orb_phi = np.arange(0.0,1.0,0.1)
orb_cnf = map(str,list(orb_phi))
if spconj is not None:
orb_phi = orb_phi+self.phispconj
mjr_phi = np.arange(0.0,1.0,0.5)
mjr_ang = self.phi2ang(mjr_phi)
mjr_rad = orb_lll/(1+self.ecc*np.cos(mjr_ang))
mxr = mjr_rad*np.cos(mjr_ang)
myr = mjr_rad*np.sin(mjr_ang)
mxrp = mxr*np.cos(np.radians(self.omg))-myr*np.sin(np.radians(self.omg))
myrp = mxr*np.sin(np.radians(self.omg))+myr*np.cos(np.radians(self.omg))
for q in range(0,len(mjr_phi)):
ax.plot([xf[0], xf[0]+mxrp[q]],[yf[0], yf[0]+myrp[q]],
color='blue',ls='--',lw=1.5,zorder=1)
orb_ang = self.phi2ang(orb_phi)
orb_rad = orb_lll/(1+self.ecc*np.cos(orb_ang))
# converting the radius based on the focus
# into x,y coordinates on the ellipse:
xr = orb_rad*np.cos(orb_ang)
yr = orb_rad*np.sin(orb_ang)
        # accounting for the rotation by angle omega:
xrp = xr*np.cos(np.radians(self.omg))-yr*np.sin(np.radians(self.omg))
yrp = xr*np.sin(np.radians(self.omg))+yr*np.cos(np.radians(self.omg))
# put labels outside the "rays"
offset = self.asin*0.1
rLabel = orb_rad+offset
xrl = rLabel*np.cos(orb_ang)
yrl = rLabel*np.sin(orb_ang)
xrpl = xrl*np.cos(np.radians(self.omg)) - yrl*np.sin(np.radians(self.omg))
yrpl = xrl*np.sin(np.radians(self.omg)) + yrl*np.cos(np.radians(self.omg))
tlabel = np.degrees(orb_ang-np.pi)
for q in range(0,len(tlabel)):
if tlabel[q] >= 180.0:
tlabel[q] -= 180.0
for q in range(0,len(orb_phi)):
ax.plot([xf[0], xf[0]+xrp[q]],[yf[0], yf[0]+yrp[q]],
color='black',ls='--',lw=1,zorder=1)
ax.text(xf[0]+xrpl[q],yf[0]+yrpl[q],orb_cnf[q],
va='center', ha='center',
rotation=self.omg+tlabel[q])
# put an arrow to "Earth"
arrowfactor = -1.3
ax.arrow(0, 0, 0, arrowfactor*self.asin,
head_length=hl, head_width=hw,
edgecolor='black', facecolor='black', lw=1, zorder=1)
ax.text(x=1E-01*self.asin,y=arrowfactor*self.asin,
s='To Earth', fontsize=12.0, va='top', ha='left')
# plot observation
angle = self.timdata[(self.timdata['DFT0']>0.0)]['ANG']
self.timdata['r'] = orb_lll/(1+self.ecc*np.cos(angle))
self.timdata['xr'] = self.timdata['r']*np.cos(angle)
self.timdata['yr'] = self.timdata['r']*np.sin(angle)
self.timdata['rxr'] = self.timdata['xr']*np.cos(np.radians(self.omg)) - self.timdata['yr']*np.sin(np.radians(self.omg))
self.timdata['ryr'] = self.timdata['xr']*np.sin(np.radians(self.omg)) + self.timdata['yr']*np.cos(np.radians(self.omg))
ax.scatter(self.timdata['rxr'], self.timdata['ryr'],
label='Suzaku',color='red',
marker=sma, s=sms, zorder=3)
# observation phase-----------------------------------------------------
"""
obs_phi = np.array( [ 0.19, 0.38 ] )
obs_cnf = [ 'suzaku', 'suzaku' ]
obs_ang = self.phi2ang(obs_phi)
obs_rad = orb_lll/(1+self.ecc*np.cos(obs_ang))
color = [ 'red', 'red' ]
obs_xr = obs_rad*np.cos(obs_ang)
obs_yr = obs_rad*np.sin(obs_ang)
obs_xrp = obs_xr*np.cos(np.radians(self.omg))-obs_yr*np.sin(np.radians(self.omg))
obs_yrp = obs_xr*np.sin(np.radians(self.omg))+obs_yr*np.cos(np.radians(self.omg))
obs_rLabel = obs_rad+offset
obs_xrl = obs_rLabel*np.cos(obs_ang)
obs_yrl = obs_rLabel*np.sin(obs_ang)
obs_xrpl = obs_xrl*np.cos(np.radians(self.omg)) - obs_yrl*np.sin(np.radians(self.omg))
obs_yrpl = obs_xrl*np.sin(np.radians(self.omg)) + obs_yrl*np.cos(np.radians(self.omg))
obs_tlabel = np.degrees(obs_ang-np.pi)
for q in range(0,len(obs_tlabel)):
if obs_tlabel[q] >= 180.0:
obs_tlabel[q] -= 180.0
for x,y,xp,yp,tl,t,c in zip(obs_xrp,obs_yrp,obs_xrpl,obs_yrpl,obs_tlabel,obs_cnf,color):
ax.plot([xf[0], xf[0]+x], [yf[0], yf[0]+y],
color=c, ls=pls, lw=lw, ms=ms, marker=ma, zorder=1)
ax.text(x=xf[0]+xp,y=yf[0]+yp,
rotation= self.omg+tl,
s=t, fontsize=12.0, va='center', ha='center', color='black')
"""
# observation phase END-------------------------------------------------
# celestial plane
ax.plot([np.radians(self.omg-270),np.radians(self.omg-90)],
[self.asin*2,self.asin*2],
color='black', ls=ls, lw=lw, ms=ms, marker=ma, zorder=1)
ax.grid(True)
limin=-2000;limax=2000
axis_tick = np.arange(limin,limax,50)
axis_conf = map(str,list(axis_tick))
ax.xaxis.set_ticks(axis_tick)
ax.set_xticklabels(axis_conf)
ax.yaxis.set_ticks(axis_tick)
ax.set_yticklabels(axis_conf)
scale = 1.8
ax.set_xlim(-1*scale*self.asin,scale*self.asin)
ax.set_ylim(-1*scale*self.asin,scale*self.asin)
ax.set_xlabel('[lt-s]')
ax.set_ylabel('[lt-s]')
ax.yaxis.set_label_coords(-0.085, 0.5)
#ax.legend(fontsize=8.0,loc=2,scatterpoints=1, fancybox=True, framealpha=1.0)
ax.axhline(y=0, xmin=limin, xmax=limax, color='green', ls='--', lw=1.5)
fig.suptitle(self.tabfile.replace('.csv',''))
if self.imgout is True:
outimg = self.evt.replace('.evt','_orb.png')
plt.savefig(outimg, format='png', dpi=self.dpi , transparent=True)
print(outimg+' is generated')
else:
plt.pause(1.0)
plt.show()
##--------------------------------------------------------------------------
def coraetime(self,aetime):
##--------------------------------------------------------------------------
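        ## Convert spacecraft time to MJD, compute the orbital phase since periastron,
        ## and subtract the projected light-travel-time (Roemer) delay
        ##   dtim = r(phi) * sin(phi + omega)
        ## (r is in lt-s, so dtim is in seconds), referring photon arrival times to
        ## the binary barycenter.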
day = aetime/86400+self.mjdref #;print self.aetime
dft0 = day-self.perast #;print self.dft0
phi = dft0/self.porb #;print self.phi
phii = int(phi) #;print self.phii
phif = phi-phii #;print self.phif
ang = self.phi2ang(phif) #;print self.ang
dtim = self.orbital(ang)*np.sin(ang+np.radians(self.omg)) #;print self.dtim
ctim = aetime-dtim
return ctim,dtim
##--------------------------------------------------------------------------
def mkcorevt(self):
##--------------------------------------------------------------------------
self.hdulist = fits.open(self.evt)
self.ext0dat = self.hdulist[0].data
self.ext0hed = self.hdulist[0].header
self.ext1dat = self.hdulist[1].data
self.ext1hed = self.hdulist[1].header
self.ext2dat = self.hdulist[2].data
self.ext2hed = self.hdulist[2].header
self.hdulist.close()
self.mjdrefi = self.ext0hed['MJDREFI']
self.mjdreff = self.ext0hed['MJDREFF']
self.mjdref = self.mjdrefi+self.mjdreff #ORIGIN OF SUZAKU TIME
## correct TSTART and TSTOP keywords included in header of EVENTS FITS file
self.tsta = self.ext0hed['TSTART']
self.tstp = self.ext0hed['TSTOP']
self.ctsta,self.dtsta = self.coraetime(self.tsta)
self.ctstp,self.dtstp = self.coraetime(self.tstp)
self.target = str(self.ext0hed['OBJECT'])
self.obsid = str(self.ext0hed['OBS_ID'])
##----------------------------------------------------------------------
## correct extension#1 of EVENTS FITS file
##----------------------------------------------------------------------
## correction of TIME column
self.timdata = pd.DataFrame({'TIME':self.ext1dat['TIME']})
self.timdata['DAY'] = self.mjdref+self.timdata['TIME']/86400
self.timdata['DFT0'] = self.timdata['DAY']-self.perast
self.timdata['PHI'] = self.timdata['DFT0']/self.porb
self.timdata['PHII'] = self.timdata[['PHI']].astype(int)
self.timdata['PHIF'] = self.timdata['PHI']-self.timdata['PHII']
self.timdata['PHIF'].where(self.timdata['DFT0']>0.0,0.0,inplace=True)
self.timdata['ANG'] = self.phi2ang(self.timdata['PHIF'])
self.timdata['ANG'].where(self.timdata['DFT0']>0.0,0.0,inplace=True)
self.timdata['DTIM'] = self.orbital(self.timdata['ANG'])\
*np.sin(self.timdata['ANG']+np.radians(self.omg))
self.timdata['CTIM'] = self.timdata['TIME']-self.timdata['DTIM']
self.timdataminday = int(self.timdata[(self.timdata['DFT0']>0.0)]['DAY'].min())
self.ext1dat['TIME'] = self.timdata['CTIM']
## correction of header keywords of extension#1
self.ext1hed['TSTART'] = self.ctsta
self.ext1hed['TSTOP'] = self.ctstp
self.ext1hed['TSTABOCA'] = self.dtsta
self.ext1hed.comments['TSTABOCA'] = 'binary-orbit correction amount for TSTART'
self.ext1hed['TSTPBOCA'] = self.dtstp
self.ext1hed.comments['TSTPBOCA'] = 'binary-orbit correction amount for TSTOP'
fits.writeto('borbcorpy_ext1.evt',self.ext1dat,self.ext1hed,clobber=True)
##----------------------------------------------------------------------
## correct extension#2 of EVENTS FITS file
##----------------------------------------------------------------------
## correction of START & STOP columns
self.gtidata = pd.DataFrame({'TSTA':self.ext2dat['START'],
'TSTP':self.ext2dat['STOP']})
self.gtidata['DSTA'] = self.mjdref+self.gtidata['TSTA']/86400.0
self.gtidata['DSTP'] = self.mjdref+self.gtidata['TSTP']/86400.0
self.gtidata['DFT0STA'] = self.gtidata['DSTA']-self.perast
self.gtidata['DFT0STP'] = self.gtidata['DSTP']-self.perast
self.gtidata['PSTA'] = self.gtidata['DFT0STA']/self.porb
self.gtidata['PSTP'] = self.gtidata['DFT0STP']/self.porb
self.gtidata['PISTA'] = self.gtidata[['PSTA']].astype(int)
self.gtidata['PISTP'] = self.gtidata[['PSTP']].astype(int)
self.gtidata['PFSTA'] = self.gtidata['PSTA']-self.gtidata['PISTA']
self.gtidata['PFSTP'] = self.gtidata['PSTP']-self.gtidata['PISTP']
self.gtidata['PFSTA'].where(self.gtidata['DFT0STA']>0.0,0.0,inplace=True)
self.gtidata['PFSTP'].where(self.gtidata['DFT0STP']>0.0,0.0,inplace=True)
self.gtidata['ANGSTA'] = self.phi2ang(self.gtidata['PFSTA'])
self.gtidata['ANGSTP'] = self.phi2ang(self.gtidata['PFSTP'])
self.gtidata['ANGSTA'].where(self.gtidata['DFT0STA']>0.0,0.0,inplace=True)
self.gtidata['ANGSTP'].where(self.gtidata['DFT0STP']>0.0,0.0,inplace=True)
self.gtidata['DTSTA'] = self.orbital(self.gtidata['ANGSTA'])\
*np.sin(self.gtidata['ANGSTA']+np.radians(self.omg))
self.gtidata['DTSTP'] = self.orbital(self.gtidata['ANGSTP'])\
*np.sin(self.gtidata['ANGSTP']+np.radians(self.omg))
self.gtidata['CTSTA'] = self.gtidata['TSTA']-self.gtidata['DTSTA']
self.gtidata['CTSTP'] = self.gtidata['TSTP']-self.gtidata['DTSTP']
self.gtidataminday = int(self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DSTA'].min())
self.ext2dat['START'] = self.gtidata['CTSTA']
self.ext2dat['STOP'] = self.gtidata['CTSTP']
## correction of header keywords of extension#2
self.ext2hed['TSTART'] = self.ctsta
self.ext2hed['TSTOP'] = self.ctstp
        self.ext2hed['TSTABOCA'] = self.dtsta
        self.ext2hed.comments['TSTABOCA'] = 'binary-orbit correction amount for TSTART'
        self.ext2hed['TSTPBOCA'] = self.dtstp
self.ext2hed.comments['TSTPBOCA'] = 'binary-orbit correction amount for TSTOP'
fits.writeto('borbcorpy_ext2.evt',self.ext2dat,self.ext2hed,clobber=True)
##--------------------------------------------------------------------------
def plt_timdata(self):
##--------------------------------------------------------------------------
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_subplot(2,1,1)
ax2 = fig.add_subplot(2,1,2, sharex=ax1)
fig.subplots_adjust(left=0.12, bottom=0.095, right=0.95,
top=0.95, wspace=0.15, hspace=0.1)
ax1.scatter(self.timdata[(self.timdata['DFT0']>0.0)]['DAY']-self.timdataminday,
self.timdata[(self.timdata['DFT0']>0.0)]['DTIM'],
label='correction amount',
color='red', marker='o', s=2, zorder=1)
ax2.scatter(self.timdata[(self.timdata['DFT0']>0.0)]['DAY']-self.timdataminday,
self.timdata[(self.timdata['DFT0']>0.0)]['CTIM']/86400+self.mjdref-self.timdataminday,
label='after correction',
color="red", marker='o', s=2, zorder=2)
ax2.plot(self.timdata[(self.timdata['DFT0']>0.0)]['DAY']-self.timdataminday,
self.timdata[(self.timdata['DFT0']>0.0)]['DAY']-self.timdataminday,
label='before correction',
color='green', ls='-', lw=1, zorder=1)
for ax in (ax1,ax2):
ax.grid(True)
ax.tick_params(axis='both',which='both',direction='in')
ax.legend(fontsize=8.0, loc=4, scatterpoints=1, numpoints=1,
fancybox=True, framealpha=1.0)
ax.yaxis.set_label_coords(-0.1,0.5)
ax1.tick_params(labelbottom="off")
ax1.set_ylabel(r'$\Delta$t(sec)')
ax2.set_xlabel('MJD-%s(day)'%(str(int(self.timdataminday))))
ax2.set_ylabel('MJD-%s(day)'%(str(int(self.timdataminday))))
fig.suptitle(self.tabfile.replace('.csv',''))
if self.imgout is True:
outimg = self.evt.replace('.evt','_tim.png')
plt.savefig(outimg, format='png', dpi=self.dpi , transparent=True)
print(outimg+' is generated')
##--------------------------------------------------------------------------
def plt_gtidata(self):
##--------------------------------------------------------------------------
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_subplot(2,1,1)
ax2 = fig.add_subplot(2,1,2, sharex=ax1)
fig.subplots_adjust(left=0.12, bottom=0.095, right=0.95,
top=0.95, wspace=0.15, hspace=0.1)
ax1.scatter(self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DSTA']-self.gtidataminday,
self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DTSTA'],
label='correction amount(TSTART)',
color='red', marker='o', s=2, zorder=1)
ax1.scatter(self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DSTP']-self.gtidataminday,
self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DTSTP'],
label='correction amount(TSTOP)',
color='blue', marker='o', s=2, zorder=1)
ax2.scatter(self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DSTA']-self.gtidataminday,
self.gtidata[(self.gtidata['DFT0STA']>0.0)]['CTSTA']/86400+self.mjdref-self.gtidataminday,
label='after correction(TSTART)',
color="red", marker='o', s=2, zorder=2)
ax2.scatter(self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DSTP']-self.gtidataminday,
self.gtidata[(self.gtidata['DFT0STA']>0.0)]['CTSTP']/86400+self.mjdref-self.gtidataminday,
label='after correction(TSTOP)',
color="blue", marker='o', s=2, zorder=2)
ax2.plot(self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DSTA']-self.gtidataminday,
self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DSTA']-self.gtidataminday,
label='before correction(TSTART)',
color='green', ls='-', lw=1, zorder=1)
ax2.plot(self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DSTP']-self.gtidataminday,
self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DSTP']-self.gtidataminday,
label='before correction(TSTOP)',
color='orange', ls='-', lw=1, zorder=1)
for ax in (ax1,ax2):
ax.grid(True)
ax.tick_params(axis='both',which='both',direction='in')
ax.legend(fontsize=8.0, loc=4, scatterpoints=1, numpoints=1,
fancybox=True, framealpha=1.0)
ax.yaxis.set_label_coords(-0.1,0.5)
ax1.tick_params(labelbottom="off")
ax1.set_ylabel(r'$\Delta$t(sec)')
ax2.set_xlabel('MJD-%s(day)'%(str(int(self.gtidataminday))))
ax2.set_ylabel('MJD-%s(day)'%(str(int(self.gtidataminday))))
fig.suptitle(self.tabfile.replace('.csv',''))
if self.imgout is True:
outimg = self.evt.replace('.evt','_gti.png')
plt.savefig(outimg, format='png', dpi=self.dpi , transparent=True)
print(outimg+' is generated')
##--------------------------------------------------------------------------
def plt_data(self):
##--------------------------------------------------------------------------
fig = plt.figure(figsize=(12, 9))
gs1 = gridspec.GridSpec(2,1)
gs1.update(left=0.1, right=0.48,bottom=0.1, top=0.9,
wspace=0.1, hspace=0.2)
gs2 = gridspec.GridSpec(4,1)
gs2.update(left=0.57, right=0.95, bottom=0.1, top=0.9,
wspace=0.1, hspace=0.1)
ax1 = fig.add_subplot(gs1[0,0])
ax2 = ax1.twiny()
ax3 = fig.add_subplot(gs1[1,0],aspect='equal')
ax4 = fig.add_subplot(gs2[0,0])
ax5 = fig.add_subplot(gs2[1,0],sharex=ax4)
ax6 = fig.add_subplot(gs2[2,0],sharex=ax4)
ax7 = fig.add_subplot(gs2[3,0],sharex=ax4)
##----------------------------------------------------------------------
## visual setting
##----------------------------------------------------------------------
ms = 1 # mark size
ma = '.' # marker
sms = 3 # mark size for scattering plot
sma = 'o' # marker for scattering plot
lw = 1.0 # line width
ls = '-' # line style
        sls = '--'  # line style for supplementary lines
        slw = 2.0   # line width for supplementary lines
fs = 12.0 # font size
lfs = 8.0 # font size for legend
##----------------------------------------------------------------------
## plot table
##----------------------------------------------------------------------
tmp_day = np.arange(0.0, self.porb*1, 1)
tmp_phi = tmp_day/self.porb
ax1.plot(self.table['day'],self.table['ang'],
label="ellipsoidal",color="r",
ls='-',lw=lw,zorder=2)
ax1.plot(self.table['day'],self.table['day']*self.N,
label="circular",color="green",
ls='--',lw=lw,zorder=1)
ax1.scatter(tmp_day,self.phi2ang(tmp_phi),
label="interpolation",color="b",
marker=sma,s=sms,zorder=3,rasterized=True)
x1ticks = np.arange(0.0, round(self.porb)*2, round(round(self.porb)*0.1,1))
x2ticks = np.arange(0.0, 1.1, 0.1)
y1ticks = np.arange(0.0, 2*np.pi+0.1, 0.25*np.pi)
xlim = [ -0.05, 1.05 ] #orbital phase
ylim = [ -0.1, 2*np.pi+0.1 ] #angle in radian
ax1.grid(True)
ax1.xaxis.set_ticks(x1ticks)
ax1.yaxis.set_ticks(y1ticks)
ax1.set_yticklabels(['0.0','','0.5','','1.0','','1.5','','2.0'])
ax1.set_xlim(xlim[0]*self.porb, xlim[1]*self.porb)
ax1.set_ylim(ylim[0], ylim[1])
ax2.xaxis.set_ticks(x2ticks)
ax2.set_xticklabels(['0.0','','0.2','','0.4','','0.6','','0.8','','1.0',''])
ax2.set_xlim(xlim[0], xlim[1])
ax2.set_ylim(ylim[0], ylim[1])
ax2.set_xlabel(r'orbital phase')
ax1.set_xlabel(r'day from periastron passing time')
ax1.set_ylabel(r'$\phi$($\pi$ radian)')
ax1.tick_params(axis='both',which='both',direction='in')
ax1.legend(fontsize=lfs, loc=4, scatterpoints=3, numpoints=1,
fancybox=True, framealpha=1.0)
##----------------------------------------------------------------------
## plot orbital
##----------------------------------------------------------------------
hl = self.asin/15.; hw = self.asin/15.
# the center of the ellipse
x = -1*self.c*np.cos(np.radians(self.omg))
y = -1*self.c*np.sin(np.radians(self.omg))
ax3.add_artist(Ellipse(xy=[x,y],width=2.0*self.asin,height=2.0*self.b,
angle=self.omg,facecolor='none'))
# the focus
xf = [x+self.c*np.cos(np.radians(self.omg)),
x-self.c*np.cos(np.radians(self.omg))]
yf = [y+self.c*np.sin(np.radians(self.omg)),
y-self.c*np.sin(np.radians(self.omg))]
ax3.plot(xf,yf,'xb', zorder=2)
# orbital phase
orb_lll = self.asin*(1-self.ecc**2)
orb_phi = np.arange(0.0,1.0,0.1)
orb_cnf = map(str,list(orb_phi))
if spconj is not None:
orb_phi = orb_phi+self.phispconj
mjr_phi = np.arange(0.0,1.0,0.5)
mjr_ang = self.phi2ang(mjr_phi)
mjr_rad = orb_lll/(1+self.ecc*np.cos(mjr_ang))
mxr = mjr_rad*np.cos(mjr_ang)
myr = mjr_rad*np.sin(mjr_ang)
mxrp = mxr*np.cos(np.radians(self.omg))-myr*np.sin(np.radians(self.omg))
myrp = mxr*np.sin(np.radians(self.omg))+myr*np.cos(np.radians(self.omg))
for q in range(0,len(mjr_phi)):
ax3.plot([xf[0], xf[0]+mxrp[q]],[yf[0], yf[0]+myrp[q]],
color='blue',ls=sls,lw=lw,zorder=1)
orb_ang = self.phi2ang(orb_phi)
orb_rad = orb_lll/(1+self.ecc*np.cos(orb_ang))
# converting the radius based on the focus
# into x,y coordinates on the ellipse:
xr = orb_rad*np.cos(orb_ang)
yr = orb_rad*np.sin(orb_ang)
        # accounting for the rotation by angle omega:
xrp = xr*np.cos(np.radians(self.omg))-yr*np.sin(np.radians(self.omg))
yrp = xr*np.sin(np.radians(self.omg))+yr*np.cos(np.radians(self.omg))
# put labels outside the "rays"
offset = self.asin*0.2
rLabel = orb_rad+offset
xrl = rLabel*np.cos(orb_ang)
yrl = rLabel*np.sin(orb_ang)
xrpl = xrl*np.cos(np.radians(self.omg)) - yrl*np.sin(np.radians(self.omg))
yrpl = xrl*np.sin(np.radians(self.omg)) + yrl*np.cos(np.radians(self.omg))
tlabel = np.degrees(orb_ang-np.pi)
for q in range(0,len(tlabel)):
if tlabel[q] >= 180.0:
tlabel[q] -= 180.0
for q in range(0,len(orb_phi)):
ax3.plot([xf[0], xf[0]+xrp[q]],[yf[0], yf[0]+yrp[q]],
color='black',ls=sls,lw=lw,zorder=1)
ax3.text(xf[0]+xrpl[q],yf[0]+yrpl[q],orb_cnf[q],
va='center', ha='center',
rotation=self.omg+tlabel[q])
# put an arrow to "Earth"
arrowfactor = -1.4
ax3.arrow(0, 0, 0, arrowfactor*self.asin,
head_length=hl, head_width=hw,
edgecolor='black', facecolor='black', lw=lw, zorder=1)
ax3.text(x=1E-01*self.asin,y=arrowfactor*self.asin,
s='To Earth', fontsize=fs, va='top', ha='left')
# plot observation
angle = self.timdata[(self.timdata['DFT0']>0.0)]['ANG']
self.timdata['r'] = orb_lll/(1+self.ecc*np.cos(angle))
self.timdata['xr'] = self.timdata['r']*np.cos(angle)
self.timdata['yr'] = self.timdata['r']*np.sin(angle)
self.timdata['rxr'] = self.timdata['xr']*np.cos(np.radians(self.omg)) - self.timdata['yr']*np.sin(np.radians(self.omg))
self.timdata['ryr'] = self.timdata['xr']*np.sin(np.radians(self.omg)) + self.timdata['yr']*np.cos(np.radians(self.omg))
ax3.scatter(self.timdata['rxr'], self.timdata['ryr'],
label='Suzaku',color='red',
marker=sma, s=sms, zorder=3, rasterized=True)
# celestial plane
ax3.plot([np.radians(self.omg-270),np.radians(self.omg-90)],
[self.asin*2,self.asin*2],
color='black', ls=ls, lw=lw, ms=ms, marker=ma, zorder=1)
ax3.grid(True)
limin=-2000;limax=2000
axis_tick = np.arange(limin,limax,50)
axis_conf = map(str,list(axis_tick))
ax3.xaxis.set_ticks(axis_tick)
ax3.set_xticklabels(axis_conf)
ax3.yaxis.set_ticks(axis_tick)
ax3.set_yticklabels(axis_conf)
scale = 1.8
ax3.set_xlim(-1*scale*self.asin,scale*self.asin)
ax3.set_ylim(-1*scale*self.asin,scale*self.asin)
ax3.set_xlabel('[lt-s]')
ax3.set_ylabel('[lt-s]')
ax3.yaxis.set_label_coords(-0.085, 0.5)
ax3.axhline(y=0, xmin=limin, xmax=limax, color='green', ls=sls, lw=lw)
##----------------------------------------------------------------------
## plot time & gti data
##----------------------------------------------------------------------
ax4.scatter(self.timdata[(self.timdata['DFT0']>0.0)]['DAY']-self.timdataminday,
self.timdata[(self.timdata['DFT0']>0.0)]['DTIM'],
label='correction amount',
color='red', marker=sma, s=sms, zorder=1, rasterized=True)
ax5.scatter(self.timdata[(self.timdata['DFT0']>0.0)]['DAY']-self.timdataminday,
self.timdata[(self.timdata['DFT0']>0.0)]['CTIM']/86400+self.mjdref-self.timdataminday,
label='after correction',
color="red", marker=sma, s=sms, zorder=2, rasterized=True)
ax5.plot(self.timdata[(self.timdata['DFT0']>0.0)]['DAY']-self.timdataminday,
self.timdata[(self.timdata['DFT0']>0.0)]['DAY']-self.timdataminday,
label='before correction',
color='green', ls=ls, lw=lw, zorder=1)
ax6.scatter(self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DSTA']-self.gtidataminday,
self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DTSTA'],
label='correction amount(TSTART)',
color='red', marker=sma, s=sms, zorder=1)
ax6.scatter(self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DSTP']-self.gtidataminday,
self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DTSTP'],
label='correction amount(TSTOP)',
color='blue', marker=sma, s=sms, zorder=1)
ax7.scatter(self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DSTA']-self.gtidataminday,
self.gtidata[(self.gtidata['DFT0STA']>0.0)]['CTSTA']/86400+self.mjdref-self.gtidataminday,
label='after correction(TSTART)',
color="red", marker=sma, s=sms, zorder=2, rasterized=True)
ax7.scatter(self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DSTP']-self.gtidataminday,
self.gtidata[(self.gtidata['DFT0STA']>0.0)]['CTSTP']/86400+self.mjdref-self.gtidataminday,
label='after correction(TSTOP)',
color="blue", marker=sma, s=sms, zorder=2, rasterized=True)
ax7.plot(self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DSTA']-self.gtidataminday,
self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DSTA']-self.gtidataminday,
label='before correction(TSTART)',
color='green', ls=ls, lw=lw, zorder=1)
ax7.plot(self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DSTP']-self.gtidataminday,
self.gtidata[(self.gtidata['DFT0STA']>0.0)]['DSTP']-self.gtidataminday,
label='before correction(TSTOP)',
color='orange', ls=ls, lw=lw, zorder=1)
for ax in (ax4,ax5,ax6,ax7):
ax.grid(True)
ax.tick_params(axis='both',which='both',direction='in')
ax.legend(fontsize=lfs, loc=4, scatterpoints=3, numpoints=1,
fancybox=True, framealpha=1.0)
ax.yaxis.set_label_coords(-0.11,0.5)
for ax in (ax4,ax5,ax6):
ax.tick_params(labelbottom="off")
ax4.set_title('event FITS:'+self.evt,fontsize=fs)
ax4.set_ylabel(r'$\Delta$t(sec)')
ax5.set_ylabel('MJD-%s(day)'%(str(int(self.timdataminday))))
ax6.set_ylabel(r'$\Delta$t(sec)')
ax7.set_ylabel('MJD-%s(day)'%(str(int(self.gtidataminday))))
ax7.set_xlabel('MJD-%s(day)'%(str(int(self.timdataminday))))
fig.suptitle('suzaku binary orbital correction TARGET:'+self.target+'/SEQ:'+self.obsid,
fontsize=fs)
if self.imgout is True:
fltype = 'pdf'
outimg = self.evt.replace('.evt','_borbcor.'+fltype)
plt.savefig(outimg, format=fltype, papertype='a4',
dpi=self.dpi)
print(outimg+' is generated')
else:
plt.pause(1.0)
plt.show()
##------------------------------------------------------------------------------
## MAIN
##------------------------------------------------------------------------------
if __name__ == '__main__':
argvs = sys.argv #
argc = len(argvs) #
cmd,mode,porb,ecc,spconj,perast,asin,omg,tab,evt,imgout=setparam(argvs)
if mode is None:
        print 'Please specify <MODE> with --mode (calc/corr).'
else:
func = correct(porb,ecc,spconj,perast,asin,omg,tab,evt,imgout)
if str(mode) == 'calc':
if porb is None or ecc is None or tab is None :
usage(cmd)
else:
func.calc_tab()
func.read_tab()
#func.plt_tab()
elif str(mode) == 'corr':
if tab is None \
or porb is None or ecc is None or asin is None or omg is None \
or (spconj is None and perast is None ) or evt is None:
usage(cmd)
else:
func.read_tab()
if perast is None and spconj is not None:
func.spconj2perast()
func.mkcorevt()
#func.plt_timdata()
#func.plt_gtidata()
#func.plt_orb()
func.plt_data()
else:
usage(cmd)
sys.exit()
|
import numpy as np
def _label_axes(ax, x_label, y_label, fontsize=20, rotate_x_ticks=True):
ax.set_xlabel(x_label, fontsize=fontsize)
ax.set_ylabel(y_label, fontsize=fontsize)
if rotate_x_ticks:
from matplotlib.artist import setp
setp(ax.get_xticklabels(), rotation=90)
def plot_contour(x, y, z, x_label, y_label, ax=None, fmt='%.3f', *args, **kwargs):
from matplotlib import pyplot as plt
from matplotlib.mlab import griddata
if ax is None:
ax = plt.gca()
x = np.array(x, dtype=np.float)
y = np.array(y, dtype=np.float)
z = np.array(z, dtype=np.float)
# Remove duplicates from x, y, z
seen_values = {}
for xi, yi, zi in zip(x, y, z):
try:
seen_values[xi, yi].append(zi)
except KeyError:
seen_values[xi, yi] = [zi]
new_x, new_y, new_z = [], [], []
for (xi, yi), zi_list in seen_values.iteritems():
new_x.append(xi)
new_y.append(yi)
# Use median of distances. TODO: is this the right thing to do?
new_z.append(np.median(zi_list))
new_x, new_y, new_z = np.array(new_x, dtype=np.float), np.array(new_y, dtype=np.float), \
np.array(new_z, dtype=np.float)
# Replace x, y, z with new_x, new_y, new_z
x, y, z = new_x, new_y, new_z
min_x = np.min(x)
max_x = np.max(x)
min_y = np.min(y)
max_y = np.max(y)
xi = np.linspace(min_x, max_x, 100)
yi = np.linspace(min_y, max_y, 100)
# Interpolate points to a grid
try:
zi = griddata(x, y, z, xi, yi)
except Exception as e:
raise Exception("Got {0!r} while interpolating the landscape."
"this may be due to `matplotlib.delaunay` package not being able to"
"handle the interpolation. Try installing natgrid package via pip: "
"`pip install git+git://github.com/matplotlib/natgrid.git`".format(e))
# Plot contours
ax.contourf(xi, yi, zi, *args, **kwargs)
cs = ax.contour(xi, yi, zi, colors='k')
# Some labels for contour lines
ax.clabel(cs, inline=True, fmt=fmt)
_label_axes(ax, '${0}$'.format(x_label), '${0}$'.format(y_label), fontsize=20, rotate_x_ticks=True)
def plot_2d_trajectory(x, y,
x_label='', y_label='',
legend=False,
ax=None,
start_and_end_locations_only=False,
start_marker='bo',
end_marker='rx',
start_label='Start',
end_label='End',
*args, **kwargs):
if ax is None:
from matplotlib import pyplot as plt
ax = plt.gca()
if not start_and_end_locations_only:
ax.plot(x, y, *args, **kwargs)
x = np.array(x)
y = np.array(y)
if start_marker != 'arrow':
ax.plot(x[0], y[0], start_marker, label=start_label)
else:
ax.plot(x[0], y[0], 'xk')
ax.annotate(start_label, xy=(x[0], y[0]), xytext=(0.95, 0.01),
textcoords='axes fraction', xycoords='data', arrowprops=dict({'color' : 'k', 'arrowstyle':"->"}),
horizontalalignment='right',verticalalignment='bottom')
if end_marker != 'arrow':
ax.plot(x[-1], y[-1], end_marker, label=end_label)
else:
ax.plot(x[-1], y[-1], 'xk')
ax.annotate(end_label, xy=(x[-1], y[-1]), xytext=(0.05, 0.95),
textcoords='axes fraction', xycoords='data', arrowprops=dict({'color' : 'k', 'arrowstyle':"->"}, ),
horizontalalignment='left',verticalalignment='top')
_label_axes(ax, '${0}$'.format(x_label), '${0}$'.format(y_label), fontsize=20, rotate_x_ticks=True)
if legend:
ax.legend()
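
# A minimal usage sketch for plot_2d_trajectory (synthetic data, purely illustrative):
if __name__ == "__main__":
    from matplotlib import pyplot as plt
    t = np.linspace(0.0, 4 * np.pi, 200)
    # Spiral trajectory; the default start/end markers label the first and last points.
    plot_2d_trajectory(t * np.cos(t), t * np.sin(t),
                       x_label='x', y_label='y', legend=True, color='gray')
    plt.show()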
|
# -*- coding: utf-8 -*-
import hashlib
import MySQLdb
import sys
import datetime
reload(sys)
sys.setdefaultencoding("utf-8")
host = "127.0.0.1"
user = "root"
password = "root"
database = "Blog"
charset = "utf8"
def open():
conn = MySQLdb.connect(host, user, password, database, charset=charset)
cursor = conn.cursor()
return conn, cursor
def close(conn, cursor):
    # Close the cursor before the connection that owns it.
    cursor.close()
    conn.close()
def allArticle():
conn, cursor = open()
cursor.execute("SELECT content,id from craft where type = '1' order by id DESC ;")
result = cursor.fetchall()
close(conn, cursor)
return result
def getArticle(id):
    conn, cursor = open()
    # Use a parameterized query so the id value is escaped by the driver.
    cursor.execute(
        "SELECT content,pubTime,type,name from craft,user where craft.id = %s and userid = user.id;",
        (id,))
    result = cursor.fetchall()
    close(conn, cursor)
    return result
def APIlogin(result):
conn, cursor = open()
result[0] = MySQLdb.escape_string(result[0])
result[1] = MySQLdb.escape_string(result[1])
cursor.execute("select name,password from user where name = '%s'" % result[0])
result = cursor.fetchall()
close(conn, cursor)
return result
def addArticle(data, userId):
conn, cursor = open()
data["title"] = MySQLdb.escape_string(data["title"].decode("utf-8"))
data["content"] = MySQLdb.escape_string(data["content"].decode("utf-8"))
cursor = conn.cursor()
today = datetime.date.today()
cursor.execute(
"insert into craft(userid,title,content,pubTime,type) values ('%s','%s','%s','%s','%s')" % (
userId, data["title"], data["content"], today.strftime("%Y-%m-%d"), "0"))
conn.commit()
close(conn, cursor)
return
def articleManage():
conn, cursor = open()
cursor.execute("SELECT id,title,type,pubTime from craft where type != '4' ORDER BY pubTime desc;")
result = cursor.fetchall()
close(conn, cursor)
return result
def change(result):
conn, cursor = open()
result[0] = MySQLdb.escape_string(str(result[0]))
result[1] = MySQLdb.escape_string(result[1])
cursor = conn.cursor()
cursor.execute("UPDATE craft set type = '%s' where id = '%s';" % (result[1], result[0]))
conn.commit()
close(conn, cursor)
return
def verifyToken():
conn, cursor = open()
cursor.execute("select password,id from user ;")
result = cursor.fetchall()
close(conn, cursor)
return result
def createUser():
conn, cursor = open()
cursor.execute("DROP table if EXISTS user")
cursor.execute('''create table user (
id INT(11) primary key not null unique auto_increment,
name VARCHAR(45),
isAdmin VARCHAR(45),
regTime DATE,
password VARCHAR(45)
)''')
close(conn, cursor)
return
def createCraft():
conn, cursor = open()
cursor.execute("DROP table if EXISTS craft")
cursor.execute('''create table craft (
id INT(11) primary key not null unique auto_increment,
userid INT(11),
title LONGTEXT,
content LONGTEXT,
pubTime date,
type INT(11)
)
''')
close(conn, cursor)
return
def insertUser(username, password):
conn, cursor = open()
today = datetime.date.today()
Md5 = hashlib.md5()
Md5.update(password)
Md5hex = Md5.hexdigest()
Md52 = hashlib.md5()
Md52.update(Md5hex)
password_twice = Md52.hexdigest()
cursor.execute("insert into user values('1','%s','1','%s','%s')" % (
username, today.strftime("%Y-%m-%d"), password_twice))
conn.commit()
close(conn, cursor)
return
def insertCraft():
conn, cursor = open()
today = datetime.date.today()
cursor.execute(
"insert into craft values('1','1','!','# !\n\n* DTP,; \n* ;\n* !','%s','1');" % (
today.strftime(
"%Y-%m-%d")))
conn.commit()
close(conn, cursor)
return
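
# A minimal setup sketch using the functions above (assumes a reachable MySQL
# server matching the connection settings at the top of this file; the admin
# username and password here are placeholders):
if __name__ == "__main__":
    createUser()
    createCraft()
    insertUser("admin", "change-me")
    insertCraft()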
|
import asyncio
import functools
import async_timeout
from thriftpy2.thrift import TMessageType
from .protocol import TBinaryProtocol
from .util import args2kwargs
from .errors import ConnectionClosedError, ThriftAppError
from .log import logger
async def create_connection(
service,
address=("127.0.0.1", 6000),
*,
protocol_cls=TBinaryProtocol,
timeout=None,
**kw,
):
"""Create a thrift connection.
This function is a :ref:`coroutine <coroutine>`.
Open a connection to the thrift server by address argument.
:param service: a thrift service object
:param address: a (host, port) tuple
:param protocol_cls: protocol type, default is :class:`TBinaryProtocol`
:param timeout: if specified, would raise `asyncio.TimeoutError`
if one rpc call is longer than `timeout`
:param kw: params related to asyncio.open_connection()
:return: newly created :class:`ThriftConnection` instance.
"""
host, port = address
reader, writer = await asyncio.open_connection(host, port, **kw)
iprotocol = protocol_cls(reader)
oprotocol = protocol_cls(writer)
return ThriftConnection(
service, iprot=iprotocol, oprot=oprotocol, address=address, timeout=timeout
)
class ThriftConnection:
"""
Thrift Connection.
"""
def __init__(self, service, *, iprot, oprot, address, timeout=None):
self.service = service
self._reader = iprot.trans
self._writer = oprot.trans
self.timeout = timeout
self.address = address
self.closed = False
self._oprot = oprot
self._iprot = iprot
self._seqid = 0
self._init_rpc_apis()
def _init_rpc_apis(self):
"""
find out all apis defined in thrift service, and create corresponding
method on the connection object, ignore it if some api name is conflicted with
an existed attribute of the connection object, which you should call by using
the :meth:`execute` method.
"""
for api in self.service.thrift_services:
if not hasattr(self, api):
setattr(self, api, functools.partial(self.execute, api))
else:
                logger.warning(
                    'api name {0} conflicts with an existing connection attribute; '
                    'you can still call this api via `execute("{0}")`'.format(api)
                )
    def __repr__(self):
        return "<ThriftConnection to {}>".format(self.address)
async def execute(self, api, *args, **kwargs):
"""
        Execute an rpc call by api name. This function is a :ref:`coroutine <coroutine>`.
:param api: api name defined in thrift file
:param args: positional arguments passed to api function
:param kwargs: keyword arguments passed to api function
:return: result of this rpc call
:raises: :class:`~asyncio.TimeoutError` if this task has exceeded the `timeout`
:raises: :class:`ThriftAppError` if thrift response is an exception defined in thrift.
:raises: :class:`ConnectionClosedError`: if server has closed this connection.
"""
if self.closed:
raise ConnectionClosedError("Connection closed")
try:
with async_timeout.timeout(self.timeout):
kw = args2kwargs(
getattr(self.service, api + "_args").thrift_spec, *args
)
kwargs.update(kw)
result_cls = getattr(self.service, api + "_result")
self._seqid += 1
self._oprot.write_message_begin(api, TMessageType.CALL, self._seqid)
args = getattr(self.service, api + "_args")()
for k, v in kwargs.items():
setattr(args, k, v)
args.write(self._oprot)
self._oprot.write_message_end()
await self._oprot.trans.drain()
if not getattr(result_cls, "oneway"):
result = await self._recv(api)
return result
except asyncio.TimeoutError:
self.close()
raise
except ConnectionError as e:
self.close()
logger.debug("connection error {}".format(str(e)))
raise ConnectionClosedError("the server has closed this connection") from e
except asyncio.IncompleteReadError as e:
self.close()
raise ConnectionClosedError("Server connection has closed") from e
async def _recv(self, api):
"""
A :ref:`coroutine <coroutine>` which receive response from the thrift server
"""
fname, mtype, rseqid = await self._iprot.read_message_begin()
if rseqid != self._seqid:
# transport should be closed if bad seq happened
self.close()
raise ThriftAppError(
ThriftAppError.BAD_SEQUENCE_ID,
fname + " failed: out of sequence response",
)
if mtype == TMessageType.EXCEPTION:
x = ThriftAppError()
await self._iprot.read_struct(x)
await self._iprot.read_message_end()
raise x
result = getattr(self.service, api + "_result")()
await self._iprot.read_struct(result)
await self._iprot.read_message_end()
if hasattr(result, "success") and result.success is not None:
return result.success
# void api without throws
if len(result.thrift_spec) == 0:
return
# check throws
for k, v in result.__dict__.items():
if k != "success" and v:
raise v
if hasattr(result, "success"):
raise ThriftAppError(ThriftAppError.MISSING_RESULT)
def close(self):
self._writer.close()
self.closed = True
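
# A minimal usage sketch. Assumptions: a local "echo.thrift" IDL defining an `Echo`
# service with an `echo(msg)` method, and a server listening on 127.0.0.1:6000; the
# file, service, and method names are hypothetical.
if __name__ == "__main__":
    import thriftpy2

    echo_thrift = thriftpy2.load("echo.thrift", module_name="echo_thrift")

    async def main():
        conn = await create_connection(echo_thrift.Echo, ("127.0.0.1", 6000), timeout=5)
        try:
            # RPC methods are attached dynamically by _init_rpc_apis(), so this is
            # equivalent to `await conn.execute("echo", "hello")`.
            print(await conn.echo("hello"))
        finally:
            conn.close()

    asyncio.run(main())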
|
from builtins import map
from builtins import range
from copy import deepcopy
from itertools import product
from sqlalchemy.sql import select
from fonduer.models import TemporaryImage, TemporaryDetailedImage
from fonduer.snorkel.candidates import CandidateSpace, Ngrams
from fonduer.snorkel.models import Candidate
from fonduer.snorkel.models.context import Document
from fonduer.models.context import DetailedFigure
from fonduer.snorkel.udf import UDF, UDFRunner
class CandidateExtractor(UDFRunner):
"""An operator to extract Candidate objects from a Context.
:param candidate_class: The type of relation to extract, defined using
:func:`snorkel.models.candidate_subclass <snorkel.models.candidate.candidate_subclass>`
:param cspaces: one or list of :class:`CandidateSpace` objects, one for each relation argument. Defines space of
Contexts to consider
:param matchers: one or list of :class:`snorkel.matchers.Matcher` objects, one for each relation argument. Only tuples of
Contexts for which each element is accepted by the corresponding Matcher will be returned as Candidates
:param candidate_filter: an optional function for filtering out candidates which returns a Boolean expressing whether or not
the candidate should be instantiated.
:param self_relations: Boolean indicating whether to extract Candidates that relate the same context.
Only applies to binary relations. Default is False.
:param nested_relations: Boolean indicating whether to extract Candidates that relate one Context with another
that contains it. Only applies to binary relations. Default is False.
:param symmetric_relations: Boolean indicating whether to extract symmetric Candidates, i.e., rel(A,B) and rel(B,A),
where A and B are Contexts. Only applies to binary relations. Default is True.
"""
def __init__(self, candidate_class, cspaces, matchers, candidate_filter=None, self_relations=False, nested_relations=False, symmetric_relations=True):
"""Initialize the CandidateExtractor."""
super(CandidateExtractor, self).__init__(CandidateExtractorUDF,
candidate_class=candidate_class,
cspaces=cspaces,
matchers=matchers,
candidate_filter=candidate_filter,
self_relations=self_relations,
nested_relations=nested_relations,
symmetric_relations=symmetric_relations)
def apply(self, xs, split=0, **kwargs):
"""Call the CandidateExtractorUDF."""
super(CandidateExtractor, self).apply(xs, split=split, **kwargs)
def clear(self, session, split, **kwargs):
"""Delete Candidates from given split the database."""
session.query(Candidate).filter(Candidate.split == split).delete()
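# A minimal usage sketch for CandidateExtractor. The relation name, matcher objects,
# and the import path of `candidate_subclass` are assumptions that depend on the
# bundled snorkel/fonduer version, so the example is left as comments:
#
#   PartTemp = candidate_subclass('PartTemp', ['part', 'temp'])
#   ngrams = OmniNgrams(n_max=3)
#   extractor = CandidateExtractor(PartTemp,
#                                  [ngrams, ngrams],
#                                  [part_matcher, temp_matcher],
#                                  symmetric_relations=False)
#   extractor.apply(docs, split=0)
#   candidates = session.query(Candidate).filter(Candidate.split == 0).all()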
class CandidateExtractorUDF(UDF):
"""UDF for performing candidate extraction."""
def __init__(self, candidate_class, cspaces, matchers, candidate_filter, self_relations, nested_relations, symmetric_relations, **kwargs):
"""Initialize the CandidateExtractorUDF."""
self.candidate_class = candidate_class
self.candidate_spaces = cspaces if type(cspaces) in [list, tuple] else [cspaces]
self.matchers = matchers if type(matchers) in [list, tuple] else [matchers]
self.candidate_filter = candidate_filter
self.nested_relations = nested_relations
self.self_relations = self_relations
self.symmetric_relations = symmetric_relations
# Check that arity is same
if len(self.candidate_spaces) != len(self.matchers):
raise ValueError("Mismatched arity of candidate space and matcher.")
else:
self.arity = len(self.candidate_spaces)
# Make sure the candidate spaces are different so generators aren't expended!
self.candidate_spaces = list(map(deepcopy, self.candidate_spaces))
# Preallocates internal data structures
self.child_context_sets = [None] * self.arity
for i in range(self.arity):
self.child_context_sets[i] = set()
super(CandidateExtractorUDF, self).__init__(**kwargs)
def apply(self, context, clear, split, **kwargs):
"""Extract candidates from the given Context.
Here, we define a context as a Phrase.
:param context:
:param clear:
:param split: Which split to use.
"""
# Generate TemporaryContexts that are children of the context using the candidate_space and filtered
# by the Matcher
for i in range(self.arity):
self.child_context_sets[i].clear()
for tc in self.matchers[i].apply(self.candidate_spaces[i].apply(self.session, context)):
tc.load_id_or_insert(self.session)
self.child_context_sets[i].add(tc)
# Generates and persists candidates
candidate_args = {'split': split}
for args in product(*[enumerate(child_contexts) for child_contexts in self.child_context_sets]):
# Apply candidate_filter if one was given
# Accepts a tuple of Context objects (e.g., (Span, Span))
# (candidate_filter returns whether or not proposed candidate passes throttling condition)
if self.candidate_filter:
if not self.candidate_filter(tuple(args[i][1] for i in range(self.arity))):
continue
# TODO: Make this work for higher-order relations
if self.arity == 2:
ai, a = args[0]
bi, b = args[1]
# Check for self-joins, "nested" joins (joins from span to its subspan), and flipped duplicate
# "symmetric" relations
# Modified by Zhewen (zsong39@wisc.edu) catch attribute error
try:
if not self.self_relations and a == b:
continue
elif not self.nested_relations and (a in b or b in a):
continue
elif not self.symmetric_relations and ai > bi:
continue
except AttributeError:
pass
# Assemble candidate arguments
for i, arg_name in enumerate(self.candidate_class.__argnames__):
candidate_args[arg_name + '_id'] = args[i][1].id
# Checking for existence
if not clear:
q = select([self.candidate_class.id])
for key, value in list(candidate_args.items()):
q = q.where(getattr(self.candidate_class, key) == value)
candidate_id = self.session.execute(q).first()
if candidate_id is not None:
continue
# Add Candidate to session
yield self.candidate_class(**candidate_args)
class OmniNgrams(Ngrams):
"""
Defines the space of candidates.
Defines the space of candidates as all n-grams (n <= n_max) in a Document _x_,
divided into Phrases inside of html elements (such as table cells).
"""
def __init__(self, n_max=5, split_tokens=['-', '/']):
"""
Initialize OmniNgrams.
"""
Ngrams.__init__(self, n_max=n_max, split_tokens=split_tokens)
def apply(self, session, context):
"""
Generate OmniNgrams from a Document by parsing all of its Phrases.
"""
if not isinstance(context, Document):
raise TypeError("Input Contexts to OmniNgrams.apply() must be of type Document")
doc = session.query(Document).filter(Document.id == context.id).one()
for phrase in doc.phrases:
for ts in Ngrams.apply(self, phrase):
yield ts
class OmniFigures(CandidateSpace):
"""
Defines the space of candidates as all figures in a Document _x_,
indexing by **position offset**.
"""
def __init__(self, type=None):
"""
Initialize OmniFigures.
Only support figure type filter.
"""
CandidateSpace.__init__(self)
if type is not None:
self.type=type.strip().lower()
self.type = None
def apply(self, session, context):
"""
Generate OmniFigures from a Document by parsing all of its Figures.
"""
if not isinstance(context, Document):
raise TypeError("Input Contexts to OmniFigures.apply() must be of type Document")
doc = session.query(Document).filter(Document.id == context.id).one()
for figure in doc.figures:
if self.type is None or figure.url.lower().endswith(self.type):
yield TemporaryImage(figure)
# Added by Wei & Zhewen
class OmniDetailedFigures(CandidateSpace):
"""
Defines the space of candidates as all figures in a Document _x_,
indexing by **position offset**.
"""
def __init__(self, type=None):
"""
Initialize OmniFigures.
Only support figure type filter.
"""
CandidateSpace.__init__(self)
if type is not None:
self.type=type.strip().lower()
self.type = None
def apply(self, session, context):
"""
Generate OmniFigures from a Document by parsing all of its Figures.
"""
if not isinstance(context, Document):
raise TypeError("Input Contexts to OmniFigures.apply() must be of type Document")
doc = session.query(Document).filter(Document.id == context.id).one()
for figure in doc.detailed_figures:
if self.type is None or figure.url.lower().endswith(self.type):
yield TemporaryDetailedImage(figure)
|
"""
Problem 2_7:
Heron's formula for computing the area of a triangle with sides a, b, and c is
as follows. Let s = .5(a + b + c) --- that is, 1/2 of the perimeter of the
triangle. Then the area is the square root of s(s-a)(s-b)(s-c). You can compute
the square root of x by x**.5 (raise x to the 1/2 power). Use an input
statement to get the length of the sides. Don't forget to convert this input
to a real number using float(). Adjust your output to be just like what you
see below. Here is a run of my program:
problem2_7()
Enter length of side one: 9
Enter length of side two: 12
Enter length of side three: 15
Area of a triangle with sides 9.0 12.0 15.0 is 54.0
"""
# %%
def problem2_7():
    """ Computes the area of a triangle from its three sides using Heron's formula. """
    a = float(input("Enter length of side one: "))
    b = float(input("Enter length of side two: "))
    c = float(input("Enter length of side three: "))
    s = (a + b + c) / 2
    area = (s * (s - a) * (s - b) * (s - c)) ** 0.5
    print("Area of a triangle with sides", a, b, c, "is", area)
#print(problem2_7())
|
class FromFile:
    """Read a text file and expose its lines, optionally turning newlines into <br> tags."""
    def __init__(self, filepath, new_lines=False):
        self.filepath = filepath
        with open(self.filepath, "r") as readfile:
            self.lines = readfile.readlines()
        # Either replace the trailing newline with an HTML line break or strip it.
        replacement = "<br>" if new_lines else ""
        self.lines = [line.replace("\n", replacement) for line in self.lines]

    def get_content_list(self):
        return self.lines
|
from tests.utils import build_sphinx, assert_doc_equal, parse_doc
def test_headings(common_src_dir, expected_common_dir):
out_dir = build_sphinx(common_src_dir, ['headings'])
assert_doc_equal(
parse_doc(out_dir, 'headings'),
parse_doc(expected_common_dir, 'headings'),
)
|
from collections import Counter
import io
import json
import os
from typing import Dict
_TABLE_HEADER = "\n|Ranking|Solver|Score|\n|---:|:---|---:|\n"
class BayesmarkReportBuilder:
def __init__(self) -> None:
self._solvers: Counter = Counter()
self._datasets: Counter = Counter()
self._models: Counter = Counter()
self._problems_counter = 1
self._problems_body = io.StringIO()
def add_problem(self, name: str, scores: Dict[str, float]) -> "BayesmarkReportBuilder":
if self._problems_body.closed:
self._problems_body = io.StringIO()
problem_title = f"### ({self._problems_counter}) Problem: {name}\n"
self._problems_body.write(problem_title)
self._problems_body.write(_TABLE_HEADER)
for idx, (solver, score) in enumerate(scores.items()):
row = f"|{idx + 1}|{solver}|{score:.5f}|\n"
self._problems_body.write(row)
self._solvers.update(scores.keys())
self._problems_counter += 1
return self
def add_dataset(self, dataset: str) -> "BayesmarkReportBuilder":
self._datasets.update([dataset])
return self
def add_model(self, model: str) -> "BayesmarkReportBuilder":
self._models.update([model])
return self
def assemble_report(self) -> str:
num_datasets = len(self._datasets)
num_models = len(self._models)
with open(os.path.join("benchmarks", "bayesmark", "report_template.md")) as file:
report_template = file.read()
# TODO(xadrianzetx) Consider using proper templating engine.
report = report_template.format(
num_solvers=len(self._solvers),
num_datasets=num_datasets,
num_models=num_models,
num_problems=num_datasets * num_models,
leaderboards=self._problems_body.getvalue(),
)
self._problems_body.close()
return report
def build_report() -> None:
report_builder = BayesmarkReportBuilder()
for partial_name in os.listdir("partial"):
dataset, model, *_ = partial_name.split("-")
path = os.path.join("partial", partial_name)
with open(path) as file:
scores = json.load(file)
problem_name = f"{dataset.capitalize()}-{model}"
report_builder = (
report_builder.add_problem(problem_name, scores)
.add_dataset(dataset)
.add_model(model)
)
report = report_builder.assemble_report()
with open(os.path.join("report", "benchmark-report.md"), "w") as file:
file.write(report)
if __name__ == "__main__":
os.makedirs("report", exist_ok=True)
build_report()
|
import pytest
import os
import json
import time
from os.path import exists
from mamba.core.context import Context
from mamba.core.msg import Empty
from mamba.component.gui.msg import RunAction
from mamba.component.gui.tk.save_view_tk import SaveViewComponent
from mamba.component.gui.tk import MainWindowTk
from mamba.core.testing.utils import CallbackTestClass
class TestClass:
project_name = 'testproject'
def setup_method(self):
""" setup_method called for every method """
self.context = Context()
self.context.set(
'mamba_dir',
os.path.join(os.path.dirname(__file__), '..', '..', '..', '..',
'..', 'mamba'))
def teardown_method(self):
""" teardown_method called for every method """
del self.context
def test_component_wo_context(self):
with pytest.raises(TypeError) as excinfo:
SaveViewComponent()
assert "missing 1 required positional argument" in str(excinfo.value)
def test_component_w_empty_context(self):
component = SaveViewComponent(self.context)
component.initialize()
# Test default configuration
assert component._configuration == {
'menu': 'View',
'name': 'Save View'
}
assert component._app is None
assert component._views == []
def test_generate_views_publication(self):
dummy_test_class = CallbackTestClass()
self.context.rx['generate_perspective'].subscribe(
dummy_test_class.test_func_1)
component = SaveViewComponent(self.context)
component.initialize()
# Test default configuration
assert component._configuration == {
'menu': 'View',
'name': 'Save View'
}
assert component._app is None
assert component._views == []
self.context.rx['run_plugin'].on_next(
RunAction(menu_title='View', action_name='Save View'))
time.sleep(.1)
assert dummy_test_class.func_1_times_called == 1
assert isinstance(dummy_test_class.func_1_last_value, Empty)
component._app.quit()
component._app.destroy()
def test_save_views(self):
dummy_test_class = CallbackTestClass()
component = SaveViewComponent(self.context)
component.initialize()
# Test default configuration
assert component._configuration == {
'menu': 'View',
'name': 'Save View'
}
assert component._app is None
assert component._views == []
component._process_component_perspective({
"menu_title": "Utils",
"action_name": "tc_window",
"data": {
"pos_x": 0,
"pos_y": 0,
"width": 670,
"height": 296,
"services": ["digitizer -> connect", "shutdown -> shutdown"]
}
})
component.save_views('test_view_1.json')
assert exists('test_view_1.json')
with open('test_view_1.json', "r") as read_file:
assert json.load(read_file) == [{
"menu_title": "Utils",
"action_name": "tc_window",
"data": {
"pos_x": 0,
"pos_y": 0,
"width": 670,
"height": 296,
"services":
["digitizer -> connect", "shutdown -> shutdown"]
}
}]
os.remove('test_view_1.json')
component.save_views('test_view_2')
assert exists('test_view_2.json')
with open('test_view_2.json', "r") as read_file:
assert json.load(read_file) == [{
"menu_title": "Utils",
"action_name": "tc_window",
"data": {
"pos_x": 0,
"pos_y": 0,
"width": 670,
"height": 296,
"services":
["digitizer -> connect", "shutdown -> shutdown"]
}
}]
component._process_component_perspective({
"menu_title": "Utils",
"action_name": "tm_window",
"data": {
"pos_x":
670,
"pos_y":
0,
"width":
653,
"height":
296,
"services": [
"cyclic_telemetry_tcp -> connected",
"digitizer -> connected"
]
}
})
os.remove('test_view_2.json')
component.save_views('test_view_3')
assert exists('test_view_3.json')
with open('test_view_3.json', "r") as read_file:
assert json.load(read_file) == [{
"menu_title": "Utils",
"action_name": "tc_window",
"data": {
"pos_x": 0,
"pos_y": 0,
"width": 670,
"height": 296,
"services":
["digitizer -> connect", "shutdown -> shutdown"]
}
}, {
'action_name': 'tm_window',
'data': {
'height':
296,
'pos_x':
670,
'pos_y':
0,
'services': [
'cyclic_telemetry_tcp -> connected',
'digitizer -> connected'
],
'width':
653
},
'menu_title': 'Utils'
}]
os.remove('test_view_3.json')
def test_component_w_menu_window(self):
main_window = MainWindowTk(self.context)
main_window.initialize()
# Test help is not in menu bar
assert not main_window._exists_menu('&Help')
component = SaveViewComponent(self.context)
component.initialize()
# Test default configuration
assert component._configuration == {
'menu': 'View',
'name': 'Save View'
}
assert component._app is None
assert component._views == []
# Test menu is in menu bar
assert main_window._exists_menu('View')
assert main_window._is_action_in_menu('View', 'Save View')
# Force close of any opened windows
main_window._load_app.quit()
main_window._load_app.destroy()
|
from django.views.generic import ListView, DetailView, TemplateView
import markdown
from apps.users.models import CustomUser
from apps.tools.models import Tool
from apps.shop.models import Product
from .models import Tag, Article
class HomepageListView(ListView):
model = Article
context_object_name = "articles"
template_name = "articles/home.html"
queryset = Article.objects.all().is_published()[:5]
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["main_author"] = CustomUser.objects.filter(main_user=True).first()
context["tools"] = Tool.objects.all()[:5]
context["products"] = Product.objects.all()[:5]
return context
class ArticleListView(ListView):
model = Article
context_object_name = "articles"
template_name = "articles/articles.html"
queryset = Article.objects.all().is_published()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["main_author"] = CustomUser.objects.filter(main_user=True).first()
return context
class ArticleDetailView(DetailView):
model = Article
context_object_name = "article"
template_name = "articles/article_detail.html"
def get_object(self):
        obj = super().get_object()
obj.update_views()
obj.content = obj.content_to_markdown()
return obj
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["main_author"] = CustomUser.objects.filter(main_user=True).first()
return context
class TagDetailView(DetailView):
model = Tag
context_object_name = "tag"
template_name = "articles/tag_detail.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["main_author"] = CustomUser.objects.filter(main_user=True).first()
context["articles"] = self.object.article_set.all()
context["tools"] = self.object.tool_set.all()
context["products"] = self.object.product_set.all()
return context
|
import PySimpleGUI as sg
class UI:
pad = (30, 2)
layout = [
[sg.Button('PWN', size=(10, 2), pad=pad), sg.Button('Inject', size=(10, 2), pad=pad), sg.Button(
'Reset', size=(10, 2), pad=pad), sg.Button('Quit', size=(10, 2), pad=pad)],
[sg.Text(size=(100, 15), key='-OUTPUT-', background_color='#202124',grab=True)]]
window = sg.Window('FancyRat', layout, size=(
800, 480), font=('Hack-Regular', 14))
window.BackgroundColor = '#081a2b'
window.ButtonColor = ('#FFFFFF', '#ad460e')
|
#!/usr/bin/env python3
import re
import pandas
import subprocess
import sys
def run_and_get_breakdown(cmd):
    """Run a benchmark command and pull the compute/graph time fractions out of its output."""
    ret = subprocess.check_output(cmd.split()).decode(sys.stdout.encoding)
    pattern = r'compute Time = .*? \((?P<percent>[01]\.[0-9][0-9][0-9])'
    compute = re.search(pattern, ret)
    compute = compute.group('percent')
    pattern = r'graph Time = .*? \((?P<percent>[01]\.[0-9][0-9][0-9])'
    graph = re.search(pattern, ret)
    graph = graph.group('percent')
    return compute, graph
labels = ['Benchmarks', 'Compute', 'Graph']
benchmarks = []
computes = []
graphs = []
# (benchmark label, command) pairs, run in order
runs = [
    ('BFS', 'mpirun -np 1 numactl -i all bin/BFS data/cora.mtx 4'),
    ('DELTA', 'mpirun -np 1 numactl -i all bin/DeltaStepping data/cora.mtx 1 6'),
    ('LDA', 'mpirun -np 1 numactl -i all bin/LDA data/cora.mtx 2000 708 100'),
    ('PR', 'mpirun -np 1 numactl -i all bin/PageRank data/cora.mtx'),
    ('SGD', 'mpirun -np 1 numactl -i all bin/SGD data/cora.mtx'),
    ('SSSP', 'mpirun -np 1 numactl -i all bin/SSSP data/cora.mtx 6'),
    ('TOP', 'mpirun -np 1 numactl -i all bin/TopologicalSort data/cora.mtx'),
    ('TRI', 'mpirun -np 1 numactl -i all bin/TriangleCounting data/cora.mtx'),
]
for name, cmd in runs:
    compute, graph = run_and_get_breakdown(cmd)
    benchmarks.append(name)
    computes.append(compute)
    graphs.append(graph)
# Save as a csv
print(','.join(labels))
for entry in zip(benchmarks, computes, graphs):
print(','.join(entry))
|
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait # type: ignore
from .....constant import timeout
from .....exception.timeout import WaitForPageTitleToChangeTimeoutException
def wait_until_page_title_equals(driver: object, page_title: str, timeout: int = timeout.DEFAULT) -> None:
    try:
        WebDriverWait(driver, timeout).until(EC.title_is(page_title))  # type: ignore
    except TimeoutException as error:
        raise WaitForPageTitleToChangeTimeoutException(driver, page_title) from error
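# Minimal usage sketch (assumes a configured Selenium WebDriver; shown commented out
# because this helper is normally imported as part of the surrounding package):
#
#   from selenium import webdriver
#   driver = webdriver.Chrome()
#   driver.get("https://example.com")
#   wait_until_page_title_equals(driver, "Example Domain", timeout=5)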
|
#!/usr/bin/env python
import sys
from argparse import ArgumentParser
from argparse import ArgumentDefaultsHelpFormatter
try:
from pypacket_dissector import encoder as en
except ImportError:
import encoder as en
def main():
ap = ArgumentParser(description="a packet encoder.",
formatter_class=ArgumentDefaultsHelpFormatter)
ap.add_argument("infile", metavar="INFILE", nargs="?", type=str,
default="-",
help='''specify a filename containing json.
default is stdin.''')
ap.add_argument("-v", action="store_true", dest="f_verbose",
help="enable verbose mode.")
ap.add_argument("-d", action="store_true", dest="f_debug",
help="enable debug mode.")
opt = ap.parse_args()
#
if opt.infile == "-":
jo = sys.stdin.read()
else:
        with open(opt.infile, "r") as f:
            jo = f.read()
#
jd = en.load_json_packet(jo)
ret = en.encoder(jd)
sys.stdout.buffer.write(ret)
if __name__ == "__main__":
main()
|
from simulation.motor.base_motor import BaseMotor
from simulation.motor.basic_motor import BasicMotor
|
# Specify which file (without .py extension) in the arts folder should be used
artFile = "art"
#artFile = "test"
# Speed of art
speed = 0.0043 #0.004
# Print code in the beginning
codePrint = False
codingSpeed = 0.001
codeColor='red'
# Audio
playAudio = True
audio = 'HappyBirthday.mp3'
# A random color is chosen from the list
color = ['red','green','yellow','blue','magenta','cyan','white']
# Change the keys of the dict to change the color codes
# If you change or remove the color codes for blink, none, or random, you also have to update the pprint() function in PyBirthdayWish.py.
colorCodes = {'①':'grey','②':'red','③':'green','④':'yellow','⑤':'blue','⑥':'magenta','⑦':'cyan','⑧':'white','⑨':'blink','⑩':'none','®':'random'}
|
# -*- coding: utf-8 -*-
#Coded By Ashkan Rafiee https://github.com/AshkanRafiee/PingWidget/
################Libraries################
import PySimpleGUI as sg
import webbrowser
import time
import threading
from pythonping import ping
################Libraries################
status = None
mypings1 = []
mypings2 = []
mypings3 = []
mypings4 = []
################GUI################
sg.theme('DarkGreen5') # Add a touch of color
# All the stuff inside your window.
layout = [[sg.Text('First IP',size=(32,1),pad=(20,1)),sg.Text('Second IP',size=(31,1),pad=(20,1)),sg.Text('Third IP',size=(32,1),pad=(20,1)),sg.Text('Fourth IP',pad=(20,1))],
[sg.Input(size=(36,1), key='ip1',pad=(20,15)),sg.Input(size=(36,1), key='ip2',pad=(20,15)),sg.Input(size=(36,1), key='ip3',pad=(20,15)),sg.Input(size=(36,1), key='ip4',pad=(20,15))],
[sg.Listbox(values='',size=(34, 10),pad=(20,1), key='list1'),sg.Listbox(values='',size=(34, 10),pad=(20,1), key='list2'),sg.Listbox(values='',size=(34, 10),pad=(20,1), key='list3'),sg.Listbox(values='',size=(34, 10),pad=(20,1), key='list4')],
[sg.Button('Website',pad=(170,30),size=(15,3)), sg.Button('Start',pad=(2,30),size=(15,3)), sg.Button('Stop',pad=(5,30),size=(15,3)), sg.Button('Exit',pad=(170,30),size=(15,3))],
[sg.Text('Made By Ashkan Rafiee')]]
# Create the Window
window = sg.Window('PingWidget', layout)
# Event Loop to process "events" and get the "values" of the inputs
################GUI################
def disabler():
window['ip1'].update(disabled=True)
window['ip2'].update(disabled=True)
window['ip3'].update(disabled=True)
window['ip4'].update(disabled=True)
def enabler():
window['ip1'].update(disabled=False)
window['ip2'].update(disabled=False)
window['ip3'].update(disabled=False)
window['ip4'].update(disabled=False)
def valueinputer():
global ip1,ip2,ip3,ip4
ip1 = str(values['ip1'])
ip2 = str(values['ip2'])
ip3 = str(values['ip3'])
ip4 = str(values['ip4'])
def uidataremover():
global mypings1,mypings2,mypings3,mypings4
mypings1 = []
mypings2 = []
mypings3 = []
mypings4 = []
window.Element('list1').Update(values=mypings1)
window.Element('list2').Update(values=mypings2)
window.Element('list3').Update(values=mypings3)
window.Element('list4').Update(values=mypings4)
def autopinger1():
global mypings1
while status:
if ip1 == "":
result = ping("tehran.ir",count=1)
mypings1.append(result)
window.Element('list1').Update(values=mypings1)
time.sleep(1)
else:
result = ping(ip1,count=1)
mypings1.append(result)
window.Element('list1').Update(values=mypings1)
time.sleep(1)
def autopinger2():
global mypings2
while status:
if ip2 == "":
result = ping("192.168.68.1",count=1)
mypings2.append(result)
window.Element('list2').Update(values=mypings2)
time.sleep(1)
else:
result = ping(ip2,count=1)
mypings2.append(result)
window.Element('list2').Update(values=mypings2)
time.sleep(1)
def autopinger3():
global mypings3
while status:
if ip3 == "":
result = ping("4.2.2.4",count=1)
mypings3.append(result)
window.Element('list3').Update(values=mypings3)
time.sleep(1)
else:
result = ping(ip3,count=1)
mypings3.append(result)
window.Element('list3').Update(values=mypings3)
time.sleep(1)
def autopinger4():
global mypings4
while status:
if ip4 == "":
result = ping("8.8.8.8",count=1)
mypings4.append(result)
window.Element('list4').Update(values=mypings4)
time.sleep(1)
else:
result = ping(ip4,count=1)
mypings4.append(result)
window.Element('list4').Update(values=mypings4)
time.sleep(1)
def threadhandler():
threading.Thread(target=autopinger1, daemon=True).start()
threading.Thread(target=autopinger2, daemon=True).start()
threading.Thread(target=autopinger3, daemon=True).start()
threading.Thread(target=autopinger4, daemon=True).start()
while True:
event, values = window.read()
if event == 'Start':
status = True
disabler()
valueinputer()
threadhandler()
if event == 'Stop':
status = False
enabler()
uidataremover()
if event == 'Website':
webbrowser.open_new('https://ashkanrafiee.ir/PingWidget')
if event == sg.WIN_CLOSED or event == 'Exit': # if user closes window or clicks cancel
break
|
__author__ = "Andre Barbe"
__project__ = "Auto-GTAP"
__created__ = "2018-3-15"
from typing import List
class ImportCsvSl4(object):
"""Imports the CSV Files created by SLTOHT"""
__slots__ = ["file_path_list"]
def __init__(self, file_path_list: List[str]) -> None:
self.file_path_list = file_path_list
@staticmethod
def filecontents(filepath) -> List[str]:
"""
Reads the CSV file into memory
:return:
"""
with open(filepath, "r") as reader: # Read the csv file
return [line for line in reader.readlines() if
line != " \n"] # deletes lines that are nothing but line breaks
def create(self) -> dict:
"""
Takes the CSV file of lines and returns a dictionary of cleaned values
:return:
"""
variable_values = {}
for file_path in self.file_path_list:
name_variable = []
list_variable_properties = [
"Linear",
"PreLevel",
"PostLevel",
"Changes"
]
for line in self.filecontents(file_path):
            # a line holds a variable name if it starts with " ! The " and ends
            # with "part" followed by a line break
if line[0:7] == " ! The " and line[-5:] == "part\n":
# Variable name is between the 7th character of the line and the first space after that character
name_variable = (line[7:].split(" "))[0]
            if name_variable:  # runs once a variable name has been found
                if line.split(",")[0].strip() in list_variable_properties:
                    # the first CSV field names one of the known variable properties;
                    # the second field holds its value
variable_index = line.split(",")[0].strip()
variable_value = line.split(",")[1].strip()
key = (file_path, name_variable, variable_index)
variable_values[key] = variable_value
return variable_values
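# Minimal usage sketch (the CSV path below is hypothetical); create() returns a dict
# keyed by (file_path, variable_name, property) tuples.
if __name__ == "__main__":
    importer = ImportCsvSl4(["sim_results.csv"])  # hypothetical SLTOHT output file
    for (csv_file, variable, prop), value in importer.create().items():
        print(csv_file, variable, prop, value)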
|
import cv2
import numpy as np
import math
from util import Util as u
class Prep(object):
def __init__(self, path):
self.__img = cv2.imread(path, cv2.IMREAD_COLOR)
self.__imgray = cv2.cvtColor(self.__img, cv2.COLOR_BGR2GRAY)
self.__invimgray = self.__negate()
self.__ottlvl = self.__OtsuAutoThresh()
self.__binimg = self.__imBinarize()
(self.__seg_col, self.__seg_gray) = self.__cvtBinToColAndGray()
    def __negate(self):
        # bitwise NOT of every pixel; equivalent to the original per-pixel loop
        return np.invert(self.__imgray)
def getColorPlates(self, src_clrimg, plate):
temp_img = src_clrimg.copy()
for x in temp_img:
for y in x:
if plate == 'B':
y[1] = 0
y[2] = 0
elif plate == 'G':
y[0] = 0
y[2] = 0
elif plate == 'R':
y[0] = 0
y[1] = 0
return temp_img
def __rmHoles(self, src_binimg):
ffill_img = src_binimg.copy()
mask = np.zeros((((ffill_img.shape)[0])+2, ((ffill_img.shape)[1])+2), np.uint8, 'C')
cv2.floodFill(ffill_img, mask, (0,0), 255)
final_img = src_binimg | cv2.bitwise_not(ffill_img)
return final_img
def __OtsuAutoThresh(self):
app_grlvls_wth_freq = u.getArrayOfGrayLevelsWithFreq(self.__invimgray)
dt = np.dtype([('wcv', float), ('bcv', float), ('glvl', np.uint8)])
var_ary = np.empty(0, dt, 'C')
for x in range(0, app_grlvls_wth_freq.size, 1):
thrslvl = (app_grlvls_wth_freq[x])[0]
wb = 0.0
mb = 0.0
varb2 = 0.0
wf = 0.0
mf = 0.0
varf2 = 0.0
(wf, mf, varf2) = self.__threshSubPt(x, app_grlvls_wth_freq.size, app_grlvls_wth_freq, wf, mf, varf2)
if (x == 0):
pass
else:
(wb, mb, varb2) = self.__threshSubPt(0, x, app_grlvls_wth_freq, wb, mb, varb2)
wcv = (wb * varb2) + (wf * varf2)
bcv = (wb * wf) * math.pow((mb - mf), 2)
var_ary = np.append(var_ary, np.array([(wcv, bcv, thrslvl)], dtype=dt), 0)
u.quickSort(var_ary, 0, var_ary.size - 1)
ottlvl = (var_ary[0])[2]
return ottlvl
def __threshSubPt(self, lower, upper, app_grlvls_wth_freq, w, m, var2):
for h in range(lower, upper, 1):
w = w + (app_grlvls_wth_freq[h])[1]
m = m + float(np.uint32((app_grlvls_wth_freq[h])[0]) * np.uint32((app_grlvls_wth_freq[h])[1]))
m = m / w
for h in range(lower, upper, 1):
var2 = var2 + float((math.pow((((app_grlvls_wth_freq[h])[0]) - m), 2)) * ((app_grlvls_wth_freq[h])[1]))
var2 = var2 / w
w = w / float((math.pow(app_grlvls_wth_freq.size, 2)))
return (w, m, var2)
def __imBinarize(self):
binimg = np.zeros((self.__invimgray).shape, np.uint8, 'C')
for x in range(0, ((self.__invimgray).shape)[0], 1):
for y in range(0, ((self.__invimgray).shape)[1], 1):
if (self.__invimgray[x, y] < self.__ottlvl):
binimg[x, y] = np.uint8(0)
else:
binimg[x, y] = np.uint8(255)
binimg = self.__rmHoles(binimg)
return binimg
def __cvtBinToColAndGray(self):
seg_col = np.zeros((self.__img).shape, np.uint8, 'C')
seg_gray = np.zeros((self.__imgray).shape, np.uint8, 'C')
i = 0
for x in seg_col:
j = 0
for y in x:
if ((self.__binimg)[i, j] == 255):
y[0] = (self.__img)[i, j, 0]
y[1] = (self.__img)[i, j, 1]
y[2] = (self.__img)[i, j, 2]
seg_gray[i, j] = self.__imgray[i, j]
j = j + 1
i = i + 1
return (seg_col, seg_gray)
def getActImg(self):
return self.__img
def getGrayImg(self):
return self.__imgray
def getInvrtGrayImg(self):
return self.__invimgray
def getBinaryImg(self):
return self.__binimg
def getOtsuThresholdLevel(self):
return self.__ottlvl
def getSegColImg(self):
return self.__seg_col
def getSegGrayImg(self):
return self.__seg_gray
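# Minimal usage sketch (hypothetical image path): the constructor already performs
# inversion, Otsu thresholding and segmentation, so this only displays the results.
if __name__ == "__main__":
    prep = Prep("sample.png")  # hypothetical input image
    print("Otsu threshold level:", prep.getOtsuThresholdLevel())
    cv2.imshow("binary", prep.getBinaryImg())
    cv2.imshow("segmented (color)", prep.getSegColImg())
    cv2.waitKey(0)
    cv2.destroyAllWindows()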
|
import argparse
def options():
p = argparse.ArgumentParser(description = 'Arguments for image2image-translation training.')
p.add_argument('--rec-type', help = 'reconstruction loss type (pixel / feature)', default = 'pixel')
p.add_argument('--gan-loss-type', help = 'gan loss type', default = 'HINGEGAN')
p.add_argument('--rec-weight', type = float, help = 'rec loss weight', default = 10)
p.add_argument('--vgg-weight', type = float, help = 'vgg loss weight', default = 10)
p.add_argument('--kl-weight', type = float, help = 'kl divergence weight', default = 0.05)
p.add_argument('--ds-weight', type = float, help = 'ds loss weight', default = 8)
p.add_argument('--decay-start-epoch', type = int, help = 'what epoch will the learning rate decay start', default = -1)
p.add_argument('--global-freeze-epoch', type = int, help = 'how many epochs will the global network be frozen', default = 10)
p.add_argument('--epoch', type = int, help = 'epoch num', default = 200)
p.add_argument('--bs', type = int, help = 'batch size', default = 1)
p.add_argument('--use-ttur', action = 'store_true', help = 'use TTUR')
p.add_argument('--lr', type = float, help = 'learning rate', default = 0.0002)
p.add_argument('--beta1', type = float, help = 'beta1 parameter for the Adam optimizer', default = 0.0)
p.add_argument('--beta2', type = float, help = 'beta2 parameter for the Adam optimizer', default = 0.9)
p.add_argument('--ic', type = int, help = 'input channel num (when using in label mode, consider the case of unk)', default = 3)
p.add_argument('--oc', type = int, help = 'output channel num', default = 3)
p.add_argument('--height', type = int, help = 'image height (2^n)', default = 256)
p.add_argument('--width', type = int, help = 'image width (2^n)', default = 256)
p.add_argument('--network-mode', help = 'model depth (normal/more/most)', default = 'normal')
p.add_argument('--net-G-type', help = 'Generator Architecture Type', default = 'UNet')
p.add_argument('--use-sn-G', action = 'store_true', help = 'use Spectral Normalization in the Generator')
p.add_argument('--norm-type-G', help = 'normalization type in the Generator', default = 'instancenorm')
p.add_argument('--z-dim', type = int, help = 'latent vector size', default = 0)
p.add_argument('--net-D-type', help = 'Discriminator Architecture Type', default = 'PatchGan')
p.add_argument('--use-sn-D', action = 'store_true', help = 'use Spectral Normalization in the Discriminator')
p.add_argument('--norm-type-D', help = 'normalization type in the Discriminator', default = 'instancenorm')
p.add_argument('--use-sigmoid', action = 'store_true', help = 'use Sigmoid in the last layer of Discriminator')
p.add_argument('--D-scale-num', type = int, help = 'number of multiscale Discriminator', default = 1)
p.add_argument('--use-encoder', action = 'store_true', help = 'use encoder')
p.add_argument('--weight-init-type', help = 'network weight init type (xavier, normal)', default = 'xavier')
p.add_argument('--print-freq', type = int, help = 'prints the loss value every few iterations', default = 100)
p.add_argument('--vis-freq', type = int, help = 'saves the visualization every few iterations', default = 100)
p.add_argument('--vis-pth', help = 'path to save the visualizations', default = 'visualizations/')
p.add_argument('--model-pth', help = 'path to save the final model', default = 'models/model.pth')
p.add_argument('--data-type', help = 'dataloader type (image / label)', default = 'image')
p.add_argument('--trn-src-pth', help = 'train src dataset path', default = 'data/train/src')
p.add_argument('--trn-trg-pth', help = 'train trg dataset path', default = 'data/train/trg')
p.add_argument('--val-src-pth', help = 'val src dataset path', default = 'data/val/src')
p.add_argument('--val-trg-pth', help = 'val trg dataset path', default = 'data/val/trg')
p.add_argument('--label-pth', help = 'label path', default = 'data/label.pkl')
p.add_argument('--num-workers', type = int, help = 'num workers for the dataloader', default = 10)
p.add_argument('--grad-acc', type = int, help = 'split the batch into n steps', default = 1)
p.add_argument('--multigpu', action = 'store_true', help = 'use multiple gpus')
args = p.parse_args()
return args
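# Minimal sketch of how this parser is typically consumed by a training script:
# parse the command line (defaults apply when no flags are given) and read fields
# off the returned namespace (dashes become underscores).
if __name__ == "__main__":
    args = options()
    print(args.epoch, args.bs, args.lr, args.net_G_type)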
|
#!/usr/bin/python
# coding: utf-8 -*-
# Copyright (c) 2016, Mario Santos <mario.rf.santos@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: os_server_metadata
short_description: Add/Update/Delete Metadata in Compute Instances from OpenStack
extends_documentation_fragment: openstack
version_added: "2.6"
author: "Mario Santos (@ruizink)"
description:
- Add, Update or Remove metadata in compute instances from OpenStack.
options:
server:
description:
- Name of the instance to update the metadata
required: true
aliases: ['name']
meta:
description:
- 'A list of key value pairs that should be provided as a metadata to
the instance or a string containing a list of key-value pairs.
Eg: meta: "key1=value1,key2=value2"'
required: true
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Availability zone in which to create the snapshot.
required: false
requirements:
- "python >= 2.7"
- "openstack"
'''
EXAMPLES = '''
# Creates or updates hostname=test1 as metadata of the server instance vm1
- name: add metadata to compute instance
hosts: localhost
tasks:
- name: add metadata to instance
os_server_metadata:
state: present
auth:
auth_url: https://openstack-api.example.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: vm1
meta:
hostname: test1
group: group1
# Removes the keys under meta from the instance named vm1
- name: delete metadata from compute instance
hosts: localhost
tasks:
- name: delete metadata from instance
os_server_metadata:
state: absent
auth:
auth_url: https://openstack-api.example.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: vm1
meta:
hostname:
group:
'''
RETURN = '''
server_id:
description: The compute instance id where the change was made
returned: success
type: str
sample: "324c4e91-3e03-4f62-9a4d-06119a8a8d16"
metadata:
description: The metadata of compute instance after the change
returned: success
type: dict
sample: {'key1': 'value1', 'key2': 'value2'}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import (openstack_full_argument_spec,
openstack_module_kwargs,
openstack_cloud_from_module)
def _needs_update(server_metadata=None, metadata=None):
if server_metadata is None:
server_metadata = {}
if metadata is None:
metadata = {}
return len(set(metadata.items()) - set(server_metadata.items())) != 0
def _get_keys_to_delete(server_metadata_keys=None, metadata_keys=None):
if server_metadata_keys is None:
server_metadata_keys = []
if metadata_keys is None:
metadata_keys = []
return set(server_metadata_keys) & set(metadata_keys)
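# Illustrative behaviour of the helpers above (not part of the module API):
# _needs_update(server_metadata={'a': '1'}, metadata={'a': '1', 'b': '2'}) is True,
# because 'b' is not yet set on the server, while
# _get_keys_to_delete(['a', 'c'], ['a', 'b']) returns {'a'}: only keys that already
# exist on the server are candidates for deletion.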
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True, aliases=['name']),
meta=dict(required=True, type='dict'),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
state = module.params['state']
server_param = module.params['server']
meta_param = module.params['meta']
changed = False
sdk, cloud = openstack_cloud_from_module(module)
try:
server = cloud.get_server(server_param)
if not server:
module.fail_json(
msg='Could not find server {0}'.format(server_param))
if state == 'present':
# check if it needs update
if _needs_update(server_metadata=server.metadata,
metadata=meta_param):
if not module.check_mode:
cloud.set_server_metadata(server_param, meta_param)
changed = True
elif state == 'absent':
# remove from params the keys that do not exist in the server
keys_to_delete = _get_keys_to_delete(server.metadata.keys(),
meta_param.keys())
if len(keys_to_delete) > 0:
if not module.check_mode:
cloud.delete_server_metadata(server_param, keys_to_delete)
changed = True
if changed:
server = cloud.get_server(server_param)
module.exit_json(
changed=changed, server_id=server.id, metadata=server.metadata)
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=e.message, extra_data=e.extra_data)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name='wrench-documentation',
version='2.0.0',
requires=[
"sphinxcontrib.phpdomain"
]
)
|
# Copyright 2018 Timothée Chauvin
# Copyright 2017-2019 Joseph Lorimer <joseph@lorimer.me>
#
# Permission to use, copy, modify, and distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright
# notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
import re
from datetime import date
from ssl import _create_unverified_context
from urllib.error import HTTPError
from urllib.parse import urlsplit
from urllib.request import urlopen
from anki.notes import Note
from anki.utils import isMac
from aqt import mw
from aqt.utils import (
chooseList,
getText,
showInfo,
showCritical,
showWarning,
tooltip,
)
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (
QAbstractItemView,
QDialog,
QDialogButtonBox,
QListWidget,
QListWidgetItem,
QVBoxLayout,
)
from goose3 import Goose
from bs4 import BeautifulSoup, Comment
from requests import get
from requests.exceptions import ConnectionError
from .lib.feedparser import parse
from .pocket import Pocket
from .util import setField
class Importer:
pocket = None
def _goose(self, url):
return Goose().extract(url=url)
def _fetchWebpage(self, url):
if isMac:
context = _create_unverified_context()
html = urlopen(url, context=context).read()
else:
headers = {'User-Agent': self.settings['userAgent']}
html = get(url, headers=headers).content
webpage = BeautifulSoup(html, 'html.parser')
for tagName in self.settings['badTags']:
for tag in webpage.find_all(tagName):
tag.decompose()
for c in webpage.find_all(text=lambda s: isinstance(s, Comment)):
c.extract()
return webpage
def _createNote(self, title, text, source, priority=None):
if self.settings['importDeck']:
deck = mw.col.decks.byName(self.settings['importDeck'])
if not deck:
showWarning(
'Destination deck no longer exists. '
'Please update your settings.'
)
return
did = deck['id']
else:
did = mw.col.conf['curDeck']
model = mw.col.models.byName(self.settings['modelName'])
note = Note(mw.col, model)
setField(note, self.settings['titleField'], title)
setField(note, self.settings['textField'], text)
setField(note, self.settings['sourceField'], source)
if priority:
setField(note, self.settings['prioField'], priority)
note.model()['did'] = did
mw.col.addNote(note)
mw.deckBrowser.show()
return mw.col.decks.get(did)['name']
def importWebpage(self, url=None, priority=None, silent=False):
if not url:
url, accepted = getText('Enter URL:', title='Import Webpage')
else:
accepted = True
if not url or not accepted:
return
if not urlsplit(url).scheme:
url = 'http://' + url
elif urlsplit(url).scheme not in ['http', 'https']:
showCritical('Only HTTP requests are supported.')
return
try:
webpage = self._goose(url)
body = re.sub("(.+)", r"<p>\1</p>", webpage.cleaned_text)
title = webpage.title
except Exception as e:
            showWarning(f'Failed to extract webpage with goose: {e}')
return
source = self.settings['sourceFormat'].format(
date=date.today(), url='<a href="%s">%s</a>' % (url, url)
)
if self.settings['prioEnabled'] and not priority:
priority = self._getPriority(title)
deck = self._createNote(title, body, source, priority)
if not silent:
tooltip('Added to deck: {}'.format(deck))
return deck
def _getPriority(self, name=None):
if name:
prompt = 'Select priority for <b>{}</b>'.format(name)
else:
prompt = 'Select priority for import'
return self.settings['priorities'][
chooseList(prompt, self.settings['priorities'])
]
def importFeed(self):
url, accepted = getText('Enter URL:', title='Import Feed')
if not url or not accepted:
return
if not urlsplit(url).scheme:
url = 'http://' + url
log = self.settings['feedLog']
try:
feed = parse(
url,
agent=self.settings['userAgent'],
etag=log[url]['etag'],
modified=log[url]['modified'],
)
except KeyError:
log[url] = {'downloaded': []}
feed = parse(url, agent=self.settings['userAgent'])
if feed['status'] not in [200, 301, 302]:
showWarning(
'The remote server has returned an unexpected status: '
'{}'.format(feed['status'])
)
if self.settings['prioEnabled']:
priority = self._getPriority()
else:
priority = None
entries = [
{'text': e['title'], 'data': e}
for e in feed['entries']
if e['link'] not in log[url]['downloaded']
]
if not entries:
showInfo('There are no new items in this feed.')
return
selected = self._select(entries)
if not selected:
return
n = len(selected)
mw.progress.start(
label='Importing feed entries...', max=n, immediate=True
)
for i, entry in enumerate(selected, start=1):
deck = self.importWebpage(entry['link'], priority, True)
log[url]['downloaded'].append(entry['link'])
mw.progress.update(value=i)
log[url]['etag'] = feed.etag if hasattr(feed, 'etag') else ''
log[url]['modified'] = (
feed.modified if hasattr(feed, 'modified') else ''
)
mw.progress.finish()
tooltip('Added {} item(s) to deck: {}'.format(n, deck))
def importPocket(self):
if not self.pocket:
self.pocket = Pocket()
articles = self.pocket.getArticles()
if not articles:
return
selected = self._select(articles)
if self.settings['prioEnabled']:
priority = self._getPriority()
else:
priority = None
if selected:
n = len(selected)
mw.progress.start(
label='Importing Pocket articles...', max=n, immediate=True
)
for i, article in enumerate(selected, start=1):
deck = self.importWebpage(article['given_url'], priority, True)
if self.settings['pocketArchive']:
self.pocket.archive(article)
mw.progress.update(value=i)
mw.progress.finish()
tooltip('Added {} item(s) to deck: {}'.format(n, deck))
def _select(self, choices):
if not choices:
return []
dialog = QDialog(mw)
layout = QVBoxLayout()
listWidget = QListWidget()
listWidget.setSelectionMode(QAbstractItemView.ExtendedSelection)
for c in choices:
item = QListWidgetItem(c['text'])
item.setData(Qt.UserRole, c['data'])
listWidget.addItem(item)
buttonBox = QDialogButtonBox(
QDialogButtonBox.Close | QDialogButtonBox.Save
)
buttonBox.accepted.connect(dialog.accept)
buttonBox.rejected.connect(dialog.reject)
buttonBox.setOrientation(Qt.Horizontal)
layout.addWidget(listWidget)
layout.addWidget(buttonBox)
dialog.setLayout(layout)
dialog.setWindowModality(Qt.WindowModal)
dialog.resize(500, 500)
choice = dialog.exec_()
if choice == 1:
return [
listWidget.item(i).data(Qt.UserRole)
for i in range(listWidget.count())
if listWidget.item(i).isSelected()
]
return []
|
from django.urls import include, re_path
from rest_framework.routers import DefaultRouter
from .views import OfferViewSet
router = DefaultRouter()
router.register('offers', OfferViewSet)
urlpatterns = [
re_path('^', include(router.urls)),
]
|
import asyncio
import unittest
from unittest import IsolatedAsyncioTestCase
from valheim_server.log_dog import ValheimLogDog
from utils import default
from mongoengine import *
config = default.config()
unittest.TestLoader.sortTestMethodsUsing = None
class TestDeathDocument(Document):
key = UUIDField(primary_key=True)
death_count = IntField(min=0)
class TestValheimLogNonAsync(unittest.TestCase):
def setUp(self):
self.steamID = "76561197999876368"
self.test_dog = ValheimLogDog.data = {
'SteamID':'',
'ZDOID':'',
'steam_login_time':'',
'ZDOID_login_time':'',
'online':False,
}
self.LogDogObject = ValheimLogDog('bot')
@unittest.skip("Individual method not working? works when called from extract_log_parts()")
def test_steamName_from_api(self):
steamName = ValheimLogDog.get_steam_persona(self.LogDogObject, self.steamID)
self.assertEqual(steamName, "choochoolain")
class TestValheimLog(IsolatedAsyncioTestCase):
def setUp(self):
        connect('bytebot_test_db', host="mongomock://localhost")
self.test_log_message_Steam = 'Got connection SteamID 76561197999876368'
self.test_log_date_Steam = "04/12/2021 19:35:20"
self.test_log_message_zDOID = 'Got character ZDOID from Halfdan : 3267341458:1'
self.test_log_date_zDOID = "04/12/2021 19:35:55"
self.test_log_message_char_death = 'Got character ZDOID from Halfdan : 0:0'
self.test_log_connection_message = 'Connections 1 ZDOS:130588 sent:0 recv:422'
self.test_log_disconnect_message = 'Closing Socket 76561197999876368'
self.steamID = "76561197999876368"
self.test_dog = ValheimLogDog.data = {
'SteamID':'',
'ZDOID':'',
'steam_login_time':'',
'ZDOID_login_time':'',
'online':False,
}
self.LogDogObject = ValheimLogDog('bot')
#self.loop = asyncio.get_event_loop()
def tearDown(self):
disconnect()
async def asyncTearDown(self):
pass
async def test_SteamIDFromValheimLogMessage(self):
steamID = await ValheimLogDog.extract_log_parts(self.LogDogObject, self.test_log_message_Steam, self.test_log_date_Steam)
print(f'steamID: {steamID}')
self.assertEqual(steamID, '76561197999876368')
async def test_zDOIDFromValheimLogMessage(self):
zDOID = await ValheimLogDog.extract_log_parts(self.LogDogObject, self.test_log_message_zDOID, self.test_log_date_zDOID)
print(f'ZDOID: {zDOID}')
self.assertEqual(zDOID, 'Halfdan')
async def test_CharacterDeathFromValheimLogMessage(self):
zDOID = await ValheimLogDog.extract_log_parts(self.LogDogObject, self.test_log_message_char_death, self.test_log_date_zDOID)
print(f'ZDOID: {zDOID}')
self.assertEqual(zDOID, 'Halfdan death!')
async def test_ConnectionMessage(self):
        active_connections = await ValheimLogDog.extract_log_parts(self.LogDogObject, self.test_log_connection_message, self.test_log_date_Steam)
        print(f'active: {active_connections}, test_msg: {self.test_log_connection_message}')
        self.assertEqual(active_connections, '1')
async def test_DisconnectMessage(self):
disconnection = await ValheimLogDog.extract_log_parts(self.LogDogObject, self.test_log_disconnect_message, self.test_log_date_Steam)
self.assertEqual(disconnection, '76561197999876368')
if __name__ == '__main__':
unittest.main()
|
import os
import shutil
import pandas as pd
path_data = '/Users/karin.hrovatin/Documents/H3HerbstHackathon/challenge/git/thermal_barrierlife_prediction/data/'
def mkdir_missing(path, clean_exist=True):
if clean_exist and os.path.isdir(path):
shutil.rmtree(path)
if not os.path.isdir(path):
os.mkdir(path)
path_sorted = path_data + 'train_sorted/'
mkdir_missing(path_sorted)
metadata = pd.read_table(path_data + 'train-orig.csv', sep=',')
for group, data in metadata.groupby('Lifetime'):
path = path_sorted + str(group) + '/'
mkdir_missing(path)
for img in data.Image_ID.values:
img = img + '.tif'
shutil.copyfile(path_data + 'train/' + img, path + img)
|
from PyQt5.QtWidgets import QWidget, QSizePolicy, QGroupBox, QVBoxLayout, QHBoxLayout, QSlider
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt
from widgets.joystick import JoyStick
class CameraControls(QWidget):
"""
Custom Qt Widget to group camera controls together.
"""
camControls = pyqtSignal(tuple)
def __init__(self, *args, **kwargs):
super(CameraControls, self).__init__(*args, **kwargs)
camera_groupbox = QGroupBox("Camera Controls")
camera_layout = QHBoxLayout()
layout = QVBoxLayout()
layout.addWidget(camera_groupbox)
self._joystick = JoyStick()
self._joystick.mouseMoved.connect(self.controls_moved)
self._joystick.mouseDoubleClicked.connect(self.controls_moved)
self._z_slider = QSlider(Qt.Vertical)
self._z_slider.setRange(-1, 1)
self._z_slider.valueChanged.connect(self.controls_moved)
camera_layout.addWidget(self._z_slider)
camera_layout.addWidget(self._joystick)
camera_groupbox.setLayout(camera_layout)
self.setSizePolicy(
QSizePolicy.Maximum,
QSizePolicy.MinimumExpanding
)
self.setLayout(layout)
@pyqtSlot()
def controls_moved(self):
angle = self._joystick.joystickDirection()
z_value = self._z_slider.value()
self.camControls.emit((angle, z_value))
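# Minimal usage sketch (assumes a Qt environment and that the widgets package above is
# importable): show the control group on its own and print the emitted (direction, z) tuples.
if __name__ == "__main__":
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    controls = CameraControls()
    controls.camControls.connect(print)  # each signal carries a (joystick_direction, z_value) tuple
    controls.show()
    sys.exit(app.exec_())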
|
from . import halconfig_types as types
from . import halconfig_dependency as dep
name = "FEM"
displayname = "External FEM"
compatibility = dep.Dependency(platform=(dep.Platform.SERIES1, dep.Platform.SERIES2), mcu_type=dep.McuType.RADIO) # EFR32
category = " Radio"
studio_module = {
"basename" : "SDK.HAL.FEM",
"modules" : [types.StudioFrameworkModule("BASE", [types.Framework.ZNET, types.Framework.THREAD, types.Framework.CONNECT]),
types.StudioFrameworkModule("FLEX", [types.Framework.FLEX])],
}
enable = {
"define": "HAL_FEM_ENABLE",
"description": "Enable FEM",
}
options = {
"BSP_FEM_RX": {
"type": types.PRSChannelLocation("BSP_FEM_RX", custom_name="FEM_RX"),
"description": "RX PRS channel",
"longdescription": "PRS channel for RX control (FEM pin CRX). If no TX defined, it is dual use pin.",
},
"BSP_FEM_SLEEP": {
"type": types.PRSChannelLocation("BSP_FEM_SLEEP", custom_name="FEM_SLEEP"),
"description": "Sleep PRS channel",
"longdescription": "PRS channel for sleep control (FEM pin CSD). Optional: If used, must be channel immediately following RX PRS channel.",
},
"BSP_FEM_TX": {
"type": types.PRSChannelLocation("BSP_FEM_TX", custom_name="FEM_TX"),
"description": "TX PRS channel",
"longdescription": "PRS channel for TX control (FEM pin CTX). Optional: If not used, assumes CRX is dual use pin.",
},
"HAL_FEM_RX_ACTIVE": {
"type": "boolean",
"description": "Enable RX mode",
"defaultValue": "False",
"longdescription": "Enable RX mode",
"generate_if_hidden": False,
},
"HAL_FEM_TX_ACTIVE": {
"type": "boolean",
"description": "Enable TX mode",
"defaultValue": "False",
"longdescription": "Enable TX mode",
"generate_if_hidden": False,
},
"BSP_FEM_BYPASS": {
"type": types.Pin(),
"description": "Bypass pin",
"longdescription": "Select pin for the bypass (FEM pin CPS) signal (optional)",
"subcategory": "Bypass",
},
"BSP_FEM_TXPOWER": {
"type": types.Pin(),
"description": "TX power pin",
"longdescription": "Select pin for the tx power mode (FEM pin CHL) signal (optional)",
"subcategory": "Power",
},
"HAL_FEM_BYPASS_ENABLE": {
"type": "boolean",
"description": "Enable bypass mode",
"defaultValue": "False",
"longdescription": "Enable bypass mode",
"subcategory": "Bypass",
"generate_if_hidden": False,
},
"HAL_FEM_TX_HIGH_POWER": {
"type": "boolean",
"description": "Enable high power tx",
"defaultValue": "False",
"longdescription": "Enable high power tx on true, low power on false",
"subcategory": "Power",
"generate_if_hidden": False,
},
}
|
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
import cv2 as cv
import numpy as np
"""
OpenCV DNN单张与多张图像的推断
OpenCV DNN中支持单张图像推断,同时还支持分批次方式的图像推断,对应的两个相关API分别为blobFromImage与blobFromImages,它们的返回对象都
是一个四维的Mat对象-按照顺序分别为NCHW 其组织方式如下:
N表示多张图像
C表示接受输入图像的通道数目
H表示接受输入图像的高度
W表示接受输入图像的宽度
"""
bin_model = "../../../raspberry-auto/models/googlenet/bvlc_googlenet.caffemodel"
config = "../../../raspberry-auto/models/googlenet/bvlc_googlenet.prototxt"
txt = "../../../raspberry-auto/models/googlenet/classification_classes_ILSVRC2012.txt"
def main():
net = cv.dnn.readNetFromCaffe(config, bin_model)
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
classes = None
with open(txt, "r") as f:
classes = f.read().rstrip("\n").split("\n")
images = [cv.imread("../../../raspberry-auto/pic/70eb501cjw1dwp7pecgewj.jpg"),
cv.imread("../../../raspberry-auto/pic/Meter_in_word.png"),
cv.imread("../../../raspberry-auto/pic/hw_freebuds3_2.jpg")]
data = cv.dnn.blobFromImages(images, 1.0, (224, 224), (104, 117, 123), False, crop=False)
net.setInput(data)
outs = net.forward()
t, _ = net.getPerfProfile()
text = "Inference time: %.2f ms" % (t * 1000.0 / cv.getTickFrequency())
print(text)
for i in range(len(outs)):
out = outs[i]
class_id = int(np.argmax(out))
confidence = out[class_id]
cv.putText(images[i], text, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))
label = "%s: %.4f" % (classes[class_id] if classes else "Class #%d" % class_id, confidence)
cv.putText(images[i], label, (50, 50), cv.FONT_HERSHEY_SIMPLEX, 0.75, (255, 0, 0), 2)
cv.imshow("googlenet demo", images[i])
cv.waitKey(0)
if "__main__" == __name__:
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python3
"""Python replacement fot cat.csh.
Defined as:
my $fh = &HgAutomate::mustOpen(">$runDir/cat.csh");
print $fh <<_EOF_
#!/bin/csh -ef
find $outRoot/\$1/ -name "*.psl" | xargs cat | grep "^#" -v | gzip -c > \$2
_EOF_
;
close($fh);
"""
import sys
import os
import gzip
def gzip_str(string_: str) -> bytes:
ret = gzip.compress(string_.encode())
del string_
return ret
def main():
if len(sys.argv) < 3:
sys.exit(f"Usage: {sys.argv[0]} [input] [output] [-v|--verbose]")
_v = False
if "-v" in sys.argv or "--verbose" in sys.argv:
_v = True
in_ = sys.argv[1]
out_ = sys.argv[2]
print(f"Input: {in_}\nOutput: {out_}") if _v else None
psl_filenames = [x for x in os.listdir(in_) if x.endswith(".psl")]
buffer = []
print(f"Psl filenames: {psl_filenames}") if _v else None
    # read all psl files, skipping comment lines (those starting with "#"),
    # matching the `grep "^#" -v` in the original csh script
for fname in psl_filenames:
path = os.path.join(in_, fname)
print(f" Reading {path}...") if _v else None
        with open(path, 'r') as f:
            for line in f:
                if line.startswith("#"):
                    continue
                buffer.append(line)
print(f"Lines in buffer: {len(buffer)}") if _v else None
# zip the strings we get
str_to_zip = "".join(buffer)
print(f"Unzipped string len: {len(str_to_zip)}") if _v else None
zipped_str = gzip_str(str_to_zip)
print(f"Saving {len(zipped_str)} bytes to output")
with open(out_, 'wb') as f:
f.write(zipped_str)
if __name__ == "__main__":
main()
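# Example invocation (script and path names are illustrative), mirroring the
# cat.csh call it replaces:
#   python3 cat.py run_dir/chunk_0/ chunk_0.psl.gz --verbose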
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
from django.db.models import QuerySet
from django.utils.functional import SimpleLazyObject
from graphene import Field, InputField, ObjectType, Int, Argument, ID, Boolean, List
from graphene.types.base import BaseOptions
from graphene.types.inputobjecttype import InputObjectType, InputObjectTypeContainer
from graphene.types.utils import yank_fields_from_attrs
from graphene.utils.deprecated import warn_deprecation
from graphene.utils.props import props
from graphene_django.fields import DjangoListField
from graphene_django.types import ErrorType
from graphene_django.utils import (
is_valid_django_model,
DJANGO_FILTER_INSTALLED,
maybe_queryset,
)
from .base_types import DjangoListObjectBase, factory_type
from .converter import construct_fields
from .fields import DjangoObjectField, DjangoListObjectField
from .paginations.pagination import BaseDjangoGraphqlPagination
from .registry import get_global_registry, Registry
from .settings import graphql_api_settings
from .utils import get_Object_or_None, queryset_factory
__all__ = (
"DjangoObjectType",
"DjangoInputObjectType",
"DjangoListObjectType",
"DjangoSerializerType",
)
class DjangoObjectOptions(BaseOptions):
fields = None
input_fields = None
interfaces = ()
model = None
queryset = None
registry = None
connection = None
create_container = None
results_field_name = None
filter_fields = ()
input_for = None
filterset_class = None
class DjangoSerializerOptions(BaseOptions):
model = None
queryset = None
serializer_class = None
arguments = None
fields = None
input_fields = None
input_field_name = None
mutation_output = None
output_field_name = None
output_type = None
output_list_type = None
nested_fields = None
class DjangoObjectType(ObjectType):
@classmethod
def __init_subclass_with_meta__(
cls,
model=None,
registry=None,
skip_registry=False,
only_fields=(),
exclude_fields=(),
include_fields=(),
filter_fields=None,
interfaces=(),
filterset_class=None,
**options
):
assert is_valid_django_model(model), (
'You need to pass a valid Django Model in {}.Meta, received "{}".'
).format(cls.__name__, model)
if not registry:
registry = get_global_registry()
assert isinstance(registry, Registry), (
"The attribute registry in {} needs to be an instance of "
'Registry, received "{}".'
).format(cls.__name__, registry)
if not DJANGO_FILTER_INSTALLED and (filter_fields or filterset_class):
raise Exception(
"Can only set filter_fields or filterset_class if Django-Filter is installed"
)
django_fields = yank_fields_from_attrs(
construct_fields(
model, registry, only_fields, include_fields, exclude_fields
),
_as=Field,
)
_meta = DjangoObjectOptions(cls)
_meta.model = model
_meta.registry = registry
_meta.filter_fields = filter_fields
_meta.fields = django_fields
_meta.filterset_class = filterset_class
super(DjangoObjectType, cls).__init_subclass_with_meta__(
_meta=_meta, interfaces=interfaces, **options
)
if not skip_registry:
registry.register(cls)
def resolve_id(self, info):
return self.pk
@classmethod
def is_type_of(cls, root, info):
if isinstance(root, SimpleLazyObject):
root._setup()
root = root._wrapped
if isinstance(root, cls):
return True
if not is_valid_django_model(type(root)):
raise Exception(('Received incompatible instance "{}".').format(root))
model = root._meta.model
return model == cls._meta.model
@classmethod
def get_node(cls, info, id):
try:
return cls._meta.model.objects.get(pk=id)
except cls._meta.model.DoesNotExist:
return None
class DjangoInputObjectType(InputObjectType):
@classmethod
def __init_subclass_with_meta__(
cls,
model=None,
container=None,
registry=None,
skip_registry=False,
connection=None,
use_connection=None,
only_fields=(),
exclude_fields=(),
filter_fields=None,
input_for="create",
nested_fields=(),
**options
):
assert is_valid_django_model(model), (
'You need to pass a valid Django Model in {}.Meta, received "{}".'
).format(cls.__name__, model)
if not registry:
registry = get_global_registry()
assert isinstance(registry, Registry), (
"The attribute registry in {} needs to be an instance of "
'Registry, received "{}".'
).format(cls.__name__, registry)
        assert input_for.lower() in ("create", "delete", "update"), (
'You need to pass a valid input_for value in {}.Meta, received "{}".'
).format(cls.__name__, input_for)
input_for = input_for.lower()
if not DJANGO_FILTER_INSTALLED and filter_fields:
raise Exception("Can only set filter_fields if Django-Filter is installed")
django_input_fields = yank_fields_from_attrs(
construct_fields(
model,
registry,
only_fields,
None,
exclude_fields,
input_for,
nested_fields,
),
_as=InputField,
sort=False,
)
for base in reversed(cls.__mro__):
django_input_fields.update(
yank_fields_from_attrs(base.__dict__, _as=InputField)
)
if container is None:
container = type(cls.__name__, (InputObjectTypeContainer, cls), {})
_meta = DjangoObjectOptions(cls)
_meta.by_polar = True
_meta.model = model
_meta.registry = registry
_meta.filter_fields = filter_fields
_meta.fields = django_input_fields
_meta.input_fields = django_input_fields
_meta.connection = connection
_meta.input_for = input_for
_meta.container = container
super(InputObjectType, cls).__init_subclass_with_meta__(
# container=container,
_meta=_meta,
**options,
)
if not skip_registry:
registry.register(cls, for_input=input_for)
@classmethod
def get_type(cls):
"""
This function is called when the unmounted type (InputObjectType instance)
is mounted (as a Field, InputField or Argument)
"""
return cls
class DjangoListObjectType(ObjectType):
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(
cls,
model=None,
registry=None,
results_field_name=None,
pagination=None,
only_fields=(),
exclude_fields=(),
filter_fields=None,
queryset=None,
filterset_class=None,
**options
):
assert is_valid_django_model(model), (
'You need to pass a valid Django Model in {}.Meta, received "{}".'
).format(cls.__name__, model)
if not DJANGO_FILTER_INSTALLED and filter_fields:
raise Exception("Can only set filter_fields if Django-Filter is installed")
assert isinstance(queryset, QuerySet) or queryset is None, (
"The attribute queryset in {} needs to be an instance of "
'Django model queryset, received "{}".'
).format(cls.__name__, queryset)
results_field_name = results_field_name or "results"
baseType = get_global_registry().get_type_for_model(model)
if not baseType:
factory_kwargs = {
"model": model,
"only_fields": only_fields,
"exclude_fields": exclude_fields,
"filter_fields": filter_fields,
"filterset_class": filterset_class,
"pagination": pagination,
"queryset": queryset,
"registry": registry,
"skip_registry": False,
}
baseType = factory_type("output", DjangoObjectType, **factory_kwargs)
filter_fields = filter_fields or baseType._meta.filter_fields
if pagination:
result_container = pagination.get_pagination_field(baseType)
else:
global_paginator = graphql_api_settings.DEFAULT_PAGINATION_CLASS
if global_paginator:
assert issubclass(global_paginator, BaseDjangoGraphqlPagination), (
'You need to pass a valid DjangoGraphqlPagination class in {}.Meta, received "{}".'
).format(cls.__name__, global_paginator)
global_paginator = global_paginator()
result_container = global_paginator.get_pagination_field(baseType)
else:
result_container = DjangoListField(baseType)
_meta = DjangoObjectOptions(cls)
_meta.model = model
_meta.queryset = queryset
_meta.baseType = baseType
_meta.results_field_name = results_field_name
_meta.filter_fields = filter_fields
_meta.exclude_fields = exclude_fields
_meta.only_fields = only_fields
_meta.filterset_class = filterset_class
_meta.fields = OrderedDict(
[
(results_field_name, result_container),
(
"count",
Field(
Int,
name="totalCount",
description="Total count of matches elements",
),
),
]
)
super(DjangoListObjectType, cls).__init_subclass_with_meta__(
_meta=_meta, **options
)
@classmethod
def RetrieveField(cls, *args, **kwargs):
return DjangoObjectField(cls._meta.baseType, **kwargs)
@classmethod
def BaseType(cls):
return cls._meta.baseType
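# Illustrative subclass sketch (hypothetical "Book" model; the Meta options shown are
# optional and fall back to the defaults resolved in __init_subclass_with_meta__ above):
#
#   class BookListType(DjangoListObjectType):
#       class Meta:
#           model = Book
#           results_field_name = "books"
#           filter_fields = {"title": ["icontains"]}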
class DjangoSerializerType(ObjectType):
"""
DjangoSerializerType definition
"""
ok = Boolean(description="Boolean field that return mutation result request.")
errors = List(ErrorType, description="Errors list for the field")
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(
cls,
serializer_class=None,
queryset=None,
only_fields=(),
include_fields=(),
exclude_fields=(),
pagination=None,
input_field_name=None,
output_field_name=None,
results_field_name=None,
nested_fields=(),
filter_fields=None,
description="",
filterset_class=None,
**options
):
if not serializer_class:
raise Exception("serializer_class is required on all ModelSerializerType")
model = serializer_class.Meta.model
description = description or "ModelSerializerType for {} model".format(
model.__name__
)
input_field_name = input_field_name or "new_{}".format(model._meta.model_name)
output_field_name = output_field_name or model._meta.model_name
input_class = getattr(cls, "Arguments", None)
if not input_class:
input_class = getattr(cls, "Input", None)
if input_class:
warn_deprecation(
(
"Please use {name}.Arguments instead of {name}.Input."
"Input is now only used in ClientMutationID.\nRead more: "
"https://github.com/graphql-python/graphene/blob/2.0/UPGRADE-v2.0.md#mutation-input"
).format(name=cls.__name__)
)
if input_class:
arguments = props(input_class)
else:
arguments = {}
registry = get_global_registry()
factory_kwargs = {
"model": model,
"only_fields": only_fields,
"include_fields": include_fields,
"exclude_fields": exclude_fields,
"filter_fields": filter_fields,
"pagination": pagination,
"queryset": queryset,
"nested_fields": nested_fields,
"registry": registry,
"skip_registry": False,
"filterset_class": filterset_class,
"results_field_name": results_field_name,
}
output_type = registry.get_type_for_model(model)
if not output_type:
output_type = factory_type("output", DjangoObjectType, **factory_kwargs)
output_list_type = factory_type("list", DjangoListObjectType, **factory_kwargs)
django_fields = OrderedDict({output_field_name: Field(output_type)})
global_arguments = {}
for operation in ("create", "delete", "update"):
global_arguments.update({operation: OrderedDict()})
if operation != "delete":
input_type = registry.get_type_for_model(model, for_input=operation)
if not input_type:
# factory_kwargs.update({'skip_registry': True})
input_type = factory_type(
"input", DjangoInputObjectType, operation, **factory_kwargs
)
global_arguments[operation].update(
{input_field_name: Argument(input_type, required=True)}
)
else:
global_arguments[operation].update(
{
"id": Argument(
ID,
required=True,
description="Django object unique identification field",
)
}
)
global_arguments[operation].update(arguments)
_meta = DjangoSerializerOptions(cls)
_meta.mutation_output = cls
_meta.arguments = global_arguments
_meta.fields = django_fields
_meta.output_type = output_type
_meta.output_list_type = output_list_type
_meta.model = model
_meta.queryset = queryset or model._default_manager
_meta.serializer_class = serializer_class
_meta.input_field_name = input_field_name
_meta.output_field_name = output_field_name
_meta.nested_fields = nested_fields
super(DjangoSerializerType, cls).__init_subclass_with_meta__(
_meta=_meta, description=description, **options
)
@classmethod
def list_object_type(cls):
return cls._meta.output_list_type
@classmethod
def object_type(cls):
return cls._meta.output_type
@classmethod
def get_errors(cls, errors):
errors_dict = {cls._meta.output_field_name: None, "ok": False, "errors": errors}
return cls(**errors_dict)
@classmethod
def perform_mutate(cls, obj, info):
resp = {cls._meta.output_field_name: obj, "ok": True, "errors": None}
return cls(**resp)
@classmethod
def get_serializer_kwargs(cls, root, info, **kwargs):
return {}
@classmethod
def manage_nested_fields(cls, data, root, info):
nested_objs = {}
if cls._meta.nested_fields and type(cls._meta.nested_fields) == dict:
for field in cls._meta.nested_fields:
sub_data = data.pop(field, None)
if sub_data:
serialized_data = cls._meta.nested_fields[field](
data=sub_data, many=True if type(sub_data) == list else False
)
ok, result = cls.save(serialized_data, root, info)
if not ok:
return cls.get_errors(result)
if type(sub_data) == list:
nested_objs.update({field: result})
else:
data.update({field: result.id})
return nested_objs
@classmethod
def create(cls, root, info, **kwargs):
data = kwargs.get(cls._meta.input_field_name)
request_type = info.context.META.get("CONTENT_TYPE", "")
if "multipart/form-data" in request_type:
data.update({name: value for name, value in info.context.FILES.items()})
nested_objs = cls.manage_nested_fields(data, root, info)
serializer = cls._meta.serializer_class(
data=data, **cls.get_serializer_kwargs(root, info, **kwargs)
)
ok, obj = cls.save(serializer, root, info)
if not ok:
return cls.get_errors(obj)
elif nested_objs:
[getattr(obj, field).add(*objs) for field, objs in nested_objs.items()]
return cls.perform_mutate(obj, info)
@classmethod
def delete(cls, root, info, **kwargs):
pk = kwargs.get("id")
old_obj = get_Object_or_None(cls._meta.model, pk=pk)
if old_obj:
old_obj.delete()
old_obj.id = pk
return cls.perform_mutate(old_obj, info)
else:
return cls.get_errors(
[
ErrorType(
field="id",
messages=[
"A {} obj with id {} do not exist".format(
cls._meta.model.__name__, pk
)
],
)
]
)
@classmethod
def update(cls, root, info, **kwargs):
data = kwargs.get(cls._meta.input_field_name)
request_type = info.context.META.get("CONTENT_TYPE", "")
if "multipart/form-data" in request_type:
data.update({name: value for name, value in info.context.FILES.items()})
pk = data.pop("id")
old_obj = get_Object_or_None(cls._meta.model, pk=pk)
if old_obj:
nested_objs = cls.manage_nested_fields(data, root, info)
serializer = cls._meta.serializer_class(
old_obj,
data=data,
partial=True,
**cls.get_serializer_kwargs(root, info, **kwargs),
)
ok, obj = cls.save(serializer, root, info)
if not ok:
return cls.get_errors(obj)
elif nested_objs:
[getattr(obj, field).add(*objs) for field, objs in nested_objs.items()]
return cls.perform_mutate(obj, info)
else:
return cls.get_errors(
[
ErrorType(
field="id",
messages=[
"A {} obj with id: {} do not exist".format(
cls._meta.model.__name__, pk
)
],
)
]
)
@classmethod
def save(cls, serialized_obj, root, info, **kwargs):
if serialized_obj.is_valid():
obj = serialized_obj.save()
return True, obj
else:
errors = [
ErrorType(field=key, messages=value)
for key, value in serialized_obj.errors.items()
]
return False, errors
@classmethod
def retrieve(cls, manager, root, info, **kwargs):
pk = kwargs.pop("id", None)
try:
return manager.get_queryset().get(pk=pk)
except manager.model.DoesNotExist:
return None
@classmethod
def list(cls, manager, filterset_class, filtering_args, root, info, **kwargs):
qs = queryset_factory(
cls._meta.queryset or manager, info.field_asts, info.fragments, **kwargs
)
filter_kwargs = {k: v for k, v in kwargs.items() if k in filtering_args}
qs = filterset_class(data=filter_kwargs, queryset=qs).qs
count = qs.count()
return DjangoListObjectBase(
count=count,
results=maybe_queryset(qs),
results_field_name=cls.list_object_type()._meta.results_field_name,
)
@classmethod
def RetrieveField(cls, *args, **kwargs):
return DjangoObjectField(cls._meta.output_type, resolver=cls.retrieve, **kwargs)
@classmethod
def ListField(cls, *args, **kwargs):
return DjangoListObjectField(
cls._meta.output_list_type, resolver=cls.list, **kwargs
)
@classmethod
def CreateField(cls, *args, **kwargs):
return Field(
cls._meta.mutation_output,
args=cls._meta.arguments["create"],
resolver=cls.create,
**kwargs,
)
@classmethod
def DeleteField(cls, *args, **kwargs):
return Field(
cls._meta.mutation_output,
args=cls._meta.arguments["delete"],
resolver=cls.delete,
**kwargs,
)
@classmethod
def UpdateField(cls, *args, **kwargs):
return Field(
cls._meta.mutation_output,
args=cls._meta.arguments["update"],
resolver=cls.update,
**kwargs,
)
@classmethod
def QueryFields(cls, *args, **kwargs):
retrieve_field = cls.RetrieveField(*args, **kwargs)
list_field = cls.ListField(*args, **kwargs)
return retrieve_field, list_field
@classmethod
def MutationFields(cls, *args, **kwargs):
create_field = cls.CreateField(*args, **kwargs)
delete_field = cls.DeleteField(*args, **kwargs)
update_field = cls.UpdateField(*args, **kwargs)
return create_field, delete_field, update_field
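# Illustrative usage sketch (not part of the library module above): wiring a
# DjangoSerializerType subclass into a graphene schema. `UserSerializer` is a
# hypothetical DRF serializer assumed to exist in your project; the names and
# description are placeholders, only the wiring pattern follows the class above.
if __name__ == "__main__":
    import graphene
    # from myapp.serializers import UserSerializer  # hypothetical serializer import
    class UserSerializerType(DjangoSerializerType):
        class Meta:
            serializer_class = UserSerializer
            description = "Serializer type for the hypothetical User model"
    class Query(graphene.ObjectType):
        # QueryFields returns (retrieve_field, list_field); see QueryFields above.
        user, users = UserSerializerType.QueryFields()
    class Mutation(graphene.ObjectType):
        # MutationFields returns (create_field, delete_field, update_field).
        create_user, delete_user, update_user = UserSerializerType.MutationFields()
    schema = graphene.Schema(query=Query, mutation=Mutation)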
|
# Program to scrape twitter photos, text, and usernames
# May 2016
# authors: Clara Wang with significant amount of code taken from source code
# source code: https://gist.github.com/freimanas/39f3ad9a5f0249c0dc64
# NOTE: if there are NO tweets associated with a Twitter handle, no CSV will be produced
import tweepy #https://github.com/tweepy/tweepy
import csv
import urllib
# Consumer keys and access tokens, used for OAuth
consumer_key = "ENTER CONSUMER KEY"
consumer_secret = "ENTER CONSUMER SECRET"
access_key = "ENTER ACCESS KEY"
access_secret = "ENTER ACCESS SECRET"
def get_all_tweets(screen_name):
    #Twitter only allows access to a user's most recent 3240 tweets with this method
print screen_name
#authorize twitter, initialize tweepy
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
#initialize a list to hold all the tweepy Tweets
alltweets = []
#make initial request for most recent tweets (200 is the maximum allowed count)
#if the handle is broken or doesn't exist, exit the function
try:
new_tweets = api.user_timeline(screen_name=str(screen_name), count=1)
except tweepy.TweepError:
return True
#save most recent tweets
alltweets.extend(new_tweets)
#save the id of the oldest tweet less one
if len(alltweets) > 0:
oldest = alltweets[-1].id - 1
#keep grabbing tweets until there are no tweets left to grab
while len(new_tweets) > 0:
print "getting tweets before %s" % (oldest)
#all subsequent requests use the max_id param to prevent duplicates
new_tweets = api.user_timeline(screen_name=str(screen_name), count=200, max_id=oldest)
#save most recent tweets
alltweets.extend(new_tweets)
#update the id of the oldest tweet less one; if no tweets indicate so
try:
oldest = alltweets[-1].id - 1
print "...%s tweets downloaded so far" % (len(alltweets))
except IndexError:
print "no tweets" + str(screen_name)
#go through all found tweets and remove the ones with no images
outtweets = [] #initialize master list to hold our ready tweets
# only scrape tweets if there are tweets
if len(alltweets) > 0:
iteration = 1
for tweet in alltweets:
#not all tweets will have media url, so lets skip them
try:
print tweet.entities['media'][0]['media_url']
except (NameError, KeyError):
#we dont want to have any entries without the media_url so lets do nothing
pass
else:
#got media_url - means add it to the output
outtweets.append([tweet.id_str, tweet.created_at, tweet.text.encode("utf-8"), tweet.entities['media'][0]['media_url']])
# download image using media_url
imgurl = tweet.entities['media'][0]['media_url']
imgname = str(screen_name) + str(iteration) + ".jpg"
try:
urllib.urlretrieve(imgurl, imgname)
except IOError:
print screen_name
break
iteration += 1
#write the csv
if len(outtweets) > 0:
with open('%s_tweets.csv' % screen_name, 'wb') as f:
writer = csv.writer(f)
writer.writerow(["id","created_at","text","media_url"])
writer.writerows(outtweets)
pass
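# Minimal driver sketch (an assumption, not part of the original gist): fill in the
# OAuth credentials above, then call the scraper with any Twitter handle; the handle
# below is only a placeholder.
if __name__ == '__main__':
    get_all_tweets("example_handle")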
|
from notebooks.feature_extractors import BaseSegmentExtractor
from typing import List
class WordCounter(BaseSegmentExtractor):
def _segment_extract(self, segment: str) -> List[float]:
return [float(len(segment.split()))]
|
from typing import Dict
from presidio_anonymizer.operators import Operator, OperatorType
class ReplaceInHebrew(Operator):
"""
An instance of the Operator abstract class (@Presidio). For each recognized entity ,this custom
replace it by custom string depending on its entity type.
"""
def operate(self, text: str = None, params: Dict = None) -> str:
""":return: new_value."""
entity_type = params.get("entity_type")
if entity_type in ["PERS", "PER"]:
return "<שם_>"
elif entity_type in ["LOC", "GPE"]:
return "<מיקום_>"
elif entity_type in ["ORG", "FAC"]:
return "<ארגון_>"
elif entity_type in ["CREDIT_CARD", "ISRAELI_ID_NUMBER", "ID"]:
return "<מזהה_>"
elif entity_type in ["EMAIL_ADDRESS", "IP_ADDRESS", "PHONE_NUMBER", "URL"]:
return "<קשר_>"
elif entity_type in ["DATE"]:
return "<תאריך_>"
def validate(self, params: Dict = None) -> None:
"""Validate the new value is string."""
pass
def operator_name(self) -> str:
"""Return operator name."""
return "replace_in_hebrew"
def operator_type(self) -> OperatorType:
"""Return operator type."""
return OperatorType.Anonymize
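# Illustrative sketch (not part of the module above): calling the operator directly
# shows the entity-type to placeholder mapping; the sample values are made up.
if __name__ == "__main__":
    op = ReplaceInHebrew()
    print(op.operate(text="Dana Levi", params={"entity_type": "PER"}))                   # -> <שם_>
    print(op.operate(text="dana@example.com", params={"entity_type": "EMAIL_ADDRESS"}))  # -> <קשר_>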
|
"""
Dissimilarity measures for clustering
"""
import numpy as np
def matching_dissim(a, b, **_):
"""Simple matching dissimilarity function"""
return np.sum(a != b, axis=1)
def jaccard_dissim_binary(a, b, **__):
"""Jaccard dissimilarity function for binary encoded variables"""
if ((a == 0) | (a == 1)).all() and ((b == 0) | (b == 1)).all():
numerator = np.sum(np.bitwise_and(a, b), axis=1)
denominator = np.sum(np.bitwise_or(a, b), axis=1)
if (denominator == 0).any(0):
raise ValueError("Insufficient Number of data since union is 0")
else:
return 1 - numerator / denominator
raise ValueError("Missing or non Binary values detected in Binary columns.")
def jaccard_dissim_label(a, b, **__):
"""Jaccard dissimilarity function for label encoded variables"""
if np.isnan(a.astype('float64')).any() or np.isnan(b.astype('float64')).any():
raise ValueError("Missing values detected in Numeric columns.")
intersect_len = np.empty(len(a), dtype=int)
union_len = np.empty(len(a), dtype=int)
i = 0
for row in a:
intersect_len[i] = len(np.intersect1d(row, b))
union_len[i] = len(np.unique(row)) + len(np.unique(b)) - intersect_len[i]
i += 1
if (union_len == 0).any():
raise ValueError("Insufficient Number of data since union is 0")
return 1 - intersect_len / union_len
def euclidean_dissim(a, b, **_):
    """Euclidean distance dissimilarity function (sum of squared differences)"""
    if np.isnan(a).any() or np.isnan(b).any():
        raise ValueError("Missing values detected in numerical columns.")
    return np.sum((a - b) ** 2, axis=1)
def chebyshev_dissim(a, b, **_):
    """Chebyshev distance dissimilarity function (largest absolute difference)"""
    if np.isnan(a).any() or np.isnan(b).any():
        raise ValueError("Missing values detected in numerical columns.")
    return np.max(np.abs(a - b), axis=1)
def mahalanobis(a=None, X=None, cov=None):
    """Mahalanobis distance of the rows of `a` with respect to the distribution of `X` (a DataFrame)."""
    a_minus_mean = a - np.mean(X)
    if cov is None:
        cov = np.cov(X.values.T)
    inv_cov = np.linalg.inv(cov)
    left_term = np.dot(a_minus_mean, inv_cov)
    mahala = np.dot(left_term, a_minus_mean.T)
    return mahala.diagonal()
#####
def covar(x, y):
    x_mean = np.mean(x)
    y_mean = np.mean(y)
    Cov_numerator = sum(((a - x_mean) * (b - y_mean)) for a, b in zip(x, y))
    Cov_denominator = len(x) - 1
    Covariance = Cov_numerator / Cov_denominator
    return Covariance
def mahalanobis_dissim(x, y, cov=None):
    x_mean = np.mean(x)
    x_minus_mn_with_transpose = np.transpose(x - x_mean)
    Covariance = covar(x, np.transpose(y))
    inv_covmat = np.linalg.inv(Covariance)
    x_minus_mn = x - x_mean
    left_term = np.dot(x_minus_mn, inv_covmat)
    D_square = np.dot(left_term, x_minus_mn_with_transpose)
    return D_square
from scipy.spatial import distance
def maha(a,b, df):
VI = df.cov()
return distance.mahalanobis(a,b,VI)
###
def ng_dissim(a, b, X=None, membship=None):
"""Ng et al.'s dissimilarity measure, as presented in
Michael K. Ng, Mark Junjie Li, Joshua Zhexue Huang, and Zengyou He, "On the
Impact of Dissimilarity Measure in k-Modes Clustering Algorithm", IEEE
Transactions on Pattern Analysis and Machine Intelligence, Vol. 29, No. 3,
January, 2007
This function can potentially speed up training convergence.
    Note that membship must be a rectangular array such that
    len(membship) == len(a) and len(membship[i]) == X.shape[0]
In case of missing membship, this function reverts back to
matching dissimilarity (e.g., when predicting).
"""
# Without membership, revert to matching dissimilarity
if membship is None:
return matching_dissim(a, b)
def calc_cjr(b, X, memj, idr):
"""Num objects w/ category value x_{i,r} for rth attr in jth cluster"""
xcids = np.where(memj == 1)
return float((np.take(X, xcids, axis=0)[0][:, idr] == b[idr]).sum(0))
def calc_dissim(b, X, memj, idr):
# Size of jth cluster
cj = float(np.sum(memj))
return (1.0 - (calc_cjr(b, X, memj, idr) / cj)) if cj != 0.0 else 0.0
    if len(membship) != a.shape[0] or len(membship[0]) != X.shape[0]:
        raise ValueError("'membship' must be a rectangular array where "
                         "the number of rows in 'membship' equals the "
                         "number of rows in 'a' and the number of "
                         "columns in 'membship' equals the number of rows in 'X'.")
return np.array([np.array([calc_dissim(b, X, membship[idj], idr)
if b[idr] == t else 1.0
for idr, t in enumerate(val_a)]).sum(0)
for idj, val_a in enumerate(a)])
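# Illustrative sketch (not part of the module above): comparing one candidate row
# against two reference rows with the measures defined here.
if __name__ == "__main__":
    a = np.array([[1, 2, 3]])
    b = np.array([[1, 2, 4],
                  [0, 2, 3]])
    print(matching_dissim(a, b))                               # [1 1]: one mismatching attribute per row
    print(euclidean_dissim(a.astype(float), b.astype(float)))  # [1. 1.]: sum of squared differences
    print(chebyshev_dissim(a.astype(float), b.astype(float)))  # [1. 1.]: largest absolute difference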
|
#!/usr/bin/env python3
import FTree as ft
if __name__ == "__main__":
z = ft.Gates()
G4 = z.or_gate([["4"]], [["5"]])
G5 = z.or_gate([["6"]], [["7"]])
G6 = z.or_gate([["6"]], [["8"]])
G2 = z.and_gate(G4, G5)
G3 = z.or_gate([["3"]], G6)
G1 = z.or_gate(G2, G3)
G0 = z.or_gate([["1"]], z.or_gate(G1, [["2"]]))
mcs = z.mcs(G0)
z.pretty_display(mcs)
|
#!/usr/bin/env python3
# Handles user information and related operations
import os
import sys
import json as JSON
from database_handle import database_handle as dh
from encrypt_and_decrypt import encrypt_and_decrypt_for_users as eadfu
class handle_users():
def __init__(self):
self.dh = dh()
self.eadfu = eadfu()
def add_user(self, info={"basic": {"name": "", "email": ""}, "password": "", "permission": ""}):
if "permission" not in info.keys():
info["permission"] = "Normal"
self.dh.add_users([{"info": info["basic"], "permission": info["permission"]}])
with open("static/users/"+info["basic"]["name"]+".json", "w") as file:
            JSON.dump(info, file)
self.eadfu.encrypt(info["basic"]["name"])
return
def remove_user(self, user):
self.dh.remove_user(user)
result = os.popen("rm static/users/"+str(user)+"*").read()
def login(self, info={"name": "", "password": ""}):
try:
self.eadfu.decrypt(info["name"])
except:
return False, "No Such User!"
with open("static/users/"+info["name"]+".json") as file:
data = JSON.load(file)
self.eadfu.encrypt(info["name"])
if info["password"] == data["password"]:
del data["password"]
return True, data
else:
return False, "Wrong Password!"
def change_password(self, info={"name": "", "password": "", "new_password": ""}):
try:
self.eadfu.decrypt(info["name"])
except:
return False, "No Such User!"
with open("static/users/"+info["name"]+".json") as file:
data = JSON.load(file)
if info["password"] == data["password"]:
data["password"] = info["new_password"]
with open("static/users/"+info["name"]+".json", "w") as file:
JSON.dump(data, file)
self.eadfu.encrypt(info["name"])
del data["password"]
return True, data
else:
self.eadfu.encrypt(info["name"])
return False, "Wrong Password!"
def change_email(self, info={"name": "", "email": ""}):
try:
self.eadfu.decrypt(info["name"])
except:
return False, "No Such User!"
with open("static/users/"+info["name"]+".json") as file:
data = JSON.load(file)
data["email"] = info["email"]
with open("static/users/"+info["name"]+".json", "w") as file:
JSON.dump(data, file)
self.eadfu.encrypt(info["name"])
del data["password"]
return True, data
def get_user_info(self, user):
try:
self.eadfu.decrypt(user)
except:
return False, "No Such User!"
with open("static/users/"+user+".json") as file:
data = JSON.load(file)
self.eadfu.encrypt(user)
del data["password"]
return True, data
if __name__ == "__main__":
hu = handle_users()
hu.add_user({"basic": {"name": "a", "email": "a@a.a"}, "password": "aaaaaaaaa"})
|
from .evaluation import Evaluation
from .eval_from_dict import EvaluationFromDict
from .eval_array_xr import EvaluationFromArrayXr
|
from django.apps import AppConfig
from django.db import connection
from django.db.models.signals import post_migrate
def index_adjustments(sender, using=None, **kwargs):
"""
    Remove LIKE indexes (varchar_pattern_ops) on UUID fields and create
    version-unique indexes for models that have a VERSION_UNIQUE attribute.
    :param AppConfig sender:
    :param str using: database alias
:param kwargs:
"""
from versions.util.postgresql import (
remove_uuid_id_like_indexes,
create_current_version_unique_indexes,
create_current_version_unique_identity_indexes
)
remove_uuid_id_like_indexes(sender.name, using)
create_current_version_unique_indexes(sender.name, using)
create_current_version_unique_identity_indexes(sender.name, using)
class VersionsTestsConfig(AppConfig):
name = 'versions_tests'
verbose_name = "Versions Tests default application configuration"
def ready(self):
"""
For postgresql only, remove like indexes for uuid columns and
create version-unique indexes.
This will only be run in django >= 1.7.
:return: None
"""
if connection.vendor == 'postgresql':
post_migrate.connect(index_adjustments, sender=self)
|
import _config
import _utils
CONFIG_PATH = "config.toml"
def main():
config = _config.read(CONFIG_PATH)
for path_name in [x.in_ for x in config.directorios]:
_utils.list_jpg_files_in_dir(path_name)
if __name__ == "__main__":
main()
|
# Generated by Django 2.2.11 on 2020-04-06 11:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('resources', '0088_auto_20200319_1053'),
('resources', '0114_auto_20200317_0836'),
]
operations = [
]
|
# http://www.pythonchallenge.com/pc/return/cat.html
# http://www.pythonchallenge.com/pc/return/uzi.html
import calendar
import datetime
def main():
valid = []
# Iterate "1__6" years to match Calendar image
for year in range(1006, 2000, 10):
# Calendar shows Feb 29, must be a leap year
if calendar.isleap(year):
# Day after marked day is Tuesday, January 27th
target = datetime.date(year, 1, 27)
if target.weekday() == calendar.TUESDAY:
valid.append(target)
# "he ain't the youngest, he is the second"
final = valid[-2]
return final
if __name__ == "__main__":
print(main())
# Wolfgang Amadeus Mozart, born on January 27, 1756
# http://www.pythonchallenge.com/pc/return/mozart.html
|
# coding: utf-8
import pprint
import re
import six
class WebImageWordsBlockList:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'words': 'str',
'confidence': 'float',
'location': 'list[int]',
'extracted_data': 'object',
'contact_info': 'object',
'image_size': 'object',
'height': 'int',
'width': 'int',
'name': 'str',
'phone': 'str',
'province': 'str',
'city': 'str',
'district': 'str',
'detail_address': 'str'
}
attribute_map = {
'words': 'words',
'confidence': 'confidence',
'location': 'location',
'extracted_data': 'extracted_data',
'contact_info': 'contact_info',
'image_size': 'image_size',
'height': 'height',
'width': 'width',
'name': 'name',
'phone': 'phone',
'province': 'province',
'city': 'city',
'district': 'district',
'detail_address': 'detail_address'
}
def __init__(self, words=None, confidence=None, location=None, extracted_data=None, contact_info=None, image_size=None, height=None, width=None, name=None, phone=None, province=None, city=None, district=None, detail_address=None):
"""WebImageWordsBlockList - a model defined in huaweicloud sdk"""
self._words = None
self._confidence = None
self._location = None
self._extracted_data = None
self._contact_info = None
self._image_size = None
self._height = None
self._width = None
self._name = None
self._phone = None
self._province = None
self._city = None
self._district = None
self._detail_address = None
self.discriminator = None
if words is not None:
self.words = words
if confidence is not None:
self.confidence = confidence
if location is not None:
self.location = location
if extracted_data is not None:
self.extracted_data = extracted_data
if contact_info is not None:
self.contact_info = contact_info
if image_size is not None:
self.image_size = image_size
if height is not None:
self.height = height
if width is not None:
self.width = width
if name is not None:
self.name = name
if phone is not None:
self.phone = phone
if province is not None:
self.province = province
if city is not None:
self.city = city
if district is not None:
self.district = district
if detail_address is not None:
self.detail_address = detail_address
@property
def words(self):
"""Gets the words of this WebImageWordsBlockList.
        Recognition result for this text block.
:return: The words of this WebImageWordsBlockList.
:rtype: str
"""
return self._words
@words.setter
def words(self, words):
"""Sets the words of this WebImageWordsBlockList.
        Recognition result for this text block.
:param words: The words of this WebImageWordsBlockList.
:type: str
"""
self._words = words
@property
def confidence(self):
"""Gets the confidence of this WebImageWordsBlockList.
        Confidence of the corresponding field. A higher confidence indicates that the recognized field is more reliable; statistically, higher confidence correlates with higher accuracy. The confidence is produced by the algorithm and is not directly equivalent to the accuracy of the field.
:return: The confidence of this WebImageWordsBlockList.
:rtype: float
"""
return self._confidence
@confidence.setter
def confidence(self, confidence):
"""Sets the confidence of this WebImageWordsBlockList.
        Confidence of the corresponding field. A higher confidence indicates that the recognized field is more reliable; statistically, higher confidence correlates with higher accuracy. The confidence is produced by the algorithm and is not directly equivalent to the accuracy of the field.
:param confidence: The confidence of this WebImageWordsBlockList.
:type: float
"""
self._confidence = confidence
@property
def location(self):
"""Gets the location of this WebImageWordsBlockList.
        Region location of the text block, given as a list of the 2D coordinates (x, y) of the four vertices of the text region; the origin is the top-left corner of the image, with the x-axis horizontal and the y-axis vertical.
:return: The location of this WebImageWordsBlockList.
:rtype: list[int]
"""
return self._location
@location.setter
def location(self, location):
"""Sets the location of this WebImageWordsBlockList.
        Region location of the text block, given as a list of the 2D coordinates (x, y) of the four vertices of the text region; the origin is the top-left corner of the image, with the x-axis horizontal and the y-axis vertical.
:param location: The location of this WebImageWordsBlockList.
:type: list[int]
"""
self._location = location
@property
def extracted_data(self):
"""Gets the extracted_data of this WebImageWordsBlockList.
        Extracted structured JSON result. The keys of this dictionary match the values of the extract_type input list; currently only contact information extraction is supported, i.e. the field whose key is \"contact_info\". If extract_type is an empty list or omitted, no extraction is performed and this field is empty.
:return: The extracted_data of this WebImageWordsBlockList.
:rtype: object
"""
return self._extracted_data
@extracted_data.setter
def extracted_data(self, extracted_data):
"""Sets the extracted_data of this WebImageWordsBlockList.
        Extracted structured JSON result. The keys of this dictionary match the values of the extract_type input list; currently only contact information extraction is supported, i.e. the field whose key is \"contact_info\". If extract_type is an empty list or omitted, no extraction is performed and this field is empty.
:param extracted_data: The extracted_data of this WebImageWordsBlockList.
:type: object
"""
self._extracted_data = extracted_data
@property
def contact_info(self):
"""Gets the contact_info of this WebImageWordsBlockList.
        Extracted contact information, including name, phone number, province/city/district and detailed address. If this field is not listed in the extract_type input, it is absent.
:return: The contact_info of this WebImageWordsBlockList.
:rtype: object
"""
return self._contact_info
@contact_info.setter
def contact_info(self, contact_info):
"""Sets the contact_info of this WebImageWordsBlockList.
        Extracted contact information, including name, phone number, province/city/district and detailed address. If this field is not listed in the extract_type input, it is absent.
:param contact_info: The contact_info of this WebImageWordsBlockList.
:type: object
"""
self._contact_info = contact_info
@property
def image_size(self):
"""Gets the image_size of this WebImageWordsBlockList.
        Width and height information of the image. If this field is not listed in the extract_type input, it is absent.
:return: The image_size of this WebImageWordsBlockList.
:rtype: object
"""
return self._image_size
@image_size.setter
def image_size(self, image_size):
"""Sets the image_size of this WebImageWordsBlockList.
        Width and height information of the image. If this field is not listed in the extract_type input, it is absent.
:param image_size: The image_size of this WebImageWordsBlockList.
:type: object
"""
self._image_size = image_size
@property
def height(self):
"""Gets the height of this WebImageWordsBlockList.
        Returned when image_size is requested; the image height.
:return: The height of this WebImageWordsBlockList.
:rtype: int
"""
return self._height
@height.setter
def height(self, height):
"""Sets the height of this WebImageWordsBlockList.
        Returned when image_size is requested; the image height.
:param height: The height of this WebImageWordsBlockList.
:type: int
"""
self._height = height
@property
def width(self):
"""Gets the width of this WebImageWordsBlockList.
        Returned when image_size is requested; the image width.
:return: The width of this WebImageWordsBlockList.
:rtype: int
"""
return self._width
@width.setter
def width(self, width):
"""Sets the width of this WebImageWordsBlockList.
        Returned when image_size is requested; the image width.
:param width: The width of this WebImageWordsBlockList.
:type: int
"""
self._width = width
@property
def name(self):
"""Gets the name of this WebImageWordsBlockList.
        Returned when contact_info is requested; the name.
:return: The name of this WebImageWordsBlockList.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this WebImageWordsBlockList.
        Returned when contact_info is requested; the name.
:param name: The name of this WebImageWordsBlockList.
:type: str
"""
self._name = name
@property
def phone(self):
"""Gets the phone of this WebImageWordsBlockList.
        Returned when contact_info is requested; the phone number.
:return: The phone of this WebImageWordsBlockList.
:rtype: str
"""
return self._phone
@phone.setter
def phone(self, phone):
"""Sets the phone of this WebImageWordsBlockList.
        Returned when contact_info is requested; the phone number.
:param phone: The phone of this WebImageWordsBlockList.
:type: str
"""
self._phone = phone
@property
def province(self):
"""Gets the province of this WebImageWordsBlockList.
        Returned when contact_info is requested; the province.
:return: The province of this WebImageWordsBlockList.
:rtype: str
"""
return self._province
@province.setter
def province(self, province):
"""Sets the province of this WebImageWordsBlockList.
        Returned when contact_info is requested; the province.
:param province: The province of this WebImageWordsBlockList.
:type: str
"""
self._province = province
@property
def city(self):
"""Gets the city of this WebImageWordsBlockList.
        Returned when contact_info is requested; the city.
:return: The city of this WebImageWordsBlockList.
:rtype: str
"""
return self._city
@city.setter
def city(self, city):
"""Sets the city of this WebImageWordsBlockList.
        Returned when contact_info is requested; the city.
:param city: The city of this WebImageWordsBlockList.
:type: str
"""
self._city = city
@property
def district(self):
"""Gets the district of this WebImageWordsBlockList.
        Returned when contact_info is requested; the district or county.
:return: The district of this WebImageWordsBlockList.
:rtype: str
"""
return self._district
@district.setter
def district(self, district):
"""Sets the district of this WebImageWordsBlockList.
        Returned when contact_info is requested; the district or county.
:param district: The district of this WebImageWordsBlockList.
:type: str
"""
self._district = district
@property
def detail_address(self):
"""Gets the detail_address of this WebImageWordsBlockList.
        Returned when contact_info is requested; the detailed address (excluding province, city and district).
:return: The detail_address of this WebImageWordsBlockList.
:rtype: str
"""
return self._detail_address
@detail_address.setter
def detail_address(self, detail_address):
"""Sets the detail_address of this WebImageWordsBlockList.
        Returned when contact_info is requested; the detailed address (excluding province, city and district).
:param detail_address: The detail_address of this WebImageWordsBlockList.
:type: str
"""
self._detail_address = detail_address
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WebImageWordsBlockList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
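# Illustrative sketch (not part of the generated SDK model): build a block with
# made-up values and print the plain dict produced by to_dict().
if __name__ == "__main__":
    block = WebImageWordsBlockList(words="example text", confidence=0.98,
                                   location=[10, 10, 90, 40])
    print(block.to_dict())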
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'Accession', fields ['uqm_accession']
db.delete_unique(u'botanycollection_accession', ['uqm_accession'])
def backwards(self, orm):
# Adding unique constraint on 'Accession', fields ['uqm_accession']
db.create_unique(u'botanycollection_accession', ['uqm_accession'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'botanycollection.accession': {
'Meta': {'object_name': 'Accession'},
'accession_notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'altitude': ('django.db.models.fields.CharField', [], {'max_length': '14', 'blank': 'True'}),
'biological_synonym': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'collection_date': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'collector': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'collector_serial_no': ('django.db.models.fields.CharField', [], {'max_length': '22', 'blank': 'True'}),
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'contributor': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '27', 'blank': 'True'}),
'cultivar': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'date_contributed': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'detdate': ('django.db.models.fields.CharField', [], {'max_length': '9', 'blank': 'True'}),
'detna': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'family': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'genus': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_level_flag': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'indigenous_name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'lat_long': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'location_notes': ('django.db.models.fields.CharField', [], {'max_length': '162', 'blank': 'True'}),
'material': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'preservation_state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'related_accession': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
'sample_number': ('django.db.models.fields.CharField', [], {'max_length': '26', 'blank': 'True'}),
'site_name': ('django.db.models.fields.CharField', [], {'max_length': '214', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '67', 'blank': 'True'}),
'source_number': ('django.db.models.fields.CharField', [], {'max_length': '26', 'blank': 'True'}),
'species': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'species_author': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'sspau': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'sspna': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'subfam': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'tribe': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'unique_identifier': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '24', 'blank': 'True'}),
'uqm_accession': ('django.db.models.fields.CharField', [], {'max_length': '14', 'blank': 'True'}),
'varau': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'varna': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'weblinks': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'botanycollection.accessionphoto': {
'Meta': {'object_name': 'AccessionPhoto'},
'accession': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['botanycollection.Accession']"}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
'filesize': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255'}),
'md5sum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'original_filedate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'original_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['auth.User']"}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'botanycollection.seedfeatures': {
'Meta': {'object_name': 'SeedFeatures'},
'accession': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['botanycollection.Accession']", 'unique': 'True'}),
'anatomy_longitudinal_sections': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'anatomy_transverse_section': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'embryo_endosperm': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'hilum_details': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'other_identification_information': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'references_and_links': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'seed_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'shape_2d': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'shape_3d': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'shape_detail': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'size': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'special_features': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'surface_inner_texture': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'surface_outer_texture': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'testa_endocarp_thickness': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'})
},
u'botanycollection.woodfeatures': {
'Meta': {'object_name': 'WoodFeatures'},
'accession': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['botanycollection.Accession']", 'unique': 'True'}),
'aggregate_rays': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'axial_canals': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'axial_parenchyma_arrangement1': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'axial_parenchyma_arrangement2': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'axial_parenchyma_arrangement3': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'axial_parenchyma_arrangement4': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'axial_parenchyma_arrangement5': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'axial_parenchyma_bands': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'axial_parenchyma_present': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'axial_resin_canals': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'axial_tracheid_pits': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'cambial_variants': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'crassulae': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'druses': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'early_late_wood_transition': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'early_wood_ray_pits': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'epithelial_cells': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'fibre_helical_thickenings': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'fibre_pits': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'fibre_wall_thickness': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'fusiform_parenchyma_cells': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'helical_thickenings': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'included_phloem': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'intervessel_pit_arrangement': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'intervessel_pit_size': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'intervessel_tracheid_pit_shapes': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'lactifer_tanniferous_tubes': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'late_wood_ray_pits': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'nodular_tangential_ray_walls': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'parenchyma_like_fibres_present': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'perforation_plates_types': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'prismatic_crystals': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'radial_secretory_canals': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'radial_tracheids_for_gymnosperms': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'ray_height': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'ray_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'ray_width': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'rays': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'rays_cellular_composition': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'rays_sheath_cells': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'rays_structure': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reference_specimens': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'silica': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'solitary_vessels_with_angular_outline': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'species': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'spetate_fibres_present': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'spiral_thickenings': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'storied_structure': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'tile_cells': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'tracheid_diameter': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'vascularvasicentric_tracheids_present': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'vessel_arrangement': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'vessel_grouping': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'vessel_porosity': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'vessels': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'vessels_deposits': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'vessels_rays_pitting': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'vessels_tyloses': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'walls': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['botanycollection']
|
import math
import pyxel
class Pycocam:
    __PI = math.pi
    __PI2 = 2 * math.pi
def __init__(self, width, height):
self._theta = None
self.z = 0
self.focallength = 5
self.fov = 45
self.theta = 0
self.width = width
self.height = height
@property
def theta(self):
return self._theta
@theta.setter
def theta(self, v):
v %= self.__PI2
self._theta = v
def line(self, p1, p2, col):
try:
px_1 = self._coordstopx(self._perspective(p1))
px_2 = self._coordstopx(self._perspective(p2))
except ZeroDivisionError:
return
pyxel.line(px_1[0], px_1[1], px_2[0], px_2[1], col)
def point(self, p, col):
try:
px = self._coordstopx(self._perspective(p))
except ZeroDivisionError:
return
pyxel.pset(px[0], px[1], col)
def _perspective(self, p):
x, y, z = p
y *= -1
x_rot = x * math.cos(self.theta) - z * math.sin(self.theta)
z_rot = x * math.sin(self.theta) + z * math.cos(self.theta)
dz = z_rot - self.z
m_xz = x_rot / dz
m_yz = y / dz
out_z = self.z + self.focallength
out_x = m_xz * out_z
out_y = m_yz * out_z
return (out_x, out_y)
def _map(self, v, a, b, c, d):
return ((v - a) / (b - a)) * (d - c) + c
def _coordstopx(self, coords):
x, y = coords
        # convert half the field of view from degrees to radians before taking the tangent
        radius = self.focallength * math.tan(math.radians(self.fov / 2))
pixel_x = self._map(x, -radius, radius, 0, self.width)
pixel_y = self._map(y, -radius, radius, 0, self.height)
return (pixel_x, pixel_y)
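# Illustrative driver sketch (an assumption, not part of the module above): spin the
# camera around a small square outline placed at z = 10 and redraw it every frame.
if __name__ == "__main__":
    WIDTH, HEIGHT = 160, 120
    cam = Pycocam(WIDTH, HEIGHT)
    square = [(-0.5, -0.5, 10), (0.5, -0.5, 10), (0.5, 0.5, 10), (-0.5, 0.5, 10)]
    def update():
        cam.theta += 0.02  # slow rotation; the theta setter wraps the angle automatically
    def draw():
        pyxel.cls(0)
        for i in range(4):
            cam.line(square[i], square[(i + 1) % 4], 7)
    pyxel.init(WIDTH, HEIGHT)
    pyxel.run(update, draw)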
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2020 PANGAEA (https://www.pangaea.de/)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json, rdflib_jsonld
import re
import sys
import idutils
import rdflib
import requests
from rdflib import Namespace, Graph, URIRef, plugin
from rdflib.namespace import RDF
from rdflib.namespace import DCTERMS
from rdflib.namespace import DC
from rdflib.namespace import FOAF
from rdflib.namespace import SDO #schema.org
from fuji_server.helper.metadata_collector import MetaDataCollector
from fuji_server.helper.metadata_collector_schemaorg import MetaDataCollectorSchemaOrg
from fuji_server.helper.request_helper import RequestHelper, AcceptTypes
from fuji_server.helper.metadata_mapper import Mapper
from fuji_server.helper.preprocessor import Preprocessor
from pyld import jsonld
class MetaDataCollectorRdf(MetaDataCollector):
"""
    A class to collect metadata from a Resource Description Framework (RDF) graph. This class is a child class of MetaDataCollector.
...
Attributes
----------
source_name : str
Source name of metadata
target_url : str
Target URL of the metadata
content_type : str
Content type of HTTP response
rdf_graph : rdflib.ConjunctiveGraph
An object of RDF graph
Methods
--------
parse_metadata()
Method to parse the metadata given RDF graph
get_default_metadata(g)
Method to get the default metadata given RDF graph
get_metadata(g, item, type='Dataset')
Method to get the core metadata in RDF graph
get_ontology_metadata(graph)
Method to get ontology by matching the type of IRI into OWL or SKOS class in the RDF graph
get_dcat_metadata(graph)
Method to get Data Catalog(DCAT) metadata in RDF graph
get_content_type()
Method to get the content type attribute in the class
get_metadata_from_graph(g)
Method to get all metadata from a graph object
"""
target_url = None
def __init__(self, loggerinst, target_url, source):
"""
Parameters
----------
source : str
Source of metadata
loggerinst : logging.Logger
Logger instance
target_url : str
Target URL
"""
self.target_url = target_url
self.content_type = None
self.source_name = source
#self.rdf_graph = rdf_graph
super().__init__(logger=loggerinst)
def set_namespaces(self,graph):
namespaces = {}
known_namespace_regex = [r'https?:\/\/vocab\.nerc\.ac\.uk\/collection\/[A-Z][0-9]+\/current\/',
r'https?:\/\/purl\.obolibrary\.org\/obo\/[a-z]+(\.owl|#)']
try:
nm = graph.namespace_manager
possible = set(graph.predicates()).union(graph.objects(None, RDF.type))
alluris = set(graph.objects()).union(set(graph.subjects()))
#namespaces from mentioned objects and subjects uris (best try)
for uri in alluris:
if idutils.is_url(uri):
for known_pattern in known_namespace_regex:
kpm = re.match(known_pattern, uri)
if kpm :
uri = kpm[0]
self.namespaces.append(uri)
else:
uri = str(uri).strip().rstrip("/#")
namespace_candidate = uri.rsplit('/', 1)[0]
if namespace_candidate != uri:
self.namespaces.append(namespace_candidate)
else:
namespace_candidate = uri.rsplit('#', 1)[0]
if namespace_candidate != uri:
self.namespaces.append(namespace_candidate)
#defined namespaces
for predicate in possible:
prefix, namespace, local = nm.compute_qname(predicate)
namespaces[prefix] = namespace
self.namespaces.append(str(namespace))
self.namespaces = list(set(self.namespaces))
except Exception as e:
self.logger.info('FsF-F2-01M : RDF Namespace detection error -: {}'.format(e))
return namespaces
def get_metadata_from_graph(self, rdf_response_graph):
rdf_metadata ={}
if rdf_response_graph:
ontology_indicator = [
rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#'),
rdflib.term.URIRef('http://www.w3.org/2002/07/owl#')
]
if isinstance(rdf_response_graph, rdflib.graph.Graph):
                self.logger.info('FsF-F2-01M : Found RDF Graph which was successfully parsed')
self.logger.info('FsF-F2-01M : Trying to identify namespaces in RDF Graph')
graph_namespaces = self.set_namespaces(rdf_response_graph)
#self.getNamespacesfromIRIs(graph_text)
# TODO: set credit score for being valid RDF
# TODO: since its valid RDF aka semantic representation, make sure FsF-I1-01M is passed and scored
if rdflib.term.URIRef('http://www.w3.org/ns/dcat#') in graph_namespaces.values():
self.logger.info('FsF-F2-01M : RDF Graph seems to contain DCAT metadata elements')
rdf_metadata = self.get_dcat_metadata(rdf_response_graph)
elif rdflib.term.URIRef('http://schema.org/') in graph_namespaces.values():
self.logger.info('FsF-F2-01M : RDF Graph seems to contain schema.org metadata elements')
rdf_metadata = self.get_schemaorg_metadata(rdf_response_graph)
elif bool(set(ontology_indicator) & set(graph_namespaces.values())):
rdf_metadata = self.get_ontology_metadata(rdf_response_graph)
#else:
if not rdf_metadata:
#try to find root node
typed_objects = list(rdf_response_graph.objects(predicate=RDF.type))
if typed_objects:
typed_nodes = list(rdf_response_graph[:RDF.type:typed_objects[0]])
if typed_nodes:
rdf_metadata = self.get_metadata(rdf_response_graph, typed_nodes[0], str(typed_objects[0]))
if not rdf_metadata:
rdf_metadata = self.get_default_metadata(rdf_response_graph)
#add found namespaces URIs to namespace
#for ns in graph_namespaces.values():
# self.namespaces.append(ns)
else:
self.logger.info('FsF-F2-01M : Expected RDF Graph but received -: {0}'.format(self.content_type))
return rdf_metadata
def parse_metadata(self):
"""Parse the metadata given RDF graph.
Returns
------
str
a string of source name
dict
a dictionary of metadata in RDF graph
"""
#self.source_name = self.getEnumSourceNames().LINKED_DATA.value
#self.logger.info('FsF-F2-01M : Trying to request RDF metadata from -: {}'.format(self.source_name))
rdf_metadata = dict()
rdf_response_graph = None
#if self.rdf_graph is None:
requestHelper: RequestHelper = RequestHelper(self.target_url, self.logger)
requestHelper.setAcceptType(AcceptTypes.rdf)
neg_source, rdf_response = requestHelper.content_negotiate('FsF-F2-01M')
#required for metric knowledge representation
if requestHelper.response_content is not None:
self.content_type = requestHelper.content_type
if self.content_type is not None:
self.content_type = self.content_type.split(';', 1)[0]
#handle JSON-LD
DCAT = Namespace('http://www.w3.org/ns/dcat#')
if self.content_type == 'application/ld+json':
self.logger.info('FsF-F2-01M : Try to parse RDF (JSON-LD) from -: %s' % (self.target_url))
try:
                        #this is a workaround for an rdflib JSON-LD parsing issue proposed here: https://github.com/RDFLib/rdflib/issues/1423
try:
if rdf_response['@context'].startswith('http://schema.org'):
rdf_response['@context'] = 'https://schema.org/docs/jsonldcontext.json'
except Exception as e:
pass
rdf_response = jsonld.expand( rdf_response)
rdf_response = json.dumps(rdf_response)
jsonldgraph = rdflib.ConjunctiveGraph()
rdf_response_graph = jsonldgraph.parse(data=rdf_response, format='json-ld')
rdf_response_graph = jsonldgraph
except Exception as e:
print('JSON-LD parsing error',e)
self.logger.info('FsF-F2-01M : Parsing error, failed to extract JSON-LD -: {}'.format(e))
else:
# parse RDF
parseformat = re.search(r'[\/+]([a-z0-9]+)$', str(requestHelper.content_type))
if parseformat:
if 'html' not in str(parseformat[1]) and 'zip' not in str(parseformat[1]) :
RDFparsed = False
self.logger.info('FsF-F2-01M : Try to parse RDF from -: %s' % (self.target_url))
while not RDFparsed:
try:
graph = rdflib.Graph(identifier = self.target_url)
graph.parse(data=rdf_response, format=parseformat[1])
rdf_response_graph = graph
RDFparsed = True
except Exception as e:
errorlinematch = re.search(r'\sline\s([0-9]+)',str(e))
if errorlinematch:
badline = int(errorlinematch[1])
self.logger.warning(
'FsF-F2-01M : Failed to parse RDF, trying to fix and retry parsing everything before line -: %s ' % str(badline))
splitRDF = rdf_response.splitlines()
if len(splitRDF) >=1 and badline <= len(splitRDF) and badline > 1:
rdf_response = b'\n'.join(splitRDF[:badline-1])
else:
RDFparsed = True # end reached
else:
RDFparsed = True # give up
if not RDFparsed:
continue
else:
self.logger.warning(
'FsF-F2-01M : Failed to parse RDF -: %s %s' % (self.target_url, str(e)))
else:
self.logger.info('FsF-F2-01M : Seems to be HTML not RDF, therefore skipped parsing RDF from -: %s' % (self.target_url))
else:
self.logger.info('FsF-F2-01M : Could not determine RDF serialisation format for -: {}'.format(self.target_url))
#else:
# neg_source, rdf_response = 'html', self.rdf_graph
rdf_metadata = self.get_metadata_from_graph(rdf_response_graph)
return self.source_name, rdf_metadata
def get_default_metadata(self, g):
"""Get the default metadata given the RDF graph.
Parameters
----------
g : RDF.ConjunctiveGraph
RDF Conjunctive Graph object
Returns
------
dict
a dictionary of metadata in RDF graph
"""
meta = dict()
try:
if (len(g) > 1):
self.logger.info('FsF-F2-01M : Trying to query generic SPARQL on RDF')
r = g.query(Mapper.GENERIC_SPARQL.value)
#this will only return the first result set (row)
for row in sorted(r):
for l, v in row.asdict().items():
if l is not None:
if l in [
'references', 'source', 'isVersionOf', 'isReferencedBy', 'isPartOf', 'hasVersion',
'replaces', 'hasPart', 'isReplacedBy', 'requires', 'isRequiredBy'
]:
if not meta.get('related_resources'):
meta['related_resources'] = []
meta['related_resources'].append({'related_resource': str(v), 'relation_type': l})
else:
meta[l] = str(v)
break
else:
self.logger.info(
'FsF-F2-01M : Graph seems to contain only one triple, skipping core metadata element test')
except Exception as e:
self.logger.info('FsF-F2-01M : SPARQLing error -: {}'.format(e))
if len(meta) <= 0:
goodtriples = []
has_xhtml = False
for t in list(g):
# exclude xhtml properties/predicates:
if not '/xhtml/vocab' in t[2]:
goodtriples.append(t)
else:
has_xhtml = True
if has_xhtml:
self.logger.info('FsF-F2-01M : Found RDFa like triples but at least some of them seem to be XHTML properties which are excluded')
if len(goodtriples) > 1:
meta['object_type'] = 'Other'
self.logger.info(
'FsF-F2-01M : Could not find core metadata elements through generic SPARQL query on RDF but found '
+ str(len(g)) + ' triples in the given graph')
else:
self.logger.info('FsF-F2-01M : Found some core metadata elements through generic SPARQL query on RDF -: ' +
str(meta.keys()))
return meta
#TODO rename to: get_core_metadata
def get_metadata(self, g, item, type='Dataset'):
"""Get the core (domain agnostic, DCAT, DC, schema.org) metadata given in RDF graph.
Parameters
----------
g : RDF.ConjunctiveGraph
RDF Conjunctive Graph object
        item : Any
            RDF node (subject) whose metadata should be collected
type : str
type of object
Returns
------
dict
a dictionary of core metadata in RDF graph
"""
DCAT = Namespace('http://www.w3.org/ns/dcat#')
SMA = Namespace('http://schema.org/')
meta = dict()
#default sparql
meta = self.get_default_metadata(g)
self.logger.info('FsF-F2-01M : Trying to get some core domain agnostic (DCAT, DC, schema.org) metadata from RDF graph')
meta['object_identifier'] = (g.value(item, DC.identifier) or
g.value(item, DCTERMS.identifier) or
g.value(item, SDO.identifier))
'''
if self.source_name != self.getEnumSourceNames().RDFA.value:
meta['object_identifier'] = str(item)
meta['object_content_identifier'] = [{'url': str(item), 'type': 'application/rdf+xml'}]
'''
meta['title'] = str(g.value(item, DC.title) or g.value(item, DCTERMS.title) or g.value(item, SMA.name) or g.value(item, SDO.name))
meta['summary'] = str(g.value(item, DC.description) or g.value(item, DCTERMS.description) or
g.value(item, SMA.description) or g.value(item, SDO.description)
or g.value(item, SMA.abstract) or g.value(item, SDO.abstract))
meta['publication_date'] = str(g.value(item, DC.date) or g.value(item, DCTERMS.date) or
g.value(item, DCTERMS.issued)
or g.value(item, SMA.datePublished) or g.value(item, SMA.dateCreated)
or g.value(item, SDO.datePublished) or g.value(item, SDO.dateCreated)
)
meta['publisher'] = str(g.value(item, DC.publisher) or g.value(item, DCTERMS.publisher) or
g.value(item, SMA.publisher) or g.value(item, SDO.publisher) or g.value(item, SMA.provider) or g.value(item, SDO.provider))
meta['keywords'] = []
for keyword in (list(g.objects(item, DCAT.keyword)) + list(g.objects(item, DCTERMS.subject)) +
list(g.objects(item, DC.subject))
or list(g.objects(item, SMA.keywords)) or list(g.objects(item, SDO.keywords))):
meta['keywords'].append(str(keyword))
#TODO creators, contributors
meta['creator'] = str(g.value(item, DC.creator))
meta['license'] = str(g.value(item, DCTERMS.license))
meta['related_resources'] = []
meta['access_level'] = str(g.value(item, DCTERMS.accessRights) or g.value(item, DCTERMS.rights) or
g.value(item, DC.rights)
or g.value(item, SDO.conditionsOfAccess) or g.value(item, SMA.conditionsOfAccess) )
for dctrelationtype in [
DCTERMS.references, DCTERMS.source, DCTERMS.isVersionOf, DCTERMS.isReferencedBy, DCTERMS.isPartOf,
DCTERMS.hasVersion, DCTERMS.replaces, DCTERMS.hasPart, DCTERMS.isReplacedBy, DCTERMS.requires,
DCTERMS.isRequiredBy
]:
dctrelation = g.value(item, dctrelationtype)
if dctrelation:
meta['related_resources'].append({
'related_resource': str(dctrelation),
'relation_type': str(dctrelationtype)
})
for schemarelationtype in [
SMA.isPartOf, SMA.includedInDataCatalog, SMA.subjectOf, SMA.isBasedOn, SMA.sameAs,
SDO.isPartOf, SDO.includedInDataCatalog, SDO.subjectOf, SDO.isBasedOn, SDO.sameAs
]:
schemarelation = g.value(item, schemarelationtype)
if schemarelation:
meta['related_resources'].append({
'related_resource': str(schemarelation),
'relation_type': str(schemarelationtype)
})
if meta:
meta['object_type'] = type
self.logger.info(
'FsF-F2-01M : Found some core domain agnostic (DCAT, DC, schema.org) metadata from RDF graph -: '+str(meta.keys()))
return meta
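    # Illustrative (hypothetical) shape of the dict returned above, based on the assignments in this method:
    #     {'object_identifier': ..., 'title': ..., 'summary': ..., 'publication_date': ...,
    #      'publisher': ..., 'keywords': [...], 'creator': ..., 'license': ..., 'access_level': ...,
    #      'related_resources': [{'related_resource': ..., 'relation_type': ...}], 'object_type': 'Dataset'}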
def get_ontology_metadata(self, graph):
"""Get the ontology given RDF graph.
Parameters
----------
graph : RDF.ConjunctiveGraph
RDF Conjunctive Graph object
Returns
------
dict
            a dictionary of ontology metadata extracted from the RDF graph
"""
ont_metadata = dict()
OWL = Namespace('http://www.w3.org/2002/07/owl#')
SKOS = Namespace('http://www.w3.org/2004/02/skos/core#')
ontologies = list(graph[:RDF.type:OWL.Ontology])
if len(ontologies) > 0:
            self.logger.info('FsF-F2-01M : RDF Graph seems to represent an OWL Ontology')
ont_metadata = self.get_metadata(graph, ontologies[0], type='DefinedTermSet')
else:
ontologies = list(graph[:RDF.type:SKOS.ConceptScheme]) or list(graph[:RDF.type:SKOS.Collection])
if len(ontologies) > 0:
self.logger.info('FsF-F2-01M : RDF Graph seems to represent a SKOS Ontology')
ont_metadata = self.get_metadata(graph, ontologies[0], type='DefinedTermSet')
else:
self.logger.info('FsF-F2-01M : Could not parse Ontology RDF')
return ont_metadata
def get_schemaorg_metadata(self, graph):
#TODO: this is only some basic RDF/RDFa schema.org parsing... complete..
#we will only test creative works and subtypes
creative_work_types = Preprocessor.get_schema_org_creativeworks()
creative_work = None
schema_metadata={}
SMA = Namespace('http://schema.org/')
schema_org_nodes = []
        # use only schema.org properties and create a graph from these.
        # This is important e.g. when schema.org is encoded as RDFa and various namespaces are used
creative_work_type = 'Dataset'
try:
for root in rdflib.util.find_roots(graph, RDF.type):
# we have https and http as allowed schema.org namespace protocols
if 'schema.org' in str(root):
root_name = str(root).rsplit('/')[-1].strip()
if root_name in creative_work_types:
creative_works = list(graph[:RDF.type:root])
# Finding the schema.org root
if len(list(graph.subjects(object=creative_works[0]))) == 0:
creative_work = creative_works[0]
creative_work_type = root_name
break
except Exception as e:
self.logger.info('FsF-F2-01M : Schema.org RDF graph parsing failed -: '+str(e))
if creative_work:
schema_metadata = self.get_metadata(graph, creative_work, type = creative_work_type)
# creator
creator_node = None
if graph.value(creative_work, SMA.creator):
creator_node = SMA.creator
elif graph.value(creative_work, SDO.creator):
creator_node = SDO.creator
elif graph.value(creative_work, SMA.author):
creator_node = SMA.author
elif graph.value(creative_work, SDO.author):
creator_node = SDO.author
if creator_node:
creators = graph.objects(creative_work, creator_node)
creator_name = []
for creator in creators:
creator_name.append((graph.value(creator, SMA.familyName) or graph.value(creator, SDO.familyName)
or graph.value(creator, SDO.name) or graph.value(creator, SMA.name) ))
if len(creator_name) > 0:
schema_metadata['creator'] = creator_name
return schema_metadata
def get_dcat_metadata(self, graph):
"""Get the Data Catalog (DCAT) metadata given RDF graph.
Parameters
----------
graph : RDF.ConjunctiveGraph
RDF Conjunctive Graph object
Returns
------
dict
            a dictionary of DCAT metadata extracted from the RDF graph
"""
dcat_metadata = dict()
DCAT = Namespace('http://www.w3.org/ns/dcat#')
datasets = list(graph[:RDF.type:DCAT.Dataset])
if len(datasets) > 0:
dcat_metadata = self.get_metadata(graph, datasets[0], type='Dataset')
# publisher
if idutils.is_url(dcat_metadata.get('publisher')) or dcat_metadata.get('publisher') is None:
publisher = graph.value(datasets[0], DCTERMS.publisher)
# FOAF preferred DCAT compliant
publisher_name = graph.value(publisher, FOAF.name)
dcat_metadata['publisher'] = publisher_name
# in some cases a dc title is used (not exactly DCAT compliant)
if dcat_metadata.get('publisher') is None:
publisher_title = graph.value(publisher, DCTERMS.title)
dcat_metadata['publisher'] = publisher_title
# creator
if idutils.is_url(dcat_metadata.get('creator')) or dcat_metadata.get('creator') is None:
creators = graph.objects(datasets[0], DCTERMS.creator)
creator_name = []
for creator in creators:
creator_name.append(graph.value(creator, FOAF.name))
if len(creator_name) > 0:
dcat_metadata['creator'] = creator_name
# distribution
distribution = graph.objects(datasets[0], DCAT.distribution)
dcat_metadata['object_content_identifier'] = []
for dist in distribution:
dtype, durl, dsize = None, None, None
if not (graph.value(dist, DCAT.accessURL) or graph.value(dist, DCAT.downloadURL)):
self.logger.info('FsF-F2-01M : Trying to retrieve DCAT distributions from remote location -:' +
str(dist))
try:
distgraph = rdflib.Graph()
disturl = str(dist)
distresponse = requests.get(disturl, headers={'Accept': 'application/rdf+xml'})
if distresponse.text:
distgraph.parse(data=distresponse.text, format='application/rdf+xml')
extdist = list(distgraph[:RDF.type:DCAT.Distribution])
durl = (distgraph.value(extdist[0], DCAT.accessURL) or
distgraph.value(extdist[0], DCAT.downloadURL))
dsize = distgraph.value(extdist[0], DCAT.byteSize)
dtype = distgraph.value(extdist[0], DCAT.mediaType)
self.logger.info('FsF-F2-01M : Found DCAT distribution URL info from remote location -:' +
str(durl))
except Exception as e:
self.logger.info('FsF-F2-01M : Failed to retrieve DCAT distributions from remote location -:' +
str(dist))
#print(e)
durl = str(dist)
else:
durl = (graph.value(dist, DCAT.accessURL) or graph.value(dist, DCAT.downloadURL))
#taking only one just to check if licence is available
dcat_metadata['license'] = graph.value(dist, DCTERMS.license)
# TODO: check if this really works..
dcat_metadata['access_rights'] = (graph.value(dist, DCTERMS.accessRights) or
graph.value(dist, DCTERMS.rights))
dtype = graph.value(dist, DCAT.mediaType)
            dsize = graph.value(dist, DCAT.byteSize)
if durl or dtype or dsize:
if idutils.is_url(str(durl)):
dtype = '/'.join(str(dtype).split('/')[-2:])
dcat_metadata['object_content_identifier'].append({
'url': str(durl),
'type': dtype,
'size': str(dsize)
})
if dcat_metadata['object_content_identifier']:
            self.logger.info('FsF-F3-01M : Found data links in DCAT metadata -: ' +
str(dcat_metadata['object_content_identifier']))
#TODO: add provenance metadata retrieval
#else:
# self.logger.info('FsF-F2-01M : Found DCAT content but could not correctly parse metadata')
#in order to keep DCAT in the found metadata list, we need to pass at least one metadata value..
#dcat_metadata['object_type'] = 'Dataset'
return dcat_metadata
#rdf_meta.query(self.metadata_mapping.value)
#print(rdf_meta)
#return None
def get_content_type(self):
"""Get the content type.
Returns
------
str
a string of content type
"""
return self.content_type
|
import sys
import typing
import numba as nb
import numpy as np
@nb.njit(cache=True)
def solve() -> typing.NoReturn:
...
def main() -> typing.NoReturn:
s = input()
if s[-2:] == 'er':
print('er')
else:
print('ist')
main()
|
def select2_processor(idps):
"""
A simple processor for Select2, to adjust the data ready to use for Select2.
See https://select2.org/data-sources/formats
"""
def change_idp(idp):
idp['id'] = idp.pop('entity_id')
idp['text'] = idp.pop('name')
return idp
return [change_idp(idp) for idp in idps]
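# A minimal illustration of the transformation (the IdP entry below is hypothetical):
# >>> select2_processor([{'entity_id': 'https://idp.example.org/saml', 'name': 'Example IdP'}])
# [{'id': 'https://idp.example.org/saml', 'text': 'Example IdP'}]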
|
#!/usr/bin/env python
"""
from http://wdc.kugi.kyoto-u.ac.jp/igrf/gggm/
"""
import pytest
from geo2mag import geo2mag
@pytest.mark.parametrize(
"glat,glon,mlat,mlon",
[
(79.3, 288.59, 88.880248, 11.379883),
(
[79.3, 79.3, 0, 90, -90, 45],
[288.59, 288.583, 0, 90, -90, 150],
[88.880248, 88.89, 2.88, 80.31, -80.31, 37.36],
[11.379883, 11.68, 72.85, 180, 0, -142.82],
),
],
)
def test_geo2mag(glat, glon, mlat, mlon):
ma, mo = geo2mag(glat, glon)
assert ma == pytest.approx(mlat, rel=1e-3, abs=0.3)
assert mo == pytest.approx(mlon, rel=1e-3, abs=0.4)
if not isinstance(glat, float):
assert len(mlat) == len(ma)
if __name__ == "__main__":
pytest.main()
|
import os
import glob
import pandas as pd
from datetime import date
from tempfile import mkdtemp
from shutil import rmtree
from unittest import main, TestCase
from serenata_toolbox.chamber_of_deputies.dataset import Dataset
class TestChamberOfDeputiesDataset(TestCase):
def setUp(self):
self.path = mkdtemp(prefix='serenata-')
print(self.path)
self.subject = Dataset(self.path)
self.years = [year for year in range(2009, date.today().year + 1)]
def tearDown(self):
rmtree(self.path, ignore_errors=True)
def test_fetch_translate_clean_integration(self):
self.subject.fetch()
files = ["Ano-{}.csv".format(n) for n in self.years]
files.append('datasets-format.html')
for name in files:
file_path = os.path.join(self.path, name)
assert(os.path.exists(file_path))
self.subject.translate()
for name in ["reimbursements-{}.xz".format(n) for n in self.years]:
file_path = os.path.join(self.path, name)
assert(os.path.exists(file_path))
self.subject.clean()
file_path = os.path.join(self.path, 'reimbursements.xz')
assert(os.path.exists(file_path))
# test for subquota translation
dataset = pd.read_csv(file_path, compression='xz')
all_subquotas = ['Maintenance of office supporting parliamentary activity',
'Locomotion, meal and lodging',
'Fuels and lubricants',
'Consultancy, research and technical work',
'Publicity of parliamentary activity',
'Purchase of office supplies',
'Software purchase or renting; Postal services; Subscriptions',
'Security service provided by specialized company',
'Flight tickets',
'Telecommunication',
'Postal services',
'Publication subscriptions',
'Congressperson meal',
'Lodging, except for congressperson from Distrito Federal',
'Automotive vehicle renting or watercraft charter',
'Aircraft renting or charter of aircraft',
'Automotive vehicle renting or charter',
'Watercraft renting or charter',
'Taxi, toll and parking',
'Terrestrial, maritime and fluvial tickets',
'Participation in course, talk or similar event',
'Flight ticket issue']
present_subquotas = pd.unique(dataset['subquota_description'])
for subquota in present_subquotas:
assert(subquota in all_subquotas)
if __name__ == '__main__':
main()
|
import warnings
from .exceptions import HumioMaxResultsExceededWarning, HumioBackendWarning
def warning_on_one_line(message, category, filename, lineno, file=None, line=None):
return "%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)
def patched_poll_until_done(self, **kwargs):
raise NotImplementedError(
"Method disabled, see humiolib issue #14. Use poll() instead since this method can get stuck polling forever."
)
def poll_safe(self, raise_warnings=True, **kwargs):
"""
Calls `poll()` on the queryjob and adds an extra attribute `warnings` to
the resulting PollResult containing a list of any returned warnings.
Parameters
----------
    raise_warnings : bool, optional
        Issue a warning each time one is encountered, by default True
Warns
-----
HumioBackendWarning
When the Humio backend has returned a warning.
    HumioMaxResultsExceededWarning
        When the QueryJob has returned a partial result due to pagination.
    Examples
    --------
    >>> import warnings
>>> warnings.simplefilter('ignore', humioapi.HumioBackendWarning)
>>> warnings.simplefilter('ignore', humioapi.HumioMaxResultsExceededWarning)
Returns
-------
PollResult
A humiolib poll result object with events, metadata and warnings.
"""
result = self.poll()
result.warnings = []
warnings.formatwarning = warning_on_one_line
for message in result.metadata.get("warnings", []):
result.warnings.append(message)
if raise_warnings:
warnings.warn(message, HumioBackendWarning, stacklevel=0)
if result.metadata.get("extraData", {}).get("hasMoreEvents", "") == "true":
message = (
"The search results exceeded the limits for this API."
" There are more results available in the backend than available here."
" Possible workaround: pipe to head() or tail() with limit=n."
)
result.warnings.append(message)
if raise_warnings:
warnings.warn(message, HumioMaxResultsExceededWarning, stacklevel=0)
return result
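# Sketch of how these helpers might be attached (assumes humiolib exposes a QueryJob class with
# poll() and poll_until_done(); the exact import path is an assumption and may differ by version):
#
#     from humiolib.QueryJob import QueryJob
#     QueryJob.poll_until_done = patched_poll_until_done
#     QueryJob.poll_safe = poll_safe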
|
from flask import Blueprint, render_template
from solarvibes.models import Farm, Field, Pump, Agripump
from flask_login import current_user
from flask_security import login_required
from datetime import datetime, timezone
settings = Blueprint(
'settings',
__name__,
template_folder="templates"
)
##################
# USER FARMS
##################
@settings.route('/', methods=['GET'])
@login_required
def index():
# USER FARMS
user = current_user
farms = user.farms.all()
def progress():
for farm in farms:
# print(farm)
for field in farm.fields.all():
# print(field)
for crop in field.crops.all():
# print(crop)
cycle_days_so_far = ( datetime.now(timezone.utc) - field.field_cultivation_start_date ).days
cycle_days = crop._dtm + crop._dtg
progress = (cycle_days_so_far / cycle_days) * 100
# print (cycle_days_so_far, cycle_days, progress)
progress()
timenow = datetime.now(timezone.utc)
# USER AGRIMODULES
user = current_user
agrimodules = user.agrimodules.all()
# def list_agrimodules():
# for agrimodule in agrimodules:
# print(agrimodule, agrimodule.identifier)
# for agripump in agrimodule.agripumps.all():
# print(agripump, agripump.identifier)
# for agrisensor in agrimodule.agrisensors.all():
# print(agrisensor, agrisensor.identifier)
# list_agrimodules()
# USER PUMPS
pumps = user.pumps.all()
farm_db = Farm.query
field_db = Field.query
pump_db = Pump.query
agripump_db = Agripump.query
return render_template('settings/index.html', farms = farms, timenow = timenow, agrimodules=agrimodules, farm_db = farm_db, field_db = field_db, pump_db = pump_db, pumps=pumps, agripump_db = agripump_db)
|
#!/usr/bin/env python
from shutil import copytree, copy2
from pathlib import Path
fg_src_dir = Path("~/Goodhertz/fontgoggles/Lib/fontgoggles").expanduser()
fg_dst_dir = Path(__file__).parent / "drafting/fontgoggles"
fg_dst_dir.mkdir(exist_ok=True)
copy2(fg_src_dir.parent.parent / "LICENSE.txt", fg_dst_dir / "LICENSE.txt")
(fg_dst_dir / "README.md").write_text("This is an automated import of github.com/goodhertz/fontgoggles, to avoid hosting this code on pypi itself")
for submodule in ["compile", "font", "misc"]:
copytree(fg_src_dir / submodule, fg_dst_dir / submodule, dirs_exist_ok=True)
for pyf in (fg_dst_dir / submodule).glob("**/*.py"):
if pyf.stem in ["unicodeNameList"]:
pyf.unlink()
else:
pycode = pyf.read_text()
pycode = pycode.replace(""""fontgoggles.font""", """"drafting.fontgoggles.font""")
pycode = pycode.replace(""""fontgoggles.compile""", """"drafting.fontgoggles.compile""")
pyf.write_text(pycode)
|
from unittest.mock import MagicMock, patch
from tornado.testing import AsyncTestCase, gen_test
from fixtures.exploits import Exploit
from structs import Scan, TransportProtocol, Port, ScanContext
from utils.task import Task
class TaskTest(AsyncTestCase):
"""
Testing task behaviour
"""
@patch('utils.task.time.time', MagicMock(return_value=17))
def setUp(self):
"""
Set up init variables
"""
super().setUp()
self.executor = MagicMock()
self.executor.kudu_queue = MagicMock()
self.executor.exploits = MagicMock()
self.context = ScanContext(aucote=self.executor, scanner=MagicMock(scan=Scan()))
self.task = Task(context=self.context)
def test_init(self):
"""
Test init and properties
"""
self.assertEqual(self.task.aucote, self.executor)
self.assertEqual(self.task.creation_time, 17)
self.assertEqual(self.task.kudu_queue, self.executor.kudu_queue)
@gen_test
async def test_call(self):
"""
Test call
"""
with self.assertRaises(NotImplementedError):
await self.task()
def test_send_msg(self):
"""
Task sending message
"""
self.task.send_msg("TEST")
self.executor.kudu_queue.send_msg.assert_called_once_with("TEST")
def test_store_scan(self):
port = Port(node=None, number=80, transport_protocol=TransportProtocol.UDP)
port.scan = Scan(end=25.0)
exploit = Exploit(exploit_id=5)
self.task.store_scan_end(exploits=[exploit], port=port)
self.task.aucote.storage.save_security_scans.assert_called_once_with(exploits=[exploit], port=port,
scan=self.context.scanner.scan)
def test_reload_config(self):
result = self.task.reload_config()
self.assertIsNone(result)
def test_storage_property(self):
self.assertEqual(self.executor.storage, self.task.storage)
|
n = int(input("Enter the number whose factorial you want : "))
def fac(n):
if n >= 0:
y = 1
else:
y = -1
x = 1
for i in range(y, n+y, y):
x = x*i
return x
x = fac(n)
print("Factorial of {} is : ".format(n), x)
|
from time import time
import pytest
from vk import API
from vk.exceptions import VkAPIError
def test_missed_v_param(access_token, v):
"""
Missed version on API instance
"""
api = API(access_token)
with pytest.raises(VkAPIError, match=r'8\. Invalid request: v is required'):
api.getServerTime()
assert api.getServerTime(v=v) > time() - 10
def test_incorrect_token(v):
"""
Incorrect token on API instance
"""
api = API('?', v=v)
with pytest.raises(VkAPIError, match=r'5\. User authorization failed') as exc_info:
api.getServerTime()
exc = exc_info.value
assert exc.is_access_token_incorrect()
assert not exc.is_captcha_needed()
assert exc.captcha_sid is None
assert exc.captcha_img is None
|
#!/usr/bin/env python
import numpy as np
import sys
def main():
    # the integers represent the sizes of the cubes in the initial stack
    stack1 = list(range(1, int(sys.argv[1]) + 1))
stack2 = []
stack3 = []
global Game
    Game = np.array([stack1, stack2, stack3], dtype=object)
print("Initial Game State: \n")
print("Stack1: %s \nStack2: %s \nStack3: %s \n" %
(Game[0], Game[1], Game[2]))
solve(Game)
def moveCube(source, target):
target.append(source[-1])
del source[-1]
print("Stack1: %s \nStack2: %s \nStack3: %s \n" %
(Game[0], Game[1], Game[2]))
def move (n, source, target, auxiliary):
if n == 1:
moveCube(source, target)
else:
# print(n) # print out recursion depth
move(n-1, source, auxiliary, target)
moveCube(source, target)
move(n-1, auxiliary, target, source)
def solve(Game):
move(len(Game[0]), Game[0], Game[2], Game[1])
print("Initial Game State: \n")
print("Stack1: %s \nStack2: %s \nStack3: %s \n" %
(Game[0], Game[1], Game[2]))
if __name__ == "__main__":
main()
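# Note: this is the classic recursive Tower of Hanoi strategy, so solving n cubes always
# takes 2**n - 1 moves, and each move prints the current state of the three stacks.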
|
'''
Max of 3 numbers
'''
number_1 = int(input())
number_2 = int(input())
number_3 = int(input())
if (number_1 < number_2):
number_1 = number_2
if (number_1 < number_3):
print(number_3)
else:
print(number_1)
|
# coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
from pants.base.build_environment import get_buildroot
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class DepExportsIntegrationTest(PantsRunIntegrationTest):
SRC_PREFIX = 'testprojects/tests'
SRC_TYPES = ['java', 'scala']
SRC_PACKAGE = 'org/pantsbuild/testproject/exports'
@classmethod
def hermetic(cls):
return True
def test_compilation(self):
for lang in self.SRC_TYPES:
path = os.path.join(self.SRC_PREFIX, lang, self.SRC_PACKAGE)
pants_run = self.run_pants(['list', '{}::'.format(path)])
self.assert_success(pants_run)
target_list = pants_run.stdout_data.strip().split('\n')
for target in target_list:
pants_run = self.run_pants(['compile', '--lint-scalafmt-skip', target])
self.assert_success(pants_run)
def modify_exports_and_compile(self, target, modify_file):
with self.temporary_sourcedir() as tmp_src:
src_dir = os.path.relpath(os.path.join(tmp_src, os.path.basename(self.SRC_PACKAGE)), get_buildroot())
target_dir, target_name = target.rsplit(':', 1)
shutil.copytree(target_dir, src_dir)
with self.temporary_workdir() as workdir:
cmd = ['compile', '--lint-scalafmt-skip', '{}:{}'.format(src_dir, target_name)]
pants_run = self.run_pants_with_workdir(command=cmd, workdir=workdir)
self.assert_success(pants_run)
with open(os.path.join(src_dir, modify_file), 'ab') as fh:
fh.write(b'\n')
pants_run = self.run_pants_with_workdir(command=cmd, workdir=workdir)
self.assert_success(pants_run)
self.assertTrue('{}:{}'.format(src_dir, target_name) in pants_run.stdout_data)
def test_invalidation(self):
for lang in self.SRC_TYPES:
path = os.path.join(self.SRC_PREFIX, lang, self.SRC_PACKAGE)
target = '{}:D'.format(path)
self.modify_exports_and_compile(target, 'A.{}'.format(lang))
self.modify_exports_and_compile(target, 'B.{}'.format(lang))
def test_non_exports(self):
pants_run = self.run_pants(['compile', '--lint-scalafmt-skip',
'testprojects/tests/scala/org/pantsbuild/testproject/non_exports:C'])
self.assert_failure(pants_run)
self.assertIn('FAILURE: Compilation failure: Failed jobs: '
'compile(testprojects/tests/scala/org/pantsbuild/testproject/non_exports:C)',
pants_run.stdout_data)
class DepExportsThriftTargets(PantsRunIntegrationTest):
def test_exports_for_thrift_targets(self):
pants_run = self.run_pants(['compile', 'testprojects/src/thrift/org/pantsbuild/thrift_exports:C-with-exports'])
self.assert_success(pants_run)
pants_run = self.run_pants(['compile', 'testprojects/src/thrift/org/pantsbuild/thrift_exports:C-without-exports'])
self.assert_failure(pants_run)
self.assertIn('Symbol \'type org.pantsbuild.thrift_exports.thriftscala.FooA\' is missing from the classpath',
pants_run.stdout_data)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
__author__ = 'andyguo'
from dayu_file_format.curve.data_structure import Point2D
import pytest
class TestPoint2D(object):
def test___init__(self):
p = Point2D(0, 1)
assert p.x == 0
assert p.y == 1
assert type(p.x) is float
assert type(p.y) is float
p = Point2D(0.0, 0.0)
assert p.x == 0
assert p.y == 0
assert type(p.x) is float
assert type(p.y) is float
with pytest.raises(ValueError) as e:
Point2D('12', 12)
def test___eq__(self):
assert Point2D(100, 100) == Point2D(100, 100)
assert Point2D(100, 100) == Point2D(100.0, 100.0)
assert Point2D(100.0, 100.9) != Point2D(100.0, 100.0)
assert Point2D(0,0) != [0,0]
def test___add__(self):
assert Point2D(0, 0) + Point2D(1, 2) == Point2D(1, 2)
assert Point2D(1, 1) + 2 == Point2D(3, 3)
assert Point2D(1, 1) + 2.0 == Point2D(3, 3)
assert Point2D(1, 1) + (-2.0) == Point2D(-1, -1)
with pytest.raises(TypeError) as e:
Point2D(1, 1) + [1, 2]
def test___iadd__(self):
p = Point2D(0, 0)
p += 1
assert p == Point2D(1, 1)
p += Point2D(2, 3)
assert p == Point2D(3, 4)
p += (-4.0)
assert p == Point2D(-1, 0)
with pytest.raises(TypeError) as e:
p += [1, 2]
def test___sub__(self):
assert Point2D(0, 0) - Point2D(1, 2) == Point2D(-1, -2)
assert Point2D(1, 1) - 2 == Point2D(-1, -1)
assert Point2D(1, 1) - 2.0 == Point2D(-1, -1)
assert Point2D(1, 1) - (-2.0) == Point2D(3, 3)
with pytest.raises(TypeError) as e:
Point2D(1, 1) - [1, 2]
def test___isub__(self):
p = Point2D(0, 0)
p -= 1
assert p == Point2D(-1, -1)
p -= Point2D(2, 3)
assert p == Point2D(-3, -4)
p -= (-4.0)
assert p == Point2D(1, 0)
with pytest.raises(TypeError) as e:
p -= [1, 2]
def test___neg__(self):
assert -Point2D(1, 1) == Point2D(-1, -1)
assert -Point2D(-3, 4) == Point2D(3, -4)
assert -Point2D(0, 0) == Point2D(0, 0)
def test___mul__(self):
assert Point2D(0, 0) * Point2D(1, 2) == Point2D(0, 0)
assert Point2D(1, 1) * 2 == Point2D(2, 2)
assert Point2D(1, 1) * 2.0 == Point2D(2, 2)
assert Point2D(1, 1) * (-2.0) == Point2D(-2, -2)
with pytest.raises(TypeError) as e:
Point2D(1, 1) * [1, 2]
def test___imul__(self):
p = Point2D(1, 2)
p *= 1
assert p == Point2D(1, 2)
p *= Point2D(2, 3)
assert p == Point2D(2, 6)
p *= (-4.0)
assert p == Point2D(-8, -24)
with pytest.raises(TypeError) as e:
p *= [1, 2]
def test___div__(self):
assert Point2D(0, 0) / Point2D(1, 2) == Point2D(0, 0)
assert Point2D(1, 1) / 2 == Point2D(0.5, 0.5)
assert Point2D(1, 1) / 2.0 == Point2D(0.5, 0.5)
assert Point2D(1, 1) / (-2.0) == Point2D(-0.5, -0.5)
with pytest.raises(TypeError) as e:
Point2D(1, 1) / [1, 2]
with pytest.raises(ZeroDivisionError) as e:
Point2D(100, 24) / 0
with pytest.raises(ZeroDivisionError) as e:
Point2D(100, 24) / Point2D(0, 2)
with pytest.raises(ZeroDivisionError) as e:
Point2D(100, 24) / Point2D(2, 0)
with pytest.raises(ZeroDivisionError) as e:
Point2D(100, 24) / Point2D(0, 0)
def test___idiv__(self):
p = Point2D(1, 2)
p /= 1
assert p == Point2D(1, 2)
p /= Point2D(2, 4)
assert p == Point2D(0.5, 0.5)
p /= (-0.25)
assert p == Point2D(-2, -2)
with pytest.raises(TypeError) as e:
p /= [1, 2]
with pytest.raises(ZeroDivisionError) as e:
p /= 0
with pytest.raises(ZeroDivisionError) as e:
p /= Point2D(0, 2)
with pytest.raises(ZeroDivisionError) as e:
p /= Point2D(2, 0)
with pytest.raises(ZeroDivisionError) as e:
p /= Point2D(0, 0)
def test_dot(self):
assert Point2D(0, 0).dot(Point2D(1, 2)) == 0
assert Point2D(0, 0).dot(Point2D(0, 0)) == 0
assert Point2D(1, 3).dot(Point2D(1, 2)) == 7
assert Point2D(-2, -3).dot(Point2D(1, 2)) == -8
assert Point2D(-2, -3).dot(Point2D(-1, -2)) == 8
assert Point2D(-2, 3).dot(Point2D(1, -2)) == -8
with pytest.raises(TypeError) as e:
Point2D(1, 2).dot([1, 2])
def test_length(self):
assert Point2D(0, 0).length == 0
assert Point2D(1, 2).length == 5 ** 0.5
assert Point2D(3, 4).length == 5
assert Point2D(-3, 4).length == 5
assert Point2D(-3, -4).length == 5
def test_normalize(self):
assert Point2D(1, 0).normalize() == Point2D(1, 0)
assert Point2D(1, 1).normalize() == Point2D(1 / 2 ** 0.5, 1 / 2 ** 0.5)
assert Point2D(-1, 1).normalize() == Point2D(-1 / 2 ** 0.5, 1 / 2 ** 0.5)
assert Point2D(-1, -1).normalize() == Point2D(-1 / 2 ** 0.5, -1 / 2 ** 0.5)
def test_to_list(self):
assert Point2D(0, 0).to_list() == [0, 0]
assert Point2D(1, 1).to_list() == [1, 1]
assert Point2D(-1, 1).to_list() == [-1, 1]
|
import matplotlib.pyplot as plt
time = [600, 800, 1000, 1200, 1400, 1600, 1800, 2000, 2200, 2400]
newreno = [0.077068, 0.021771, 0.046267, 0.043259, 0.024609, 0.021640, 0.020837, 0.018810, 0.015794, 0.018096]
vegas = [0.305460, 0.183872, 0.162933, 0.135204, 0.103017, 0.126727, 0.089226, 0.087788, 0.079121, 0.079745]
newrenoObO = [0.095395, 0.057802, 0.053649, 0.038289, 0.033037, 0.038582, 0.032855, 0.028214, 0.024470, 0.020238]
vegasObO = [0.016378, 0.006232, 0.009765, 0.004145, 0.003522, 0.007682, 0.005428, 0.003665, 0.004471, 0.002048]
plt.plot(time, newreno, 'bo-', label="NewReno S1")
plt.plot(time, newrenoObO, 'bo:', label="NewReno S2")
plt.plot(time, vegas, 'ro-', label="Vegas S1")
plt.plot(time, vegasObO, 'ro:', label="Vegas S2")
plt.title("Loss rates of TCP agents Newreno and Vegas in two settings \n over different periods")
plt.xlabel("duration")
plt.ylabel("loss rate")
plt.text(1700, 0.2, "S1: SameTime \nS2: OneByOne")
plt.legend()
plt.show()
|
class CordParser:
"""
    The file should end in ".cord" and should contain the directions and the angles to turn.
f -> forward
b -> backward
r -> right
l -> left
u -> stop drawing
    d -> start drawing
The file should be written as:
f=100
r=20
b=50
..etc
"""
def __init__(self, filename: str, method: str) -> None:
self.valid_chrs = ["f", "b", "r", "l", "u", "d"]
if not filename.endswith(".cord"):
raise TypeError("Filename should end with \".cord\"")
self.file_ = open(filename, method)
self.file_data = self.file_.read().split("\n")
def __enter__(self):
rl = []
for i, d in enumerate(self.file_data):
nd = d.replace(" ", "").split("=")
if nd[0] not in self.valid_chrs:
raise TypeError("Invalid character found")
rl.append({i: nd})
return self.data(rl)
def __exit__(self, type, value, traceback):
self.file_.close()
@staticmethod
def data(list_):
yield from list_
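# Usage sketch (assuming a hypothetical "square.cord" file with lines such as "f=100" and "r=90"):
#
#     with CordParser("square.cord", "r") as commands:
#         for entry in commands:
#             print(entry)  # e.g. {0: ['f', '100']}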
|
# -*- coding: utf-8 -*-
"""
gogo.vos.config
~~~~~~~~~~~~~~~
Used to load grpc server config.
"""
import logging
import sys
import yaml
from gogo.consts import REGISTRY_ENABLED, APP_CONFIG_PATH
from gogo.exc import AppConfigLoadFailException
from gogo.utils import EnvvarReader, cached_property
from gogo.vos.consts import ENV_DEV, SYSLOG_SOCKET, DEFAULT_APP_PORT
from gogo.vos.loader import build_env_loader
logger = logging.getLogger(__name__)
class CoreConfig(object):
"""
Application independent configs, as a yaml file at
`CORE_CONFIG_PATH`, example::
env: 'dev'
cluster: 'elf-master-1'
zookeeper_config:
hosts: localhost:2181
username: root
password:
    which clearly shows the app's dependencies and identity.
:type loader: ConfigLoader
"""
def __init__(self, loader=None):
self.config = None
self.loader = loader if loader else build_env_loader()
def load(self):
self.config = self.loader.load()
return self
@property
def env(self):
return self.config.get('env', ENV_DEV)
@property
def registory_enabled(self):
return self.config.get('registry_enabled', REGISTRY_ENABLED)
@property
def raw_cluster(self):
# TODO(vici) topo in a server like elf
raise NotImplementedError
# return self.config.get('cluster', None)
@property
def cluster(self):
        raise NotImplementedError
@property
def zookeeper_config(self):
return self.config.get('zookeeper_configs', {
'hosts': 'localhost:2181',
'username': 'root',
'password': '',
})
@property
def etcd_config(self):
# FIXME add default etcd config
return self.config.get('etcd_configs', {})
@property
def statsd_uri(self):
uri = self.config.get('statsd_url', None)
if uri:
return uri
if self.env == ENV_DEV:
return "statsd://127.0.0.1:8122"
else:
logger.error('no statsd host configured for env: %s cluster: %s',
self.env, self.cluster)
@property
def syslog_socket(self):
return self.config.get('syslog_socket') or SYSLOG_SOCKET
# TODO(vici) a lot things to do. before load core_config
core_config = None
def load_core_config(raise_exc=False):
raise NotImplementedError
app_config = None
class AppConfig(object):
"""Application related configs, as a yaml file at
`APP_CONFIG_PATH`, e.g.:
app_name: ves.note
settings: note.settings
services:
app: note.service:service
thrift_file: note/note.thrift
requirements: thrift_requirements.txt
"""
DEFAULT_APP_PORT = DEFAULT_APP_PORT
def __init__(self):
self.config = None
self.etrace_enabled = False
self.api_ops_metrics_enabled = False
self.ves_stats_enabled = False
def load(self, config_path=APP_CONFIG_PATH, raise_exc=False):
try:
            with open(config_path) as f:
                self.config = yaml.safe_load(f)
except (IOError, yaml.error.YAMLError):
if raise_exc is True:
raise AppConfigLoadFailException
            logger.error('Cannot load %s, exiting.', config_path)
sys.exit(1)
return self
def _get_conf(self, key, default=None):
"""Help to try config first on key 'services' and then root.
"""
if 'services' in self.config:
return self.config['services'].get(key, default)
return self.config.get(key, default)
@cached_property
def app_uri(self):
app = self._get_conf('app', None)
if app is None:
raise RuntimeError("Missing `app` in app.yaml. ")
return app
def get_grpc_module(self):
pass
class GrpcAppConfig(AppConfig):
TYPE_NAME = 'grpc'
DEFAULT_APP_PORT = DEFAULT_APP_PORT
def load_app_config(raise_exc=False):
"""Load app config lazily
AKA load_server_config.
"""
global app_config
if app_config is None:
# from . import env
# if env.is_grpc_app():
app_config = GrpcAppConfig().load(raise_exc=raise_exc)
return app_config
load_server_config = load_app_config
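# Hypothetical usage sketch (assumes an app.yaml matching the AppConfig docstring exists at
# APP_CONFIG_PATH):
#
#     config = load_app_config()
#     config.app_uri  # -> 'note.service:service'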
|
#!/usr/bin/env python
# Copyright 2021 Calico LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from optparse import OptionParser, OptionGroup
import glob
import json
import os
import sys
import h5py
import numpy as np
import pandas as pd
import slurm
import util
'''
Inject motifs into sequences from a BED file across trained model folds, then
ensemble the per-fold scores (per-fold work is delegated to basenji_motifs_inject.py via SLURM).
'''
################################################################################
# main
################################################################################
def main():
  usage = 'usage: %prog [options] <params_file> <folds_dir> <bed_file>'
parser = OptionParser(usage)
# inject options
inject_options = OptionGroup(parser, 'motif inject options')
inject_options.add_option('--db', dest='database',
default='cisbp', help='Motif database [Default: %default]')
inject_options.add_option('-e', dest='pwm_exp',
default=1, type='float',
help='Exponentiate the position weight matrix values [Default: %default]')
inject_options.add_option('-g', dest='genome',
default='ce11', help='Genome [Default: %default]')
inject_options.add_option('-o', dest='out_dir',
default='inject_out',
help='Output directory [Default: %default]')
inject_options.add_option('--rc', dest='rc',
default=False, action='store_true',
help='Ensemble forward and reverse complement predictions [Default: %default]')
inject_options.add_option('-s', dest='offset',
default=0, type='int',
help='Position offset to inject motif [Default: %default]')
inject_options.add_option('--shifts', dest='shifts',
default='0',
help='Ensemble prediction shifts [Default: %default]')
inject_options.add_option('-t', dest='targets_file',
default=None, type='str',
help='File specifying target indexes and labels in table format')
fold_options = OptionGroup(parser, 'cross-fold options')
fold_options.add_option('--env', dest='conda_env',
default='tf2.6',
help='Anaconda environment [Default: %default]')
fold_options.add_option('--name', dest='name',
default='inject', help='SLURM name prefix [Default: %default]')
fold_options.add_option('-q', dest='queue',
default='geforce',
help='SLURM queue on which to run the jobs [Default: %default]')
  parser.add_option_group(inject_options)
  parser.add_option_group(fold_options)
(options, args) = parser.parse_args()
if len(args) == 3:
params_file = args[0]
folds_dir = args[1]
bed_file = args[2]
else:
parser.error('Must provide parameter and model folds directory and BED file')
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
################################################################
# inject
jobs = []
scores_files = []
for fold_dir in glob.glob('%s/f*' % folds_dir):
fold_name = fold_dir.split('/')[-1]
job_name = '%s-%s' % (options.name, fold_name)
# update output directory
inject_dir = '%s/%s' % (fold_dir, options.out_dir)
# check if done
scores_file = '%s/scores.h5' % inject_dir
scores_files.append(scores_file)
if os.path.isfile(scores_file):
print('%s already generated.' % scores_file)
else:
basenji_cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
basenji_cmd += ' conda activate %s;' % options.conda_env
basenji_cmd += ' echo $HOSTNAME;'
basenji_cmd += ' basenji_motifs_inject.py'
basenji_cmd += ' %s' % options_string(options, inject_options, inject_dir)
basenji_cmd += ' %s' % params_file
basenji_cmd += ' %s/train/model_best.h5' % fold_dir
basenji_cmd += ' %s' % bed_file
basenji_job = slurm.Job(basenji_cmd, job_name,
out_file='%s.out'%inject_dir,
err_file='%s.err'%inject_dir,
cpu=2, gpu=1,
queue=options.queue,
mem=30000, time='3-0:00:00')
jobs.append(basenji_job)
slurm.multi_run(jobs, verbose=True)
################################################################
# ensemble
ensemble_dir = '%s/ensemble' % folds_dir
if not os.path.isdir(ensemble_dir):
os.mkdir(ensemble_dir)
inject_dir = '%s/%s' % (ensemble_dir, options.out_dir)
if not os.path.isdir(inject_dir):
os.mkdir(inject_dir)
print('Generating ensemble scores.')
ensemble_scores_h5(inject_dir, scores_files)
def ensemble_scores_h5(ensemble_dir, scores_files):
# open ensemble
ensemble_h5_file = '%s/scores.h5' % ensemble_dir
if os.path.isfile(ensemble_h5_file):
os.remove(ensemble_h5_file)
ensemble_h5 = h5py.File(ensemble_h5_file, 'w')
# transfer base
base_keys = ['motif','tf']
sad_stats = []
scores0_h5 = h5py.File(scores_files[0], 'r')
for key in scores0_h5.keys():
if key in base_keys:
ensemble_h5.create_dataset(key, data=scores0_h5[key])
else:
sad_stats.append(key)
sad_shape = scores0_h5[key].shape
scores0_h5.close()
# average sum stats
num_folds = len(scores_files)
for sad_stat in sad_stats:
# initialize ensemble array
sad_values = np.zeros(shape=sad_shape, dtype='float32')
# read and add folds
for scores_file in scores_files:
with h5py.File(scores_file, 'r') as scores_h5:
sad_values += scores_h5[sad_stat][:].astype('float32')
# normalize and downcast
sad_values /= num_folds
sad_values = sad_values.astype('float16')
# save
ensemble_h5.create_dataset(sad_stat, data=sad_values)
ensemble_h5.close()
def options_string(options, group_options, rep_dir):
options_str = ''
for opt in group_options.option_list:
opt_str = opt.get_opt_string()
opt_value = options.__dict__[opt.dest]
    # wrap asterisks in ""
if type(opt_value) == str and opt_value.find('*') != -1:
opt_value = '"%s"' % opt_value
# no value for bools
elif type(opt_value) == bool:
if not opt_value:
opt_str = ''
opt_value = ''
# skip Nones
elif opt_value is None:
opt_str = ''
opt_value = ''
# modify
elif opt.dest == 'out_dir':
opt_value = rep_dir
options_str += ' %s %s' % (opt_str, opt_value)
return options_str
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
|
def enable_live_preview():
return """
<script>
    </script>
"""
|
# Generated by Django 3.1.7 on 2021-04-14 19:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import markdownx.models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('products', '0017_TinkoffCreditPromoCode'),
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('modified', models.DateTimeField(blank=True, db_index=True, null=True)),
('slug', models.UUIDField(db_index=True, default=uuid.uuid4, unique=True)),
('name', models.CharField(max_length=256, verbose_name='Name')),
('text', markdownx.models.MarkdownxField()),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.course')),
],
options={
'verbose_name': 'Homework',
'verbose_name_plural': 'Homeworks',
},
),
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('modified', models.DateTimeField(blank=True, db_index=True, null=True)),
('slug', models.UUIDField(db_index=True, default=uuid.uuid4, unique=True)),
('text', markdownx.models.MarkdownxField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='homework.question')),
],
options={
'verbose_name': 'Homework answer',
'verbose_name_plural': 'Homework answers',
},
),
]
|
import json
import pytest
from mayday.controllers.redis import RedisController
USER_ID = 123456789
ACTION = 'test'
@pytest.mark.usefixtures()
class Test:
@pytest.fixture(autouse=True, scope='function')
def before_all(self):
pass
def test_redis_key(self):
redis = RedisController()
assert redis.get_key(USER_ID, ACTION) == '123456789_test'
def test_redis(self):
redis = RedisController()
user_profile = dict(test='test')
assert redis.save(USER_ID, ACTION, user_profile)
assert user_profile == redis.load(USER_ID, ACTION)
# Delete
assert redis.clean(USER_ID, ACTION)
def test_redis_direct_read(self):
redis = RedisController()
user_profile = dict(test='test')
assert redis.save(USER_ID, ACTION, user_profile)
assert redis.direct_read(USER_ID, ACTION) == json.dumps(user_profile)
def test_clean_all_cache(self):
redis = RedisController()
redis.save(USER_ID, 'search', dict(text='test'))
redis.save(USER_ID, 'post', dict(text='test'))
redis.save(USER_ID, 'quick_search', dict(text='test'))
assert redis.count(USER_ID) == 4
redis.clean_all(USER_ID, 'start')
assert redis.count(USER_ID) == 0
|
from dataclasses import dataclass
from typing import Optional
from typing import Tuple
from joblib import delayed
from joblib import Parallel
import numpy as np
import pytest
from sklearn.base import ClassifierMixin
from sklearn.base import clone
from sklearn.base import is_classifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from obp.dataset import linear_behavior_policy
from obp.dataset import logistic_reward_function
from obp.dataset import SyntheticBanditDataset
from obp.policy import IPWLearner
from obp.policy import NNPolicyLearner
from obp.policy import QLearner
from obp.policy.base import BaseOfflinePolicyLearner
# hyperparameters of the regression model used in model dependent OPE estimators
hyperparams = {
"lightgbm": {
"n_estimators": 100,
"learning_rate": 0.005,
"max_depth": 5,
"min_samples_leaf": 10,
"random_state": 12345,
},
"logistic_regression": {
"max_iter": 10000,
"C": 1000,
"random_state": 12345,
},
"random_forest": {
"n_estimators": 500,
"max_depth": 5,
"min_samples_leaf": 10,
"random_state": 12345,
},
}
base_model_dict = dict(
logistic_regression=LogisticRegression,
lightgbm=GradientBoostingClassifier,
random_forest=RandomForestClassifier,
)
# n_rounds, n_actions, dim_context, base_model_for_evaluation_policy, base_model_for_reg_model
offline_experiment_configurations = [
(
600,
10,
5,
"logistic_regression",
"logistic_regression",
),
(
450,
3,
2,
"lightgbm",
"lightgbm",
),
(
500,
5,
3,
"random_forest",
"random_forest",
),
(
500,
3,
5,
"logistic_regression",
"random_forest",
),
(
800,
10,
10,
"lightgbm",
"logistic_regression",
),
]
@dataclass
class RandomPolicy(BaseOfflinePolicyLearner):
def __post_init__(self) -> None:
super().__post_init__()
def fit(self):
raise NotImplementedError
def predict(self, context: np.ndarray) -> np.ndarray:
n_rounds = context.shape[0]
action_dist = np.random.rand(n_rounds, self.n_actions, self.len_list)
return action_dist
@dataclass
class UniformSampleWeightLearner(BaseOfflinePolicyLearner):
base_classifier: Optional[ClassifierMixin] = None
def __post_init__(self) -> None:
super().__post_init__()
if self.base_classifier is None:
self.base_classifier = LogisticRegression(random_state=12345)
else:
if not is_classifier(self.base_classifier):
raise ValueError("base_classifier must be a classifier")
self.base_classifier_list = [
clone(self.base_classifier) for _ in np.arange(self.len_list)
]
def _create_train_data_for_opl(
self,
context: np.ndarray,
action: np.ndarray,
reward: np.ndarray,
pscore: np.ndarray,
**kwargs,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
return context, (reward / pscore), action
def fit(
self,
context: np.ndarray,
action: np.ndarray,
reward: np.ndarray,
pscore: Optional[np.ndarray] = None,
position: Optional[np.ndarray] = None,
) -> None:
if pscore is None:
n_actions = np.int32(action.max() + 1)
pscore = np.ones_like(action) / n_actions
if position is None or self.len_list == 1:
position = np.zeros_like(action, dtype=int)
for position_ in np.arange(self.len_list):
X, sample_weight, y = self._create_train_data_for_opl(
context=context[position == position_],
action=action[position == position_],
reward=reward[position == position_],
pscore=pscore[position == position_],
)
self.base_classifier_list[position_].fit(X=X, y=y)
def predict(self, context: np.ndarray) -> np.ndarray:
n_rounds = context.shape[0]
action_dist = np.zeros((n_rounds, self.n_actions, self.len_list))
for position_ in np.arange(self.len_list):
predicted_actions_at_position = self.base_classifier_list[
position_
].predict(context)
action_dist[
np.arange(n_rounds),
predicted_actions_at_position,
np.ones(n_rounds, dtype=int) * position_,
] += 1
return action_dist
@pytest.mark.parametrize(
"n_rounds, n_actions, dim_context, base_model_for_evaluation_policy, base_model_for_reg_model",
offline_experiment_configurations,
)
def test_offline_policy_learner_performance(
n_rounds: int,
n_actions: int,
dim_context: int,
base_model_for_evaluation_policy: str,
base_model_for_reg_model: str,
) -> None:
def process(i: int):
# synthetic data generator
dataset = SyntheticBanditDataset(
n_actions=n_actions,
dim_context=dim_context,
reward_function=logistic_reward_function,
behavior_policy_function=linear_behavior_policy,
random_state=i,
)
# sample new training and test sets of synthetic logged bandit feedback
bandit_feedback_train = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)
bandit_feedback_test = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)
# defining policy learners
ipw_policy = IPWLearner(
n_actions=dataset.n_actions,
base_classifier=base_model_dict[base_model_for_evaluation_policy](
**hyperparams[base_model_for_evaluation_policy]
),
)
q_policy = QLearner(
n_actions=dataset.n_actions,
base_model=base_model_dict[base_model_for_evaluation_policy](
**hyperparams[base_model_for_evaluation_policy]
),
)
nn_policy = NNPolicyLearner(
n_actions=dataset.n_actions,
dim_context=dim_context,
off_policy_objective="ipw",
)
# baseline method 1. RandomPolicy
random_policy = RandomPolicy(n_actions=dataset.n_actions)
# baseline method 2. UniformSampleWeightLearner
uniform_sample_weight_policy = UniformSampleWeightLearner(
n_actions=dataset.n_actions,
base_classifier=base_model_dict[base_model_for_evaluation_policy](
**hyperparams[base_model_for_evaluation_policy]
),
)
# policy training
ipw_policy.fit(
context=bandit_feedback_train["context"],
action=bandit_feedback_train["action"],
reward=bandit_feedback_train["reward"],
pscore=bandit_feedback_train["pscore"],
)
q_policy.fit(
context=bandit_feedback_train["context"],
action=bandit_feedback_train["action"],
reward=bandit_feedback_train["reward"],
pscore=bandit_feedback_train["pscore"],
)
nn_policy.fit(
context=bandit_feedback_train["context"],
action=bandit_feedback_train["action"],
reward=bandit_feedback_train["reward"],
pscore=bandit_feedback_train["pscore"],
)
uniform_sample_weight_policy.fit(
context=bandit_feedback_train["context"],
action=bandit_feedback_train["action"],
reward=bandit_feedback_train["reward"],
pscore=bandit_feedback_train["pscore"],
)
# prediction/making decisions
ipw_action_dist = ipw_policy.predict(
context=bandit_feedback_test["context"],
)
q_action_dist = q_policy.predict(
context=bandit_feedback_test["context"],
)
nn_action_dist = nn_policy.predict(
context=bandit_feedback_test["context"],
)
random_action_dist = random_policy.predict(
context=bandit_feedback_test["context"],
)
uniform_sample_weight_action_dist = uniform_sample_weight_policy.predict(
context=bandit_feedback_test["context"],
)
# evaluation
gt_ipw_learner = dataset.calc_ground_truth_policy_value(
expected_reward=bandit_feedback_test["expected_reward"],
action_dist=ipw_action_dist,
)
gt_q_learner = dataset.calc_ground_truth_policy_value(
expected_reward=bandit_feedback_test["expected_reward"],
action_dist=q_action_dist,
)
gt_nn_learner = dataset.calc_ground_truth_policy_value(
expected_reward=bandit_feedback_test["expected_reward"],
action_dist=nn_action_dist,
)
gt_random_policy = dataset.calc_ground_truth_policy_value(
expected_reward=bandit_feedback_test["expected_reward"],
action_dist=random_action_dist,
)
gt_uniform_sample_weight_learner = dataset.calc_ground_truth_policy_value(
expected_reward=bandit_feedback_test["expected_reward"],
action_dist=uniform_sample_weight_action_dist,
)
return (
gt_ipw_learner,
gt_q_learner,
gt_nn_learner,
gt_random_policy,
gt_uniform_sample_weight_learner,
)
n_runs = 10
processed = Parallel(
n_jobs=-1,
verbose=0,
)([delayed(process)(i) for i in np.arange(n_runs)])
list_gt_ipw = list()
list_gt_q = list()
list_gt_nn = list()
list_gt_random = list()
list_gt_unif_ipw = list()
for i, gt_policy_values in enumerate(processed):
gt_ipw, gt_q, gt_nn, gt_random, gt_unif_ipw = gt_policy_values
list_gt_ipw.append(gt_ipw)
list_gt_q.append(gt_q)
list_gt_nn.append(gt_nn)
list_gt_random.append(gt_random)
list_gt_unif_ipw.append(gt_unif_ipw)
# baseline learner performance
print(f"Performance of Random is {np.mean(list_gt_random)}")
print(
f"Performance of IPWLearner with Uniform Weight is {np.mean(list_gt_unif_ipw)}"
)
# ipw learner performance
print(f"Performance of IPWLearner is {np.mean(list_gt_ipw)}")
assert np.mean(list_gt_ipw) > np.mean(list_gt_random)
assert np.mean(list_gt_ipw) > np.mean(list_gt_unif_ipw)
# q learner performance
print(f"Performance of QLearner is {np.mean(list_gt_q)}")
assert np.mean(list_gt_q) > np.mean(list_gt_random)
assert np.mean(list_gt_q) > np.mean(list_gt_unif_ipw)
# nn policy learner performance
print(f"Performance of NNPolicyLearner is {np.mean(list_gt_nn)}")
assert np.mean(list_gt_nn) > np.mean(list_gt_random)
assert np.mean(list_gt_nn) > np.mean(list_gt_unif_ipw)
|
from .handler import connect_websocket, spawn, _javascript_call
from bottle.ext import websocket as bottle_websocket
import bottle
def start(port = 4949, block = True, quiet = True):
def run_server():
return bottle.run(
port = port,
quiet = quiet,
host = "0.0.0.0",
app = bottle.default_app(),
server = bottle_websocket.GeventWebSocketServer,
)
if block:
run_server()
else:
spawn(run_server)
bottle.route(
path = '/',
callback = connect_websocket,
apply = (bottle_websocket.websocket,))
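# Usage sketch: run the websocket server without blocking the caller (the port value is arbitrary):
#
#     start(port=4949, block=False)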
|
from socrata.operations.utils import get_filename, SocrataException
from socrata.operations.operation import Operation
class ConfiguredJob(Operation):
def run(self, data, put_bytes, filename = None):
filename = get_filename(data, filename)
(ok, rev) = self.properties['view'].revisions.create_using_config(
self.properties['config']
)
if not ok:
raise SocrataException("Failed to create the revision", rev)
(ok, source) = rev.create_upload(filename)
if not ok:
raise SocrataException("Failed to create the upload", source)
(ok, source) = put_bytes(source)
if not ok:
raise SocrataException("Failed to upload the file", source)
output_schema = source.get_latest_input_schema().get_latest_output_schema()
(ok, output_schema) = output_schema.wait_for_finish()
if not ok:
raise SocrataException("The dataset failed to validate", output_schema)
(ok, job) = rev.apply(output_schema = output_schema)
if not ok:
raise SocrataException("Failed to apply the change", job)
return (rev, job)
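# Rough usage sketch: the Operation base class (defined elsewhere) supplies `properties`
# containing a 'view' and a 'config'; `put_bytes` is any callable that uploads bytes to the source.
#
#     (revision, apply_job) = configured_job.run(data, put_bytes, filename="data.csv")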
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Topology readers --- :mod:`MDAnalysis.topology`
===============================================
This submodule contains the topology readers. A topology file supplies the list
of atoms in the system, their connectivity and possibly additional information
such as B-factors, partial charges, etc. The details depend on the file format
and not every topology file provides all (or even any) additional data. This
data is made accessible through AtomGroup properties.
As a minimum, all topology parsers will provide atom ids, atom types, masses,
resids, resnums and segids as well as assigning all atoms to residues and all
residues to segments. For systems without residues and segments, this results
in there being a single residue and segment to which all atoms belong. Often
when data is not provided by a file, it will be guessed based on other data in
the file. In the event that this happens, a UserWarning will always be issued.
The following table lists the currently supported topology formats along with
the attributes they provide.
.. _`Supported topology formats`:
.. table:: Table of Supported Topology Formats
================= ========== ================= ===================================================
Name extension attributes remarks
================= ========== ================= ===================================================
CHARMM/XPLOR PSF psf resnames, CHARMM_/XPLOR/NAMD_ topology format;
names, types, :mod:`MDAnalysis.topology.PSFParser`
charges,
bonds, angles,
dihedrals,
impropers
CHARMM CARD [#a]_ crd names, "CARD" coordinate output from CHARMM_; deals with
tempfactors, either standard or EXTended format;
resnames, :mod:`MDAnalysis.topology.CRDParser`
Brookhaven [#a]_ pdb/ent names, bonds, a simplified PDB format (as used in MD simulations)
resids, resnums, is read by default
types,
chainids,
occupancies,
bfactors,
resids, icodes,
resnames,
segids,
XPDB [#a]_ pdb As PDB except Extended PDB format as used by e.g., NAMD_
icodes (can use 5-digit residue numbers). To use, specify
the format "XPBD" explicitly:
``Universe(..., topology_format="XPDB")``.
Module :mod:`MDAnalysis.coordinates.PDB`
PQR [#a]_ pqr names, charges, PDB-like but whitespace-separated files with charge
types, and radius information as used by, e.g., APBS_.
radii, resids, :mod:`MDAnalysis.topology.PQRParser`
resnames, icodes,
segids
PDBQT [#a]_ pdbqt names, types, file format used by AutoDock_ with atom types and
altLocs, charges, partial charges. Module:
resnames, :mod:`MDAnalysis.topology.PDBQTParser`
resids,
icodes,
occupancies,
tempfactors,
segids,
GROMOS96 [#a]_ gro names, resids, GROMOS96 coordinate file (used e.g., by Gromacs_)
resnames, :mod:`MDAnalysis.topology.GROParser`
Amber top, names, charges simple Amber_ format reader (only supports a subset
prmtop, type_indices, of flags);
parm7 types, :mod:`MDAnalysis.topology.TOPParser`
resnames,
DESRES [#a]_ dms names, numbers, DESRES molecular structure reader (only supports
masses, charges, the atom and bond records) as used by Desmond_ and Anton;
chainids, resids, :mod:`MDAnalysis.topology.DMSParser`
resnames, segids,
radii,
TPR [#b]_ tpr names, types, Gromacs_ portable run input reader (limited
resids, resnames, experimental support for some of the more recent
charges, bonds, versions of the file format);
masses, moltypes, :mod:`MDAnalysis.topology.TPRParser`
molnums
ITP itp names, types, Gromacs_ include topology file;
resids, resnames, :mod:`MDAnalysis.topology.ITPParser`
charges, bonds,
masses, segids,
moltypes,
chargegroups
MOL2 [#a]_ mol2 ids, names, Tripos MOL2 molecular structure format;
types, resids, :mod:`MDAnalysis.topology.MOL2Parser`
charges, bonds,
resnames,
LAMMPS [#a]_ data ids, types, LAMMPS_ Data file parser
masses, charges, :mod:`MDAnalysis.topology.LAMMPSParser`
resids, bonds,
angles, dihedrals
LAMMPS [#a]_ lammpsdump id, masses LAMMPS_ ascii dump file reader
:mod:`MDAnalysis.topology.LAMMPSParser`
XYZ [#a]_ xyz names XYZ File Parser. Reads only the labels from atoms
and constructs minimal topology data.
:mod:`MDAnalysis.topology.XYZParser`
TXYZ [#a]_ txyz, names, atomids, Tinker_ XYZ File Parser. Reads atom labels, numbers
arc masses, types, and connectivity; masses are guessed from atoms names.
bonds :mod:`MDAnalysis.topology.TXYZParser`
GAMESS [#a]_ gms, names, GAMESS_ output parser. Read only atoms of assembly
log atomic charges, section (atom, elems and coords) and construct
topology.
:mod:`MDAnalysis.topology.GMSParser`
DL_POLY [#a]_ config, ids, names `DL_POLY`_ CONFIG or HISTORY file. Reads only the
history atom names. If atoms are written out of order, will
correct the order.
:mod:`MDAnalysis.topology.DLPolyParser`
Hoomd XML xml types, charges, `HOOMD XML`_ topology file. Reads atom types,
radii, masses masses, and charges if possible. Also reads bonds,
bonds, angles, angles, and dihedrals.
dihedrals :mod:`MDAnalysis.topology.HoomdXMLParser`
GSD [#a]_ gsd types, charges, HOOMD_ GSD topology file. Reads atom types,
radii, masses masses, and charges if possible. Also reads bonds,
bonds, angles, angles, and dihedrals.
dihedrals :mod:`MDAnalysis.topology.GSDParser`
MMTF [#a]_ mmtf altLocs, `Macromolecular Transmission Format (MMTF)`_. An
bfactors, bonds, efficient compact format for biomolecular
charges, masses, structures.
names,
occupancies,
types, icodes,
resnames, resids,
segids, models
FHIAIMS [#a]_ in names `FHI-AIMS`_ File Parser. Reads only the labels from
atoms and constructs minimal topology data.
:mod:`MDAnalysis.topology.FHIAIMSParser`
================= ========== ================= ===================================================
.. [#a] This format can also be used to provide *coordinates* so that
it is possible to create a full
:mod:`~MDAnalysis.core.universe.Universe` by simply providing
a file of this format as the sole argument to
:mod:`~MDAnalysis.core.universe.Universe`: ``u =
Universe(filename)``
.. [#b] The Gromacs TPR format contains coordinate information but
parsing coordinates from a TPR file is currently not implemented
in :mod:`~MDAnalysis.topology.TPRParser`.
Note
----
See :ref:`Coordinates` for the table of :ref:`Supported coordinate formats`.
.. _CHARMM: https://www.charmm.org/charmm/
.. _HOOMD XML: http://codeblue.umich.edu/hoomd-blue/doc/page_xml_file_format.html
.. _HOOMD: http://glotzerlab.engin.umich.edu/hoomd-blue/
.. _NAMD: http://www.ks.uiuc.edu/Research/namd/
.. _LAMMPS: https://lammps.sandia.gov/
.. _Gromacs: http://www.gromacs.org/
.. _Amber: http://ambermd.org/
.. _Desmond: https://www.deshawresearch.com/resources_desmond.html
.. _Tinker: https://dasher.wustl.edu/tinker/
.. _DL_POLY: https://www.scd.stfc.ac.uk/Pages/DL_POLY.aspx
.. _AutoDock: http://autodock.scripps.edu/
.. _APBS: https://apbs-pdb2pqr.readthedocs.io/en/latest/apbs/
.. _Macromolecular Transmission Format (MMTF): https://mmtf.rcsb.org/
.. _FHI-AIMS: https://aimsclub.fhi-berlin.mpg.de/
.. _GAMESS: https://www.msg.chem.iastate.edu/gamess/
.. _topology-parsers-developer-notes:
Developer Notes
---------------
.. versionadded:: 0.8
.. versionchanged:: 0.16.0
The new array-based topology system completely replaced the old
system that was based on a list of Atom instances.
Topology information consists of data that do not change over time,
i.e. information that is the same for all time steps of a
trajectory. This includes
* identity of atoms (name, type, number, partial charge, ...) and to
which residue and segment they belong; atoms are identified in
MDAnalysis by their :attr:`~MDAnalysis.core.groups.Atom.index`,
an integer number starting at 0 and incremented in the order of
atoms found in the topology.
* bonds (pairs of atoms)
* angles (triplets of atoms)
* dihedral angles (quadruplets of atoms) — proper and improper
dihedrals should be treated separately
Topology readers are generally called "parsers" in MDAnalysis (for
historical reasons and in order to distinguish them from coordinate
"readers"). All parsers are derived from
:class:`MDAnalysis.topology.base.TopologyReaderBase` and have a
:meth:`~MDAnalysis.topology.base.TopologyReaderBase.parse` method that
returns a :class:`MDAnalysis.core.topology.Topology` instance.
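As a hedged sketch only (the class, the example atom data and the exact
:class:`~MDAnalysis.core.topology.Topology` constructor call below are
illustrative assumptions and should be checked against the current API,
not taken as part of this package), a minimal parser might look like::

    import numpy as np
    from MDAnalysis.topology.base import TopologyReaderBase
    from MDAnalysis.core.topology import Topology
    from MDAnalysis.core.topologyattrs import Atomnames, Atomtypes

    class MyFormatParser(TopologyReaderBase):
        format = 'MYFMT'   # hypothetical extension handled by this parser

        def parse(self, **kwargs):
            # read the atom records from self.filename here ...
            names = np.array(['C1', 'O1'], dtype=object)
            attrs = [Atomnames(names), Atomtypes(names.copy())]
            return Topology(n_atoms=len(names), n_res=1, n_seg=1, attrs=attrs)
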
atoms
~~~~~~
The **atoms** appear to the user as an array of
:class:`~MDAnalysis.core.groups.Atom` instances. However, under the
hood this is essentially only an array of atom indices that are used
to index the various components of the topology database
:class:`~MDAnalysis.core.topology.Topology`. The parser needs to
initialize the :class:`~MDAnalysis.core.topology.Topology` with the
data read from the topology file.
See Also
--------
:ref:`topology-system-label`
bonds
~~~~~~
**Bonds** are represented as a :class:`tuple` of :class:`tuple`. Each tuple
contains two atom numbers, which indicate the atoms between which the
bond is formed. Only one of the two permutations is stored, typically
the one with the lower atom number first.
bondorder
~~~~~~~~~~
Some **bonds** have additional information called **order**. When available
this is stored in a dictionary of format {bondtuple:order}. This extra
information is then passed to Bond initialisation in u._init_bonds()
angles
~~~~~~~
**Angles** are represented by a :class:`list` of :class:`tuple`. Each
tuple contains three atom numbers. The second of these numbers
represents the apex of the angle.
dihedrals
~~~~~~~~~
**Proper dihedral angles** are represented by a :class:`list` of :class:`tuple`. Each
tuple contains four atom numbers. The angle of the torsion
is defined by the angle between the planes formed by atoms 1, 2, and 3,
and 2, 3, and 4.
impropers
~~~~~~~~~
**Improper dihedral angles** are represented by a :class:`list` of :class:`tuple`. Each
tuple contains four atom numbers. The angle of the improper torsion
is again defined by the angle between the planes formed by atoms 1, 2, and 3,
and 2, 3, and 4. Improper dihedrals differ from regular dihedrals as the
four atoms need not be sequentially bonded, and are instead often all bonded
to the second atom.
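For illustration only (the atom indices are made up), the containers described
in the sections above could look like::

    bonds     = ((0, 1), (1, 2), (1, 3))   # pairs, lower index first
    bondorder = {(0, 1): 2}                # optional {bondtuple: order}
    angles    = [(0, 1, 2), (2, 1, 3)]     # the middle atom is the apex
    dihedrals = [(0, 1, 2, 3)]             # proper torsion over four atoms
    impropers = [(0, 1, 2, 3)]             # atoms need not be sequentially bonded
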
"""
from __future__ import absolute_import
__all__ = ['core', 'PSFParser', 'PDBParser', 'PQRParser', 'GROParser',
'CRDParser', 'TOPParser', 'PDBQTParser', 'TPRParser',
'LAMMPSParser', 'XYZParser', 'GMSParser', 'DLPolyParser',
'HoomdXMLParser','GSDParser', 'ITPParser', 'ParmEdParser']
from . import core
from . import PSFParser
from . import TOPParser
from . import PDBParser
from . import ExtendedPDBParser
from . import PQRParser
from . import GROParser
from . import CRDParser
from . import PDBQTParser
from . import DMSParser
from . import TPRParser
from . import MOL2Parser
from . import LAMMPSParser
from . import XYZParser
from . import TXYZParser
from . import GMSParser
from . import DLPolyParser
from . import HoomdXMLParser
from . import MMTFParser
from . import GSDParser
from . import MinimalParser
from . import ITPParser
from . import ParmEdParser
from . import FHIAIMSParser
|
"""
Test blockpy directive
"""
__author__ = 'Jovan'
import unittest
import time
from unittest import TestCase
from runestone.unittest_base import module_fixture_maker, RunestoneTestCase
from selenium.webdriver import ActionChains
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
import selenium.webdriver.support.ui as ui
mf, setUpModule, tearDownModule = module_fixture_maker(__file__, True)
jquery_url = "http://code.jquery.com/jquery-1.12.4.min.js"
class BlockpyTest(RunestoneTestCase):
def test_blockly_karel_transition(self):
"""
        Testing that changes made in Blockly are saved in the Karel code
"""
self.driver.get(self.host + "/index.html")
self.driver.execute_script('window.localStorage.clear();')
actionChains = ActionChains(self.driver)
#going into blockly window
rb = self.driver.find_element_by_class_name("blockly-button")
self.assertIsNotNone(rb)
rb.click()
        #selecting and dragging the "move" piece onto the canvas, placing it into the "for loop" block
karel = self.driver.find_element_by_id(":1")
karel.click()
getBlocklyElement(self, 0)
piece = self.driver.find_element_by_class_name("blocklySelected")
actionChains.click_and_hold(piece).perform()
actionChains.move_by_offset(140, 35).release(piece).perform()
#back to karel
back = self.driver.find_elements_by_class_name("btn-primary")[1]
self.assertIsNotNone(back)
back.click()
#checking if code is in sync with blockly
code = self.driver.find_element_by_class_name("CodeMirror-code")
self.assertEqual(code.text, '1\nfrom karel import * \n2\nmove()\n3')
def test_success(self):
"""
Testing a simple karel program made in blockly window.
        Because multiple modules are tested, this could be regarded as an integration test.
"""
self.driver.get(self.host + "/index.html")
self.driver.execute_script('window.localStorage.clear();')
actionChains = ActionChains(self.driver)
actionChains2 = ActionChains(self.driver)
actionChains3 = ActionChains(self.driver)
actionChains4 = ActionChains(self.driver)
#going into blockly window
rb = self.driver.find_element_by_class_name("blockly-button")
self.assertIsNotNone(rb)
rb.click()
        #selecting and dragging the "move" piece onto the canvas
karel = self.driver.find_element_by_id(":1")
self.assertIsNotNone(karel)
karel.click()
getBlocklyElement(self, 0)
piece = self.driver.find_element_by_class_name("blocklySelected")
actionChains.drag_and_drop_by_offset(piece, 100, 0).perform()
        #selecting and dragging the "move" piece onto the canvas
karel.click()
getBlocklyElement(self, 0)
piece = self.driver.find_element_by_class_name("blocklySelected")
actionChains2.click_and_hold(piece).perform()
actionChains2.move_by_offset(100, 12).release(piece).perform()
        #selecting and dragging the "move" piece onto the canvas
karel.click()
getBlocklyElement(self, 0)
piece = self.driver.find_element_by_class_name("blocklySelected")
actionChains3.click_and_hold(piece).perform()
actionChains3.move_by_offset(100, 12).release(piece).perform()
        #selecting and dragging the "pickup" piece onto the canvas
karel.click()
getBlocklyElement(self, 3)
piece = self.driver.find_element_by_class_name("blocklySelected")
actionChains4.click_and_hold(piece).perform()
actionChains4.move_by_offset(100, -75).release(piece).perform()
#going back to karel
back = self.driver.find_elements_by_class_name("btn-primary")[1]
self.assertIsNotNone(back)
back.click()
#running program
run = self.driver.find_element_by_class_name("run-button")
self.assertIsNotNone(run)
run.click()
#checking if the program finished successfully
self.assertIsNotNone(self.driver.find_element_by_class_name("alert-success"))
def test_failure(self):
"""
Testing a simple karel program made in blockly window.
        Checking if an incorrect program produces the correct error message
"""
self.driver.get(self.host + "/index.html")
self.driver.execute_script('window.localStorage.clear();')
actionChains = ActionChains(self.driver)
#going into blockly window
rb = self.driver.find_element_by_class_name("blockly-button")
self.assertIsNotNone(rb)
rb.click()
karel = self.driver.find_element_by_id(":1")
self.assertIsNotNone(karel)
#going back to karel
back = self.driver.find_elements_by_class_name("btn-primary")[1]
self.assertIsNotNone(back)
back.click()
#running empty code
run = self.driver.find_element_by_class_name("run-button")
self.assertIsNotNone(run)
run.click()
#testing if error is displayed correctly
self.assertIsNotNone(self.driver.find_element_by_class_name("alert-danger"))
def test_loop(self):
"""
        Testing options: creating a variable and fusing multiple Blockly blocks into one.
        Because multiple modules are tested, this could be regarded as an integration test.
"""
self.driver.get(self.host + "/index.html")
self.driver.execute_script('window.localStorage.clear();')
actionChains = ActionChains(self.driver)
actionChains2 = ActionChains(self.driver)
actionChains3 = ActionChains(self.driver)
actionChains4 = ActionChains(self.driver)
actionChains5 = ActionChains(self.driver)
actionChains6 = ActionChains(self.driver)
#going into blockly window
rb = self.driver.find_element_by_class_name("blockly-button")
self.assertIsNotNone(rb)
rb.click()
        #selecting and dragging the "for loop" piece onto the canvas
karel = self.driver.find_element_by_id(":4")
self.assertIsNotNone(karel)
karel.click()
blocklyCanvas = self.driver.find_elements_by_class_name("blocklyBlockCanvas")[1]
pice1 = blocklyCanvas.find_elements_by_tag_name("rect")[0]
time.sleep(.5)
pice1.click()
piece = self.driver.find_element_by_class_name("blocklySelected")
actionChains.drag_and_drop_by_offset(piece, 100, 0).perform()
        #selecting and dragging the "move" piece onto the canvas, placing it into the "for loop" block
karel = self.driver.find_element_by_id(":1")
karel.click()
getBlocklyElement(self,0)
piece = self.driver.find_element_by_class_name("blocklySelected")
actionChains2.click_and_hold(piece).perform()
actionChains2.move_by_offset(140,35).release(piece).perform()
        #selecting and dragging the "pickup" piece onto the canvas, placing it into the "for loop" block
karel.click()
getBlocklyElement(self,3)
piece = self.driver.find_element_by_class_name("blocklySelected")
actionChains4.click_and_hold(piece).perform()
actionChains4.move_by_offset(100,-75).release(piece).perform()
        #selecting and dragging the "range" piece onto the canvas, placing it into the "for loop" block
karel = self.driver.find_element_by_id(":c")
karel.click()
getBlocklyElementRect(self,-1)
piece = self.driver.find_element_by_class_name("blocklySelected")
        #initialize a new variable; its name is entered in an alert window
karel = self.driver.find_element_by_id(":2")
karel.click()
newVariable = self.driver.find_elements_by_class_name("blocklyBlockCanvas")[1].find_element_by_class_name("blocklyText")
newVariable.click()
try:
WebDriverWait(self.driver, 3).until(EC.alert_is_present(),
'Timed out waiting for PA creation ' +
'confirmation popup to appear.')
alertWindow = self.driver.switch_to.alert
alertWindow.send_keys('i')
alertWindow.accept()
except TimeoutException:
print("no alert")
getBlocklyElement(self,1)
piece = self.driver.find_element_by_class_name("blocklySelected")
actionChains5.click_and_hold(piece).perform()
actionChains5.move_by_offset(170,-75).release(piece).perform()
        #selecting and dragging the "range attributes" piece onto the canvas, placing it into the "range" block
karel = self.driver.find_element_by_id(":a")
karel.click()
getBlocklyElement(self,1)
piece = self.driver.find_element_by_class_name("blocklySelected")
actionChains6.click_and_hold(piece).perform()
actionChains6.move_by_offset(335,-40).release(piece).perform()
pieceInput = self.driver.find_element_by_class_name("blocklyEditableText")
        webdriver.ActionChains(self.driver).move_to_element(pieceInput).click(pieceInput).perform()
time.sleep(.5)
#setting range(3)
pieceInput2 = self.driver.find_element_by_class_name("blocklyWidgetDiv").find_element_by_class_name("blocklyHtmlInput")
self.assertIsNotNone(pieceInput2)
self.driver.execute_script("arguments[0].value=3;", pieceInput2)
time.sleep(.5)
#after setting input to 3, moving focus to another element in order to save it
workSpace = self.driver.find_element_by_class_name("blocklyWorkspace")
self.assertIsNotNone(workSpace)
workSpace.click()
#back to karel
back = self.driver.find_elements_by_class_name("btn-primary")[1]
self.assertIsNotNone(back)
back.click()
run = self.driver.find_element_by_class_name("run-button")
self.assertIsNotNone(run)
run.click()
self.assertIsNotNone(self.driver.find_element_by_class_name("alert-success"))
def getBlocklyElement(self, elementNo):
blocklyCanvas = self.driver.find_elements_by_class_name("blocklyBlockCanvas")[1]
piece = blocklyCanvas.find_elements_by_class_name("blocklyDraggable")[elementNo]
time.sleep(.5)
piece.click()
def getBlocklyElementRect(self, elementNo):
blocklyCanvas = self.driver.find_elements_by_class_name("blocklyBlockCanvas")[1]
piece = blocklyCanvas.find_elements_by_tag_name("rect")[elementNo]
time.sleep(.5)
actionChains3= ActionChains(self.driver)
actionChains3.click_and_hold(piece).perform()
actionChains3.move_by_offset(-20,-20).move_by_offset(270,-210).release(piece).perform()
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
import wtforms
from flask import render_template, request, Markup, abort, flash, redirect, escape, url_for, make_response
from .. import b__ as __
from .form import Form
from .fields import SubmitField
class ConfirmDeleteForm(Form):
"""
Confirm a delete operation
"""
# The labels on these widgets are not used. See delete.html.
delete = SubmitField(__(u"Delete"))
cancel = SubmitField(__(u"Cancel"))
def render_form(form, title, message='', formid='form', submit=__(u"Submit"), cancel_url=None, ajax=False):
multipart = False
for field in form:
if isinstance(field.widget, wtforms.widgets.FileInput):
multipart = True
if form.errors:
code = 200 # 400
else:
code = 200
if request.is_xhr and ajax:
return make_response(render_template('baseframe/ajaxform.html', form=form, title=title,
message=message, formid=formid, submit=submit,
cancel_url=cancel_url, multipart=multipart), code)
else:
return make_response(render_template('baseframe/autoform.html', form=form, title=title,
message=message, formid=formid, submit=submit,
cancel_url=cancel_url, ajax=ajax, multipart=multipart), code)
def render_message(title, message, code=200):
if request.is_xhr:
return make_response(Markup("<p>%s</p>" % escape(message)), code)
else:
return make_response(render_template('baseframe/message.html', title=title, message=message), code)
def render_redirect(url, code=302):
if request.is_xhr:
return make_response(render_template('baseframe/redirect.html', url=url))
else:
return redirect(url, code=code)
def render_delete_sqla(obj, db, title, message, success=u'', next=None, cancel_url=None):
if not obj:
abort(404)
form = ConfirmDeleteForm()
if request.method in ('POST', 'DELETE') and form.validate():
if 'delete' in request.form or request.method == 'DELETE':
db.session.delete(obj)
db.session.commit()
if success:
flash(success, 'success')
return render_redirect(next or url_for('index'), code=303)
else:
return render_redirect(cancel_url or next or url_for('index'), code=303)
return make_response(render_template('baseframe/delete.html', form=form, title=title, message=message))
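# A hedged usage sketch for render_delete_sqla (the Post model, the route and
# the endpoint names are illustrative assumptions, not part of this module):
#
#   @app.route('/post/<int:post_id>/delete', methods=['GET', 'POST'])
#   def delete_post(post_id):
#       post = Post.query.get(post_id)
#       return render_delete_sqla(post, db, title=u"Confirm delete",
#           message=u"Delete this post?", success=u"Post deleted",
#           next=url_for('index'))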
|
#!/usr/bin/env python
import argparse
import json
import radix
import re
import subprocess
from logger import get_logger
from json_schema import json_schema
from json import JSONDecoder, JSONDecodeError
from netaddr import IPAddress, IPNetwork, IPSet
from filelock import FileLock
log = get_logger(path="/etc/artemis/automation_tools/logging.yaml", logger="auto_mitigation")
# returns a generator which separates the JSON objects in the file
def decode_stacked(document, pos=0, decoder=JSONDecoder()):
NOT_WHITESPACE = re.compile(r'[^\s]')
while True:
match = NOT_WHITESPACE.search(document, pos)
if not match:
return
pos = match.start()
try:
obj, pos = decoder.raw_decode(document, pos)
except JSONDecodeError as e:
raise e
yield obj
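# For illustration only: decode_stacked handles "stacked" JSON, i.e. several
# JSON objects concatenated in one document, for example:
#
#   for obj in decode_stacked('{"a": 1} {"b": 2}'):
#       print(obj)    # -> {'a': 1} then {'b': 2}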
# returns a list of JSON objects; each object corresponds to the BGP
# configuration of a router with which ARTEMIS is connected
def read_json_file(filename):
try:
json_data = []
with open(filename, 'r') as json_file:
json_stacked_data = json_file.read()
for obj in decode_stacked(json_stacked_data):
json_data.append(obj)
return json_data
except Exception as e:
raise e
# create radix-prefix tree with json data (config-file) of each router
def create_prefix_tree(json_data):
# Create a new tree
rtree = radix.Radix()
# Adding a node returns a RadixNode object. You can create
# arbitrary members in its 'data' dict to store your data.
    # Each node is keyed by a prefix that a router announces, and stores
    # as data a tuple of that router's (asn, bgp router-id, interface
    # name of the super-prefix of that prefix)
for i in json_data:
prefixes_list = i["prefixes"]
for j in prefixes_list:
mask = str(IPAddress(j["mask"]).netmask_bits())
cidr = j["network"] + "/" + mask
            # find the interface whose network contains this subprefix
interface_name = None
interfaces_list = i["interfaces"]
for k in interfaces_list:
interface_mask = str(IPAddress(k["interface_mask"]).netmask_bits())
interface_cidr = k["interface_ip"] + "/" + interface_mask
s1 = IPSet([interface_cidr])
s2 = IPSet([cidr])
if s1.issuperset(s2) == True:
# we found the interface of the superprefix of current subprefix
interface_name = k["interface_name"]
break
# search if prefix already exists in tree
tmp_node = rtree.search_exact(cidr)
if tmp_node == None:
# prefix does not exist
rnode = rtree.add(cidr)
rnode.data["data_list"] = []
rnode.data["data_list"].append(
(str(i["origin_as"][0]["asn"]), i["bgp_router_id"][0]["router_id"], interface_name))
else:
# prefix exist -> update list
tmp_node.data["data_list"].append(
(str(i["origin_as"][0]["asn"]), i["bgp_router_id"][0]["router_id"], interface_name))
return rtree
def prefix_deaggregation(hijacked_prefix):
subnets = list(hijacked_prefix.subnet(hijacked_prefix.prefixlen + 1))
prefix1_data = [str(subnets[0]), str(subnets[0].network), str(subnets[0].netmask)]
prefix2_data = [str(subnets[1]), str(subnets[1].network), str(subnets[1].netmask)]
return prefix1_data, prefix2_data
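# Worked example (illustrative addresses): deaggregating the hijacked prefix
# 10.0.0.0/23 yields the two more-specific halves, each as [cidr, network, netmask]:
#
#   prefix1_data == ['10.0.0.0/24', '10.0.0.0', '255.255.255.0']
#   prefix2_data == ['10.0.1.0/24', '10.0.1.0', '255.255.255.0']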
def isInputValid(rtree, json_data, admin_configs):
prefix_keys_list = list(admin_configs["mitigation"]["configured_prefix"].keys())
if not prefix_keys_list:
# empty prefix_keys_list
log.error("No prefixes have been added in configured_prefix dictionary !!!")
return False
else:
# list prefix_keys_list has elements
mitigation_json_schema = '{"netmask_threshold": "int","less_than_threshold": "str","equal_greater_than_threshold": "str","tunnel_definitions": {"helperAS": {"asn": "int","router_id": "str","tunnel_interface_name": "str","tunnel_interface_ip_address": "str","tunnel_interface_ip_mask": "str","tunnel_source_ip_address": "str","tunnel_source_ip_mask": "str","tunnel_destination_ip_address": "str","tunnel_destination_ip_mask": "str"}}}'
# check only the json schema
ipv4_cidr_regex = re.compile(
r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/([0-9]|[1-2][0-9]|3[0-2]))$")
for prefix in admin_configs["mitigation"]["configured_prefix"]:
if type(prefix) != str or not ipv4_cidr_regex.match(prefix):
log.error("Invalid configured prefix string-cidr format !!!")
return False
else:
mitigation_json_input = json.dumps(admin_configs["mitigation"]["configured_prefix"][prefix])
if not json_schema.match(mitigation_json_input, mitigation_json_schema):
log.error("Mitigation json input schema not matched !!!")
return False
# check the values-fields
for prefix in admin_configs["mitigation"]["configured_prefix"]:
netmask_threshold = int(admin_configs["mitigation"]["configured_prefix"][prefix]["netmask_threshold"])
less_than_threshold = admin_configs["mitigation"]["configured_prefix"][prefix]["less_than_threshold"]
equal_greater_than_threshold = admin_configs["mitigation"]["configured_prefix"][prefix][
"equal_greater_than_threshold"]
asn = int(admin_configs["mitigation"]["configured_prefix"][prefix]["tunnel_definitions"]["helperAS"]["asn"])
router_id = admin_configs["mitigation"]["configured_prefix"][prefix]["tunnel_definitions"]["helperAS"][
"router_id"]
tunnel_interface_name = \
admin_configs["mitigation"]["configured_prefix"][prefix]["tunnel_definitions"]["helperAS"][
"tunnel_interface_name"]
tunnel_interface_ip_address = \
admin_configs["mitigation"]["configured_prefix"][prefix]["tunnel_definitions"]["helperAS"][
"tunnel_interface_ip_address"]
tunnel_interface_ip_mask = \
admin_configs["mitigation"]["configured_prefix"][prefix]["tunnel_definitions"]["helperAS"][
"tunnel_interface_ip_mask"]
tunnel_source_ip_address = \
admin_configs["mitigation"]["configured_prefix"][prefix]["tunnel_definitions"]["helperAS"][
"tunnel_source_ip_address"]
tunnel_source_ip_mask = \
admin_configs["mitigation"]["configured_prefix"][prefix]["tunnel_definitions"]["helperAS"][
"tunnel_source_ip_mask"]
tunnel_destination_ip_address = \
admin_configs["mitigation"]["configured_prefix"][prefix]["tunnel_definitions"]["helperAS"][
"tunnel_destination_ip_address"]
tunnel_destination_ip_mask = \
admin_configs["mitigation"]["configured_prefix"][prefix]["tunnel_definitions"]["helperAS"][
"tunnel_destination_ip_mask"]
# check prefix value
node = rtree.search_exact(prefix)
if node == None:
log.error("Invalid configured prefix !!!")
return False
# check netmask_threshold, less_than_threshold, equal_greater_than_threshold values
if netmask_threshold < 8 or netmask_threshold > 30 or less_than_threshold not in ["deaggregate", "tunnel",
"deaggregate+tunnel",
"manual"] or equal_greater_than_threshold not in [
"tunnel", "manual"]:
log.error("netmask_threshold or less_than_threshold or equal_greater_than_threshold field is invalid")
return False
# check asn, router_id, tunnel_interface_name,
# tunnel_interface_ip_address, tunnel_interface_ip_mask values
check_flag = 0
for item in json_data:
if item["origin_as"][0]["asn"] == asn and item["bgp_router_id"][0]["router_id"] == router_id:
for element in item["interfaces"]:
if element["interface_name"] == tunnel_interface_name and element[
"interface_ip"] == tunnel_interface_ip_address and element[
"interface_mask"] == tunnel_interface_ip_mask:
check_flag = 1
break
if check_flag == 1:
# fields were found
break
if check_flag == 0:
log.error(
"asn or router_id or tunnel_interface_name or tunnel_interface_ip_address or tunnel_interface_ip_mask field is invalid")
return False
# check tunnel_source_ip_address, tunnel_source_ip_mask fields
ipv4_regex = re.compile(
r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$")
if ipv4_regex.match(tunnel_source_ip_address) and ipv4_regex.match(tunnel_source_ip_mask):
mask = str(IPAddress(tunnel_source_ip_mask).netmask_bits())
cidr = tunnel_source_ip_address + "/" + mask
prefix = str(IPNetwork(cidr).network) + "/" + mask
node = rtree.search_exact(prefix)
if node != None:
log.error("Physical source interface of tunnel must not has hijacked ip !!!")
return False
else:
check_flag = 0
for item in json_data:
for element in item["interfaces"]:
if element["interface_ip"] == tunnel_source_ip_address and element[
"interface_mask"] == tunnel_source_ip_mask:
check_flag = 1
break
if check_flag == 1:
# fields were found
break
if check_flag == 0:
log.error("Invalid physical tunnnel source interface !!!")
return False
else:
log.error("Invalid physical tunnel_source_ip_address or tunnel_source_ip_mask format !!!")
return False
# check tunnel_destination_ip_address, tunnel_destination_ip_mask fields
if ipv4_regex.match(tunnel_destination_ip_address) and ipv4_regex.match(tunnel_destination_ip_mask):
mask = str(IPAddress(tunnel_destination_ip_mask).netmask_bits())
cidr = tunnel_destination_ip_address + "/" + mask
prefix = str(IPNetwork(cidr).network) + "/" + mask
node = rtree.search_exact(prefix)
if node != None:
log.error("Physical destination interface of tunnel must not has hijacked ip !!!")
return False
else:
check_flag = 0
for item in json_data:
for element in item["interfaces"]:
if element["interface_ip"] == tunnel_destination_ip_address and element[
"interface_mask"] == tunnel_destination_ip_mask:
check_flag = 1
break
if check_flag == 1:
# fields were found
break
if check_flag == 0:
log.error("Invalid physical tunnnel destination interface !!!")
return False
else:
log.error("Invalid physical tunnel_destination_ip_address or tunnel_destination_ip_mask format !!!")
return False
return True
def deaggregation_technique(hijacked_prefix, rtree, admin_configs):
##perform prefix-deaggregation technique
prefix1_data, prefix2_data = prefix_deaggregation(hijacked_prefix)
# Best-match search will return the longest matching prefix
# that contains the search term (routing-style lookup)
rnode = rtree.search_best(str(hijacked_prefix.ip))
# call mitigation playbook for each
# tuple in longest prefix match node
for ttuple in rnode.data["data_list"]:
host = "target=" + ttuple[0] + ":&" + ttuple[0] + "_" + ttuple[1] + " asn=" + ttuple[0]
prefixes_str = " pr1_cidr=" + prefix1_data[0] + " pr1_network=" + prefix1_data[1] + " pr1_netmask=" + \
prefix1_data[2] + " pr2_cidr=" + prefix2_data[0] + " pr2_network=" + prefix2_data[
1] + " pr2_netmask=" + prefix2_data[2] + " interface_name=" + ttuple[2]
cla = host + prefixes_str
arg = "ansible-playbook -i " + admin_configs["ansible_hosts_file_path"] + " " + admin_configs[
"mitigation_playbook_path"] + " --extra-vars " + "\"" + cla + "\""
subprocess.call(arg, shell=True)
def tunnel_technique(hijacked_prefix, json_prefix_key, rtree, admin_configs):
##perform tunnel technique
# Best-match search will return the longest matching prefix
# that contains the search term (routing-style lookup)
rnode = rtree.search_best(str(hijacked_prefix.ip))
# call mitigation playbook for each
# tuple in longest prefix match node
for ttuple in rnode.data["data_list"]:
host = "target=" + ttuple[0] + ":&" + ttuple[0] + "_" + ttuple[1] + " asn=" + ttuple[0]
prefixes_str = " pr_cidr=" + str(hijacked_prefix.cidr) + " pr_network=" + str(
hijacked_prefix.ip) + " pr_netmask=" + str(hijacked_prefix.netmask) + " interface_name=" + ttuple[2]
cla = host + prefixes_str
arg = "ansible-playbook -i " + admin_configs["ansible_hosts_file_path"] + " " + admin_configs[
"tunnel_mitigation_playbook_path"] + " --extra-vars " + "\"" + cla + "\""
subprocess.call(arg, shell=True)
    # call tunnel_mitigation_playbook for the helper AS
# to redirect traffic into the tunnel
prefix_key = admin_configs["mitigation"]["configured_prefix"][json_prefix_key]["tunnel_definitions"]
host = "target=" + str(prefix_key["helperAS"]["asn"]) + ":&" + str(prefix_key["helperAS"]["asn"]) + "_" + \
prefix_key["helperAS"][
"router_id"] + " asn=" + str(prefix_key["helperAS"]["asn"])
prefixes_str = " pr_cidr=" + str(hijacked_prefix.cidr) + " pr_network=" + str(
hijacked_prefix.ip) + " pr_netmask=" + str(hijacked_prefix.netmask) + " interface_name=" + \
str(prefix_key["helperAS"]["tunnel_interface_name"])
cla = host + prefixes_str
arg = "ansible-playbook -i " + admin_configs["ansible_hosts_file_path"] + " " + admin_configs[
"tunnel_mitigation_playbook_path"] + " --extra-vars " + "\"" + cla + "\""
subprocess.call(arg, shell=True)
def mitigate_prefix(hijack_json, json_data, admin_configs):
hijacked_prefix = IPNetwork(json.loads(hijack_json)["prefix"])
rtree = create_prefix_tree(json_data)
if not isInputValid(rtree, json_data, admin_configs):
log.error("Invalid json input !!!")
return
else:
json_prefix_key = ""
for prefix in list(admin_configs["mitigation"]["configured_prefix"].keys()):
if IPSet([prefix]).issuperset(IPSet([hijacked_prefix.cidr])):
## we found the tunnel configs for this prefix
json_prefix_key = prefix
break
if json_prefix_key == "":
# or you can apply a default mitigation method
log.error("Mitigation definition for this prefix not found")
return
else:
# perform user mitigation technique
netmask_threshold = admin_configs["mitigation"]["configured_prefix"][json_prefix_key]["netmask_threshold"]
less_than_threshold = admin_configs["mitigation"]["configured_prefix"][json_prefix_key][
"less_than_threshold"]
equal_greater_than_threshold = admin_configs["mitigation"]["configured_prefix"][json_prefix_key][
"equal_greater_than_threshold"]
if hijacked_prefix.prefixlen < netmask_threshold:
if less_than_threshold == "deaggregate":
# perform prefix-deaggregation technique
deaggregation_technique(hijacked_prefix, rtree, admin_configs)
elif less_than_threshold == "tunnel":
# perform tunnel technique
tunnel_technique(hijacked_prefix, json_prefix_key, rtree, admin_configs)
elif less_than_threshold == "deaggregate+tunnel":
# perform deaggregation and tunnel technique
deaggregation_technique(hijacked_prefix, rtree, admin_configs)
tunnel_technique(hijacked_prefix, json_prefix_key, rtree, admin_configs)
else:
# manual
log.info("Manual mitigation !!!")
else:
if equal_greater_than_threshold == "tunnel":
# perform tunnel technique
tunnel_technique(hijacked_prefix, json_prefix_key, rtree, admin_configs)
else:
# manual
log.info("Manual mitigation !!!")
def main():
log.info("Starting mitigation...")
try:
parser = argparse.ArgumentParser(description="ARTEMIS mitigation")
parser.add_argument("-i", "--info_hijack", dest="info_hijack", type=str, help="hijack event information",
required=True)
hijack_arg = parser.parse_args()
        # create (if it does not exist) the file result.json.lock in the shared /tmp
        # directory in order to implement a lock-unlock technique for the results file
with open('/tmp/result.json.lock', 'w'):
pass
        # we need this lock to eliminate concurrent access to the results file
        # from other processes (the auto-mitigation mechanism) at the same time
lock = FileLock("/tmp/result.json.lock")
with lock.acquire(timeout=-1, poll_intervall=0.05):
# If timeout <= 0, there is no timeout and this
# method will block until the lock could be acquired
with open("/root/admin_configs.json") as json_file:
admin_configs = json.load(json_file)
json_data = read_json_file(admin_configs["bgp_results_path"])
mitigate_prefix(hijack_arg.info_hijack, json_data, admin_configs)
except Exception as e:
log.error(e, exc_info=True)
log.info("Stoping mitigation...")
if __name__ == '__main__':
main()
|
"""The ``metric_stats`` module provides an abstract class for storing
statistics produced over the course of an experiment and summarizing them.
Authors:
* Peter Plantinga 2020
* Mirco Ravanelli 2020
"""
import torch
from joblib import Parallel, delayed
from speechbrain.utils.data_utils import undo_padding
from speechbrain.utils.edit_distance import wer_summary, wer_details_for_batch
from speechbrain.dataio.dataio import merge_char, split_word
from speechbrain.dataio.wer import print_wer_summary, print_alignments
class MetricStats:
"""A default class for storing and summarizing arbitrary metrics.
More complex metrics can be created by sub-classing this class.
Arguments
---------
metric : function
The function to use to compute the relevant metric. Should take
at least two arguments (predictions and targets) and can
optionally take the relative lengths of either or both arguments.
Not usually used in sub-classes.
batch_eval: bool
When True it feeds the evaluation metric with the batched input.
When False and n_jobs=1, it performs metric evaluation one-by-one
in a sequential way. When False and n_jobs>1, the evaluation
runs in parallel over the different inputs using joblib.
n_jobs : int
The number of jobs to use for computing the metric. If this is
more than one, every sample is processed individually, otherwise
the whole batch is passed at once.
Example
-------
>>> from speechbrain.nnet.losses import l1_loss
>>> loss_stats = MetricStats(metric=l1_loss)
>>> loss_stats.append(
... ids=["utterance1", "utterance2"],
... predictions=torch.tensor([[0.1, 0.2], [0.2, 0.3]]),
... targets=torch.tensor([[0.1, 0.2], [0.1, 0.2]]),
... reduction="batch",
... )
>>> stats = loss_stats.summarize()
>>> stats['average']
0.050...
>>> stats['max_score']
0.100...
>>> stats['max_id']
'utterance2'
"""
def __init__(self, metric, n_jobs=1, batch_eval=True):
self.metric = metric
self.n_jobs = n_jobs
self.batch_eval = batch_eval
self.clear()
def clear(self):
"""Creates empty container for storage, removing existing stats."""
self.scores = []
self.ids = []
self.summary = {}
def append(self, ids, *args, **kwargs):
"""Store a particular set of metric scores.
Arguments
---------
ids : list
List of ids corresponding to utterances.
*args, **kwargs
Arguments to pass to the metric function.
"""
self.ids.extend(ids)
# Batch evaluation
if self.batch_eval:
scores = self.metric(*args, **kwargs).detach()
else:
if "predict" not in kwargs or "target" not in kwargs:
raise ValueError(
"Must pass 'predict' and 'target' as kwargs if batch_eval=False"
)
if self.n_jobs == 1:
# Sequence evaluation (loop over inputs)
scores = sequence_evaluation(metric=self.metric, **kwargs)
else:
# Multiprocess evaluation
scores = multiprocess_evaluation(
metric=self.metric, n_jobs=self.n_jobs, **kwargs
)
self.scores.extend(scores)
def summarize(self, field=None):
"""Summarize the metric scores, returning relevant stats.
Arguments
---------
field : str
If provided, only returns selected statistic. If not,
returns all computed statistics.
Returns
-------
float or dict
Returns a float if ``field`` is provided, otherwise
returns a dictionary containing all computed stats.
"""
min_index = torch.argmin(torch.tensor(self.scores))
max_index = torch.argmax(torch.tensor(self.scores))
self.summary = {
"average": float(sum(self.scores) / len(self.scores)),
"min_score": float(self.scores[min_index]),
"min_id": self.ids[min_index],
"max_score": float(self.scores[max_index]),
"max_id": self.ids[max_index],
}
if field is not None:
return self.summary[field]
else:
return self.summary
def write_stats(self, filestream, verbose=False):
"""Write all relevant statistics to file.
Arguments
---------
filestream : file-like object
A stream for the stats to be written to.
verbose : bool
Whether to also print the stats to stdout.
"""
if not self.summary:
self.summarize()
message = f"Average score: {self.summary['average']}\n"
message += f"Min error: {self.summary['min_score']} "
message += f"id: {self.summary['min_id']}\n"
message += f"Max error: {self.summary['max_score']} "
message += f"id: {self.summary['max_id']}\n"
filestream.write(message)
if verbose:
print(message)
def multiprocess_evaluation(metric, predict, target, lengths=None, n_jobs=8):
"""Runs metric evaluation if parallel over multiple jobs."""
if lengths is not None:
lengths = (lengths * predict.size(1)).round().int().cpu()
predict = [p[:length].cpu() for p, length in zip(predict, lengths)]
target = [t[:length].cpu() for t, length in zip(target, lengths)]
while True:
try:
scores = Parallel(n_jobs=n_jobs, timeout=30)(
delayed(metric)(p, t) for p, t in zip(predict, target)
)
break
except Exception as e:
print(e)
print("Evaluation timeout...... (will try again)")
return scores
def sequence_evaluation(metric, predict, target, lengths=None):
"""Runs metric evaluation sequentially over the inputs."""
if lengths is not None:
lengths = (lengths * predict.size(1)).round().int().cpu()
predict = [p[:length].cpu() for p, length in zip(predict, lengths)]
target = [t[:length].cpu() for t, length in zip(target, lengths)]
scores = []
for p, t in zip(predict, target):
score = metric(p, t)
scores.append(score)
return scores
class ErrorRateStats(MetricStats):
"""A class for tracking error rates (e.g., WER, PER).
Arguments
---------
merge_tokens : bool
Whether to merge the successive tokens (used for e.g.,
creating words out of character tokens).
See ``speechbrain.dataio.dataio.merge_char``.
split_tokens : bool
Whether to split tokens (used for e.g. creating
characters out of word tokens).
See ``speechbrain.dataio.dataio.split_word``.
space_token : str
The character to use for boundaries. Used with ``merge_tokens``
        this represents the character to split on after merging.
Used with ``split_tokens`` the sequence is joined with
this token in between, and then the whole sequence is split.
Example
-------
>>> cer_stats = ErrorRateStats()
>>> i2l = {0: 'a', 1: 'b'}
>>> cer_stats.append(
... ids=['utterance1'],
... predict=torch.tensor([[0, 1, 1]]),
... target=torch.tensor([[0, 1, 0]]),
... target_len=torch.ones(1),
... ind2lab=lambda batch: [[i2l[int(x)] for x in seq] for seq in batch],
... )
>>> stats = cer_stats.summarize()
>>> stats['WER']
33.33...
>>> stats['insertions']
0
>>> stats['deletions']
0
>>> stats['substitutions']
1
"""
def __init__(self, merge_tokens=False, split_tokens=False, space_token="_"):
self.clear()
self.merge_tokens = merge_tokens
self.split_tokens = split_tokens
self.space_token = space_token
def append(
self,
ids,
predict,
target,
predict_len=None,
target_len=None,
ind2lab=None,
):
"""Add stats to the relevant containers.
* See MetricStats.append()
Arguments
---------
ids : list
List of ids corresponding to utterances.
predict : torch.tensor
A predicted output, for comparison with the target output
target : torch.tensor
The correct reference output, for comparison with the prediction.
predict_len : torch.tensor
The predictions relative lengths, used to undo padding if
there is padding present in the predictions.
target_len : torch.tensor
The target outputs' relative lengths, used to undo padding if
there is padding present in the target.
ind2lab : callable
Callable that maps from indices to labels, operating on batches,
for writing alignments.
"""
self.ids.extend(ids)
if predict_len is not None:
predict = undo_padding(predict, predict_len)
if target_len is not None:
target = undo_padding(target, target_len)
if ind2lab is not None:
predict = ind2lab(predict)
target = ind2lab(target)
if self.merge_tokens:
predict = merge_char(predict, space=self.space_token)
target = merge_char(target, space=self.space_token)
if self.split_tokens:
predict = split_word(predict, space=self.space_token)
target = split_word(target, space=self.space_token)
scores = wer_details_for_batch(ids, target, predict, True)
self.scores.extend(scores)
def summarize(self, field=None):
"""Summarize the error_rate and return relevant statistics.
* See MetricStats.summarize()
"""
self.summary = wer_summary(self.scores)
# Add additional, more generic key
self.summary["error_rate"] = self.summary["WER"]
if field is not None:
return self.summary[field]
else:
return self.summary
def write_stats(self, filestream):
"""Write all relevant info (e.g., error rate alignments) to file.
* See MetricStats.write_stats()
"""
if not self.summary:
self.summarize()
print_wer_summary(self.summary, filestream)
print_alignments(self.scores, filestream)
class BinaryMetricStats(MetricStats):
"""Tracks binary metrics, such as precision, recall, F1, EER, etc.
"""
def __init__(self, positive_label=1):
self.clear()
self.positive_label = positive_label
def clear(self):
self.ids = []
self.scores = []
self.labels = []
self.summary = {}
def append(self, ids, scores, labels):
"""Appends scores and labels to internal lists.
Does not compute metrics until time of summary, since
automatic thresholds (e.g., EER) need full set of scores.
Arguments
---------
        ids : list
            The string ids for the samples.
        scores : torch.tensor
            The score tensor for these samples; stored detached.
        labels : torch.tensor
            The corresponding binary label tensor; stored detached.
"""
self.ids.extend(ids)
self.scores.extend(scores.detach())
self.labels.extend(labels.detach())
def summarize(self, field=None, threshold=None, beta=1, eps=1e-8):
"""Compute statistics using a full set of scores.
Full set of fields:
- TP - True Positive
- TN - True Negative
- FP - False Positive
- FN - False Negative
- FAR - False Acceptance Rate
- FRR - False Rejection Rate
- DER - Detection Error Rate (EER if no threshold passed)
- precision - Precision (positive predictive value)
- recall - Recall (sensitivity)
- F-score - Balance of precision and recall (equal if beta=1)
- MCC - Matthews Correlation Coefficient
Arguments
---------
field : str
A key for selecting a single statistic. If not provided,
a dict with all statistics is returned.
threshold : float
If no threshold is provided, equal error rate is used.
beta : float
How much to weight precision vs recall in F-score. Default
of 1. is equal weight, while higher values weight recall
higher, and lower values weight precision higher.
eps : float
A small value to avoid dividing by zero.
"""
if isinstance(self.scores, list):
self.scores = torch.stack(self.scores)
self.labels = torch.stack(self.labels)
if threshold is None:
positive_scores = self.scores[self.labels.nonzero(as_tuple=True)]
negative_scores = self.scores[
self.labels[self.labels == 0].nonzero(as_tuple=True)
]
eer, threshold = EER(positive_scores, negative_scores)
pred = (self.scores >= threshold).float()
true = self.labels
TP = self.summary["TP"] = float(pred.mul(true).sum())
TN = self.summary["TN"] = float((1.0 - pred).mul(1.0 - true).sum())
FP = self.summary["FP"] = float(pred.mul(1.0 - true).sum())
FN = self.summary["FN"] = float((1.0 - pred).mul(true).sum())
self.summary["FAR"] = FP / (FP + TN + eps)
self.summary["FRR"] = FN / (TP + FN + eps)
self.summary["DER"] = (FP + FN) / (TP + TN + eps)
self.summary["precision"] = TP / (TP + FP + eps)
self.summary["recall"] = TP / (TP + FN + eps)
self.summary["F-score"] = (
(1.0 + beta ** 2.0)
* TP
/ ((1.0 + beta ** 2.0) * TP + beta ** 2.0 * FN + FP)
)
self.summary["MCC"] = (TP * TN - FP * FN) / (
(TP + FP) * (TP + FN) * (TN + FP) * (TN + FN) + eps
) ** 0.5
if field is not None:
return self.summary[field]
else:
return self.summary
def EER(positive_scores, negative_scores):
"""Computes the EER (and its threshold).
Arguments
---------
positive_scores : torch.tensor
The scores from entries of the same class.
negative_scores : torch.tensor
The scores from entries of different classes.
Example
-------
>>> positive_scores = torch.tensor([0.6, 0.7, 0.8, 0.5])
>>> negative_scores = torch.tensor([0.4, 0.3, 0.2, 0.1])
>>> val_eer, threshold = EER(positive_scores, negative_scores)
>>> val_eer
0.0
"""
# Computing candidate thresholds
thresholds, _ = torch.sort(torch.cat([positive_scores, negative_scores]))
thresholds = torch.unique(thresholds)
# Adding intermediate thresholds
interm_thresholds = (thresholds[0:-1] + thresholds[1:]) / 2
thresholds, _ = torch.sort(torch.cat([thresholds, interm_thresholds]))
# Computing False Rejection Rate (miss detection)
positive_scores = torch.cat(
len(thresholds) * [positive_scores.unsqueeze(0)]
)
pos_scores_threshold = positive_scores.transpose(0, 1) <= thresholds
FRR = (pos_scores_threshold.sum(0)).float() / positive_scores.shape[1]
del positive_scores
del pos_scores_threshold
# Computing False Acceptance Rate (false alarm)
negative_scores = torch.cat(
len(thresholds) * [negative_scores.unsqueeze(0)]
)
neg_scores_threshold = negative_scores.transpose(0, 1) > thresholds
FAR = (neg_scores_threshold.sum(0)).float() / negative_scores.shape[1]
del negative_scores
del neg_scores_threshold
# Finding the threshold for EER
min_index = (FAR - FRR).abs().argmin()
# It is possible that eer != fpr != fnr. We return (FAR + FRR) / 2 as EER.
EER = (FAR[min_index] + FRR[min_index]) / 2
return float(EER), float(thresholds[min_index])
def minDCF(
positive_scores, negative_scores, c_miss=1.0, c_fa=1.0, p_target=0.01
):
"""Computes the minDCF metric normally used to evaluate speaker verification
systems. The min_DCF is the minimum of the following C_det function computed
within the defined threshold range:
C_det = c_miss * p_miss * p_target + c_fa * p_fa * (1 -p_target)
where p_miss is the missing probability and p_fa is the probability of having
a false alarm.
Arguments
---------
positive_scores : torch.tensor
The scores from entries of the same class.
negative_scores : torch.tensor
The scores from entries of different classes.
c_miss : float
Cost assigned to a missing error (default 1.0).
c_fa : float
Cost assigned to a false alarm (default 1.0).
p_target: float
Prior probability of having a target (default 0.01).
Example
-------
>>> positive_scores = torch.tensor([0.6, 0.7, 0.8, 0.5])
>>> negative_scores = torch.tensor([0.4, 0.3, 0.2, 0.1])
>>> val_minDCF, threshold = minDCF(positive_scores, negative_scores)
>>> val_minDCF
0.0
"""
# Computing candidate thresholds
thresholds, _ = torch.sort(torch.cat([positive_scores, negative_scores]))
thresholds = torch.unique(thresholds)
# Adding intermediate thresholds
interm_thresholds = (thresholds[0:-1] + thresholds[1:]) / 2
thresholds, _ = torch.sort(torch.cat([thresholds, interm_thresholds]))
# Computing False Rejection Rate (miss detection)
positive_scores = torch.cat(
len(thresholds) * [positive_scores.unsqueeze(0)]
)
pos_scores_threshold = positive_scores.transpose(0, 1) <= thresholds
p_miss = (pos_scores_threshold.sum(0)).float() / positive_scores.shape[1]
del positive_scores
del pos_scores_threshold
# Computing False Acceptance Rate (false alarm)
negative_scores = torch.cat(
len(thresholds) * [negative_scores.unsqueeze(0)]
)
neg_scores_threshold = negative_scores.transpose(0, 1) > thresholds
p_fa = (neg_scores_threshold.sum(0)).float() / negative_scores.shape[1]
del negative_scores
del neg_scores_threshold
c_det = c_miss * p_miss * p_target + c_fa * p_fa * (1 - p_target)
c_min, min_index = torch.min(c_det, dim=0)
return float(c_min), float(thresholds[min_index])
|
import flash
from flash import download_data
from flash.text import SummarizationData, SummarizationTask
if __name__ == "__main__":
# 1. Download the data
download_data("https://pl-flash-data.s3.amazonaws.com/xsum.zip", 'data/')
# 2. Load the data
datamodule = SummarizationData.from_files(
train_file="data/xsum/train.csv",
valid_file="data/xsum/valid.csv",
test_file="data/xsum/test.csv",
input="input",
target="target"
)
# 3. Build the model
model = SummarizationTask()
# 4. Create the trainer. Run once on data
trainer = flash.Trainer(max_epochs=1)
# 5. Fine-tune the model
trainer.finetune(model, datamodule=datamodule)
# 6. Test model
trainer.test()
# 7. Save it!
trainer.save_checkpoint("summarization_model_xsum.pt")
|
import logging
import os
import time
from brownie import chain
from yearn.treasury.treasury import Treasury
from yearn.outputs import victoria
logger = logging.getLogger('yearn.treasury_exporter')
sleep_interval = int(os.environ.get('SLEEP_SECONDS', '0'))
def main():
treasury = Treasury(watch_events_forever=True)
for block in chain.new_blocks(height_buffer=12):
start_time = time.time()
treasury.export(block.number, block.timestamp)
duration = time.time() - start_time
victoria.export_duration(duration, 1, "treasury_forwards", block.timestamp)
time.sleep(sleep_interval)
|
import time
import vlc
player = vlc.MediaPlayer("background-sounds/countdown.mp4")
player.play()
time.sleep(10)
player.stop()
|
"""Helper module for handling OS related commands."""
import os
import platform
import subprocess
from phytoolkit.exception.installationexception import InstallationException
class OsHelper:
"""Runs shell commands."""
def __init__(self, config):
self.config = config
self.console = config.console
self.current_dir = os.getcwd()
self.system = platform.system()
self.release = platform.release()
self.version = platform.version()
def get_as_string(self):
"""Returns OS configuration."""
return "%s - %s - %s" % (self.system, self.release, self.version)
def validate(self):
"""Validates OS."""
supported_platforms = ["Linux", ]
self.console.verbose_info(
"Validating against supported platforms %s." % ", ".join(supported_platforms))
if self.system not in supported_platforms:
raise InstallationException("Unsupported platform %s" % self.system)
self.console.verbose_success("Platform %s is supported." % self.system)
self.console.verbose_info("Checking for variant 'Ubuntu' in version.")
if "Ubuntu" not in self.version:
raise InstallationException(
"Unsupported variant %s. Only 'Ubuntu' supported." % self.version)
self.console.verbose_success("Variant %s is supported." % self.version)
self.console.verbose_info("Installing to destination directory %s." % self.config.dest_dir)
if not os.path.exists(self.config.dest_dir):
os.makedirs(self.config.dest_dir)
self.console.verbose_success("Destination directory created.")
def run_shell_command(self, command, cwd=""):
"""Runs a shell command."""
if cwd == "":
cwd = self.config.dest_dir
self.console.verbose_info("Running command %s from %s." % (" ".join(command), cwd))
output = subprocess.run(command, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
check=True)
self.config.log_file.write(output.stdout.decode("UTF-8"))
self.config.log_file.write(output.stderr.decode("UTF-8"))
self.console.verbose_info("Command exited with status %s." % output.returncode)
return output.returncode == 0
def install_packages(self, packages, cwd=""):
"""Short cut for installing apt packages."""
self.console.verbose_info("Installing packages %s." % ", ".join(packages))
return self.run_shell_command(["sudo", "apt", "install", "-y"] + packages, cwd)
def extract_tar_file(self, file, cwd=""):
"""Extracts tar file."""
self.console.verbose_info("Extracting tar file %s." % file)
return self.run_shell_command(["tar", "xf", file], cwd)
def write_file(self, file, content):
"""Writes content into file."""
self.console.verbose_info("Writing contents\n %s \nto file %s." % (content, file))
with open(file, "w") as file_handle:
file_handle.write(content)
self.console.verbose_success("File write completed.")
def append_file(self, file, content):
"""Appends content into file."""
self.console.verbose_info("Appending contents\n %s \nto file %s." % (content, file))
with open(file, "a") as file_handle:
file_handle.write(content)
self.console.verbose_success("File append completed.")
|
#!/usr/bin/env python3
'''
Copyright 2018, VDMS
Licensed under the terms of the BSD 2-clause license. See LICENSE file for terms.
API for Host Information
Should return data about the host & return the collections for this particular host.
```swagger-yaml
/hostcollections/{host_id}/ :
x-cached-length: "Every Midnight"
get:
description: |
Designed to grab the latest collections from the host. Grabs the fresh
ones as of yesterday Midnight. Only grabs one collection for each type/subtype
responses:
200:
description: OK
tags:
- collections
- hosts
parameters:
- name: host_id
in: path
description: |
The id of the host you wish to get data for.
schema:
type: integer
required: true
- name: ctype
in: query
description: |
A regex to match for the collection_type. [PCRE](https://mariadb.com/kb/en/mariadb/regexp/) type
          regular expressions are accepted. Matched against the collection_type column in the collection table.
schema:
type: string
required: false
{{ exact | indent(6, True) }}
{{ col | indent(6, True) }}
```
'''
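# Example request shape (the path prefix depends on where the blueprint is
# mounted; the host id and regex below are illustrative):
#
#   GET /hostcollections/123/?ctype=^packages
#
# The response is a JSON body with "meta" (host information), "data" (one entry
# per fresh collection type/subtype) and "links".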
import json
import ast
import time
import os
import hashlib
from flask import current_app, Blueprint, g, request, jsonify, send_from_directory, abort
import manoward
hostcollections = Blueprint('api2_hostcollections', __name__)
@hostcollections.route("/hostcollections/", methods=['GET'])
@hostcollections.route("/hostcollections/<int:host_id>", methods=['GET'])
@hostcollections.route("/hostcollections/<int:host_id>/", methods=['GET'])
def api2_hostcollections(host_id=0):
args_def = {"hostid": {"req_type": int,
"default": host_id,
"required": True,
"positive": True,
"sql_param": True,
"sql_clause": " fk_host_id = %s "},
"ctype": {"req_type": str,
"default": None,
"required": False,
"sql_param": True,
"sql_clause": "collection.collection_type REGEXP %s",
"sql_exact_clause": "collection.collection_type = %s",
"qdeparse": True}
}
args = manoward.process_args(args_def,
request.args,
coll_lulimit=g.twoDayTimestamp,
include_coll_sql=True,
include_exact=True)
meta_dict = dict()
request_data = list()
links_dict = dict()
meta_dict["version"] = 2
meta_dict["name"] = "Jellyfish API Version 2 Host Results for Host ID {}".format(
args["hostid"])
meta_dict["status"] = "In Progress"
links_dict["parent"] = "{}{}/".format(g.config_items["v2api"]["preroot"],
g.config_items["v2api"]["root"])
links_dict["self"] = "{}{}/hostinfo/{}?{}".format(g.config_items["v2api"]["preroot"],
g.config_items["v2api"]["root"],
args["hostid"],
args["qdeparsed_string"])
requesttype = "host_collections"
host_collections_query = '''select collection_id, fk_host_id,
UNIX_TIMESTAMP(initial_update) as initial_update,
UNIX_TIMESTAMP(collection.last_update) as last_update,
hostname, pop, srvtype, hoststatus,
UNIX_TIMESTAMP(hosts.last_update) as hlast_update,
collection_type, collection_subtype, collection_value
from collection
join hosts on collection.fk_host_id = hosts.host_id
where {}
group by collection_type, collection_subtype'''.format(" and ".join(args["args_clause"]))
results = manoward.run_query(g.cur,
host_collections_query,
args=args["args_clause_args"],
one=False,
do_abort=True,
require_results=False)
meta_dict["host_information"] = dict()
if len(results.get("data", list())) > 0:
# Inject some Meta Data
hostzero = results.get("data", list())[0]
g.logger.debug(hostzero)
meta_dict["host_information"]["hostname"] = hostzero["hostname"]
meta_dict["host_information"]["pop"] = hostzero["pop"]
meta_dict["host_information"]["srvtype"] = hostzero["srvtype"]
meta_dict["host_information"]["hoststatus"] = hostzero["hoststatus"]
meta_dict["host_information"]["last_update"] = hostzero["hlast_update"]
else:
meta_dict["host_information"]["hostname"] = "No Results"
meta_dict["host_information"]["pop"] = str()
meta_dict["host_information"]["srvtype"] = str()
meta_dict["host_information"]["hoststatus"] = str()
meta_dict["host_information"]["last_update"] = 0
for this_coll in results.get("data", list()):
this_results = dict()
this_results["type"] = requesttype
this_results["id"] = this_coll["collection_id"]
this_results["attributes"] = this_coll
this_results["relationships"] = dict()
# Now pop this onto request_data
request_data.append(this_results)
return jsonify(meta=meta_dict, data=request_data, links=links_dict)
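# Hypothetical request sketch (the URL prefix depends on how the blueprint is registered;
# the host id and the regex are made-up values):
#   GET /hostcollections/42/?ctype=^pkg
# returns a JSON document with "meta", "data" and "links" keys as assembled above, one
# entry per collection_type/collection_subtype pair for the host.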
|
import sys
import io
import time
import pprint
input_txt = """
6
30 35
35 15
15 5
5 10
10 20
20 25
"""
sys.stdin = io.StringIO(input_txt)
tmp = input()
start = time.time()
# copy the below part and paste to the submission form.
# ---------function------------
def least_multiplication(mtx):
    """Matrix chain multiplication DP; mtx is a list of (rows, cols) pairs."""
    N = len(mtx)
    # mul[i][j] = minimum number of scalar multiplications needed to compute
    # the product of matrices i..j (inclusive)
    mul = [[None] * N for x in range(N)]
    for i in range(N):
        mul[i][i] = 0  # a single matrix needs no multiplication
    #pprint.pprint(mul)
    for chain in range(1, N):  # length of the sub-chain
        for begin in range(N-chain):
            end = begin + chain
            cand = []
            for j in range(begin, end):  # split point: (begin..j) x (j+1..end)
                left = mul[begin][j]
                down = mul[j+1][end]
                # cost of multiplying the two sub-products:
                # rows(begin) * cols(j) * cols(end)
                this = mtx[begin][0] * mtx[j][1] * mtx[end][1]
                cand.append(left + down + this)
            mul[begin][end] = min(cand)
    #pprint.pprint(mul)
    return mul[0][N-1]
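# Quick self-check (a sketch, not part of the original script): the embedded sample
# input is the chain of matrices 30x35, 35x15, 15x5, 5x10, 10x20, 20x25, whose known
# minimum multiplication count is 15125, so the script should print 15125.
#   assert least_multiplication([(30, 35), (35, 15), (15, 5), (5, 10), (10, 20), (20, 25)]) == 15125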
def main():
n = int(input())
matrices = []
for i in range(n):
        rows, cols = map(int, input().split())
        matrices.append((rows, cols))
ans = least_multiplication(matrices)
print(ans)
main()
# -----------------------------
print("elapsed:", time.time()-start)
sys.stdin = sys.__stdin__
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : fib_coroutine.py
# @Author: yubo
# @Date : 2019/1/15
# @Desc :
from bisect import insort
from collections import deque
from functools import partial
from time import time
import selectors
import sys
import types
from functools import wraps
def log_execution_time(func):
@wraps(func)
def wrapper(*args, **kwargs):
start = time()
return_value = func(*args, **kwargs)
message = "Executing {} took {:.03} seconds.".format(func.__name__,
time() - start)
print(message)
return return_value
return wrapper
# def fib(n):
# return fib(n - 1) + fib(n - 2) if n > 1 else n
def fib(n):
if n <= 1:
yield n
else:
a = yield fib(n - 1)
b = yield fib(n - 2)
yield a + b
# NOTE: wrapping the generator-based fib above only times generator creation, so
# timed_fib is really meant for the plain recursive fib kept commented out above.
timed_fib = log_execution_time(fib)
class sleep_for_seconds(object):
"""
Yield an object of this type from a coroutine to have it "sleep" for the
given number of seconds.
"""
def __init__(self, wait_time):
self._wait_time = wait_time
class EventLoop(object):
"""
Implements a simplified coroutine-based event loop as a demonstration.
Very similar to the "Trampoline" example in PEP 342, with exception
handling taken out for simplicity, and selectors added to handle file IO
"""
def __init__(self, *tasks):
self._running = False
self._selector = selectors.DefaultSelector()
# Queue of functions scheduled to run
self._tasks = deque(tasks)
# (coroutine, stack) pair of tasks waiting for input from stdin
self._tasks_waiting_on_stdin = []
# List of (time_to_run, task) pairs, in sorted order
self._timers = []
# Register for polling stdin for input to read
self._selector.register(sys.stdin, selectors.EVENT_READ)
    def resume_task(self, coroutine, value=None, stack=()):
        result = coroutine.send(value)
        if isinstance(result, types.GeneratorType):
            # The coroutine yielded a sub-coroutine: schedule it and remember the
            # caller on the stack so it receives the sub-coroutine's final value.
            self.schedule(result, None, (coroutine, stack))
        elif isinstance(result, sleep_for_seconds):
            # The coroutine asked to sleep: reschedule it on a timer.
            self.schedule(coroutine, None, stack, time() + result._wait_time)
        elif result is sys.stdin:
            # The coroutine is waiting for a line of input from stdin.
            self._tasks_waiting_on_stdin.append((coroutine, stack))
        elif stack:
            # A plain value was yielded: hand it back to the waiting caller.
            self.schedule(stack[0], result, stack[1])
def schedule(self, coroutine, value=None, stack=(), when=None):
"""
Schedule a coroutine task to be run, with value to be sent to it, and
stack containing the coroutines that are waiting for the value yielded
by this coroutine.
"""
# Bind the parameters to a function to be scheduled as a function with
# no parameters.
task = partial(self.resume_task, coroutine, value, stack)
if when:
insort(self._timers, (when, task))
else:
self._tasks.append(task)
def stop(self):
self._running = False
def do_on_next_tick(self, func, *args, **kwargs):
self._tasks.appendleft(partial(func, *args, **kwargs))
def run_forever(self):
self._running = True
while self._running:
# First check for available IO input
for key, mask in self._selector.select(0):
line = key.fileobj.readline().strip()
for task, stack in self._tasks_waiting_on_stdin:
self.schedule(task, line, stack)
self._tasks_waiting_on_stdin.clear()
# Next, run the next task
if self._tasks:
task = self._tasks.popleft()
task()
# Finally run time scheduled tasks
while self._timers and self._timers[0][0] < time():
task = self._timers[0][1]
del self._timers[0]
task()
self._running = False
def print_every(message, interval):
"""
Coroutine task to repeatedly print the message at the given interval
(in seconds)
"""
while True:
print("{} - {}".format(int(time()), message))
yield sleep_for_seconds(interval)
# def read_input(loop):
# """
# Coroutine task to repeatedly read new lines of input from stdin, treat
# the input as a number n, and calculate and display fib(n).
# """
# while True:
# line = yield sys.stdin
# if line == 'exit':
# loop.do_on_next_tick(loop.stop)
# continue
# n = int(line)
# print("fib({}) = {}".format(n, timed_fib(n)))
def read_input(loop):
while True:
line = yield sys.stdin
n = int(line)
fib_n = yield fib(n)
print("fib({}) = {}".format(n, fib_n))
def main():
loop = EventLoop()
hello_task = print_every('Hello world!', 3)
fib_task = read_input(loop)
loop.schedule(hello_task)
loop.schedule(fib_task)
loop.run_forever()
if __name__ == '__main__':
main()
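# Example interaction (a sketch): while running, the loop prints
# "<unix time> - Hello world!" every 3 seconds; typing a number such as 10 on stdin
# resumes read_input(), which drives the fib coroutine and prints "fib(10) = 55".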
|
class Solution(object):
def intersect(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
sect = {}
res = []
for num in nums1:
if num in sect:
sect[num][0] += 1
else:
sect[num] = [1, 0]
for num in nums2:
if num in sect:
sect[num][1] += 1
for k in sect:
res += min(sect[k]) * [k]
return res
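# Example usage (sketch): each common element appears as many times as it occurs in both arrays.
#   print(Solution().intersect([1, 2, 2, 1], [2, 2]))  # -> [2, 2]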
|
# @file Longest Palindrome
# @brief given string, find length of longest possible palindrome
# https://leetcode.com/problems/longest-palindrome
'''
Given a string which consists of lowercase or uppercase letters, find the length of the longest palindromes that can be built with those letters.
This is case sensitive, for example "Aa" is not considered a palindrome here.
Note:
Assume the length of given string will not exceed 1,010.
'''
import collections
class Solution(object):
    # Approach 1: dictionary
    # time complexity: O(n), space complexity: O(1)
    def longestPalindrome(self, s):
        length = 0
        odd = False
        counts = {}
        for elem in s:
            counts[elem] = counts.get(elem, 0) + 1
        for k, v in counts.items():
            length += (v // 2) * 2  # integer division to ignore the odd leftovers
            odd = odd | (v % 2)     # flag set if at least one count is odd
        if odd:
            length += 1  # a single odd-count character can sit in the middle
        return length
    # Approach 2: math based using collections
    # time complexity: O(n), space complexity: O(1)
    def longestPalindrome2(self, s):
        # Count the characters with an odd count: AND each count with 1 and sum the 0/1s.
        # All but one odd occurrence must be dropped; one odd character may be the centre.
        odds = sum(v & 1 for v in collections.Counter(s).values())
        return len(s) - odds + bool(odds)
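# Example usage (sketch): "abccccdd" yields three full pairs plus one odd centre
# character, so both approaches return 7.
#   print(Solution().longestPalindrome("abccccdd"))   # -> 7
#   print(Solution().longestPalindrome2("abccccdd"))  # -> 7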
|
#!/usr/bin/env python
import os
import sys
from distutils.text_file import TextFile
from skbuild import setup
# Add current folder to path
# This is required to import versioneer in an isolated pip build
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import versioneer # noqa: E402
with open('README.rst', 'r') as fp:
readme = fp.read()
with open('HISTORY.rst', 'r') as fp:
history = fp.read().replace('.. :changelog:', '')
def parse_requirements(filename):
with open(filename, 'r') as file:
return TextFile(filename, file).readlines()
requirements = []
dev_requirements = parse_requirements('requirements-dev.txt')
# Require pytest-runner only when running tests
pytest_runner = (['pytest-runner>=2.0,<3dev']
if any(arg in sys.argv for arg in ('pytest', 'test'))
else [])
setup_requires = pytest_runner
setup(
name='ninja',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
author='Jean-Christophe Fillion-Robin',
author_email='scikit-build@googlegroups.com',
packages=['ninja'],
entry_points={
'console_scripts': [
'ninja=ninja:ninja'
]
},
url=r'http://ninja-build.org/',
download_url=r'https://github.com/ninja-build/ninja/releases',
description=r'Ninja is a small build system with a focus on speed',
long_description=readme + '\n\n' + history,
classifiers=[
'License :: OSI Approved :: Apache Software License',
'License :: OSI Approved :: BSD License',
'Programming Language :: C',
'Programming Language :: C++',
'Programming Language :: Fortran',
'Programming Language :: Python',
'Operating System :: OS Independent',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools'
],
license='Apache 2.0',
keywords='ninja build c++ fortran cross-platform cross-compilation',
install_requires=requirements,
tests_require=dev_requirements,
setup_requires=setup_requires
)
|
from argparse import Namespace
import chainer
import numpy
import pytest
import torch
import espnet.lm.chainer_backend.lm as lm_chainer
from espnet.nets.beam_search import beam_search
from espnet.nets.lm_interface import dynamic_import_lm
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.nets.scorers.length_bonus import LengthBonus
from test.test_beam_search import prepare
from test.test_beam_search import rnn_args
def transfer_lstm(ch_lstm, th_lstm):
ch_lstm.upward.W.data[:] = 1
th_lstm.weight_ih.data[:] = torch.from_numpy(ch_lstm.upward.W.data)
ch_lstm.upward.b.data[:] = 1
th_lstm.bias_hh.data[:] = torch.from_numpy(ch_lstm.upward.b.data)
# NOTE: only lateral weight can directly transfer
# rest of the weights and biases have quite different placements
th_lstm.weight_hh.data[:] = torch.from_numpy(ch_lstm.lateral.W.data)
th_lstm.bias_ih.data.zero_()
def transfer_lm(ch_rnnlm, th_rnnlm):
assert isinstance(ch_rnnlm, lm_chainer.RNNLM)
assert isinstance(th_rnnlm, lm_pytorch.RNNLM)
th_rnnlm.embed.weight.data = torch.from_numpy(ch_rnnlm.embed.W.data)
if th_rnnlm.typ == "lstm":
for n in range(ch_rnnlm.n_layers):
transfer_lstm(ch_rnnlm.rnn[n], th_rnnlm.rnn[n])
else:
assert False
th_rnnlm.lo.weight.data = torch.from_numpy(ch_rnnlm.lo.W.data)
th_rnnlm.lo.bias.data = torch.from_numpy(ch_rnnlm.lo.b.data)
def test_lm():
n_vocab = 3
n_layers = 2
n_units = 2
batchsize = 5
for typ in ["lstm"]: # TODO(anyone) gru
rnnlm_ch = lm_chainer.ClassifierWithState(lm_chainer.RNNLM(n_vocab, n_layers, n_units, typ=typ))
rnnlm_th = lm_pytorch.ClassifierWithState(lm_pytorch.RNNLM(n_vocab, n_layers, n_units, typ=typ))
transfer_lm(rnnlm_ch.predictor, rnnlm_th.predictor)
# test prediction equality
x = torch.from_numpy(numpy.random.randint(n_vocab, size=batchsize)).long()
with torch.no_grad(), chainer.no_backprop_mode(), chainer.using_config('train', False):
rnnlm_th.predictor.eval()
state_th, y_th = rnnlm_th.predictor(None, x.long())
state_ch, y_ch = rnnlm_ch.predictor(None, x.data.numpy())
for k in state_ch.keys():
for n in range(len(state_th[k])):
print(k, n)
print(state_th[k][n].data.numpy())
print(state_ch[k][n].data)
numpy.testing.assert_allclose(state_th[k][n].data.numpy(), state_ch[k][n].data, 1e-5)
numpy.testing.assert_allclose(y_th.data.numpy(), y_ch.data, 1e-5)
@pytest.mark.parametrize(
"lm_name, lm_args, device, dtype", [
(nn, args, device, dtype)
for nn, args in (
("default", Namespace(type="lstm", layer=2, unit=2, dropout_rate=0.5)),
("default", Namespace(type="gru", layer=2, unit=2, dropout_rate=0.5)),
("seq_rnn", Namespace(type="lstm", layer=2, unit=2, dropout_rate=0.5)),
("seq_rnn", Namespace(type="gru", layer=2, unit=2, dropout_rate=0.5)),
("transformer", Namespace(layer=1, unit=2, att_unit=2, head=2, dropout_rate=0.5, posenc_len=10))
)
for device in ("cpu", "cuda")
for dtype in ("float16", "float32", "float64")
])
def test_lm_trainable_and_decodable(lm_name, lm_args, device, dtype):
if device == "cuda" and not torch.cuda.is_available():
pytest.skip("no cuda device is available")
if device == "cpu" and dtype == "float16":
pytest.skip("cpu float16 implementation is not available in pytorch yet")
dtype = getattr(torch, dtype)
model, x, ilens, y, data, train_args = prepare("rnn", rnn_args)
char_list = train_args.char_list
n_vocab = len(char_list)
lm = dynamic_import_lm(lm_name, backend="pytorch")(n_vocab, lm_args)
lm.to(device=device, dtype=dtype)
# test trainable
a = torch.randint(1, n_vocab, (3, 2), device=device)
b = torch.randint(1, n_vocab, (3, 2), device=device)
loss, logp, count = lm(a, b)
loss.backward()
for p in lm.parameters():
assert p.grad is not None
# test decodable
model.to(device=device, dtype=dtype).eval()
lm.eval()
scorers = model.scorers()
scorers["lm"] = lm
scorers["length_bonus"] = LengthBonus(len(char_list))
weights = dict(decoder=1.0, lm=1.0, length_bonus=1.0)
with torch.no_grad():
feat = x[0, :ilens[0]].to(device=device, dtype=dtype)
enc = model.encode(feat)
beam_size = 3
result = beam_search(
x=enc,
sos=model.sos,
eos=model.eos,
beam_size=beam_size,
vocab_size=len(train_args.char_list),
weights=weights,
scorers=scorers,
token_list=train_args.char_list
)
assert len(result) >= beam_size
|
# Given a number sequence, find the minimum number of elements that should be
# deleted to make the remaining sequence sorted.
# Longest increasing subsequence pattern
import math
def MD(A):
    # T[j] = minimum number of deletions within A[0..j] such that A[j] is kept and
    # the kept elements form an increasing sequence ending at A[j].
    n = len(A)
    T = [math.inf for i in range(n)]
    # Base case: keeping only A[0] needs no deletions, since a single element is sorted
    T[0] = 0
    for j in range(1, n):
        # worst case: delete every element before A[j]
        T[j] = j
        for i in range(j):
            if A[i] < A[j]:
                # keep both A[i] and A[j]; the j - i - 1 elements in between are deleted
                T[j] = min(T[j], T[i] + (j - i - 1))
    # every element after the last kept one must be deleted as well
    return min(T[j] + (n - 1 - j) for j in range(n))
def lengthOfLIS(A):
if len(A) == 0: return 0
LIS = [1 for i in range(len(A))]
for i in range(len(A)):
for j in range(i + 1, len(A)):
if A[j] > A[i]:
LIS[j] = max(LIS[j], LIS[i] + 1)
return max(LIS)
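# Standard relation used by the checks in main(): the minimum number of deletions equals
# len(A) - lengthOfLIS(A), because the elements that remain form an increasing subsequence
# and keeping the longest such subsequence removes the fewest elements.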
def main():
A = [4, 2, 3, 6, 10, 1, 12]
print("Testcase 1 is {0} for sequence {1}, since it is calculated as {2} and it should be {3}"
.format('Pass' if MD(A) == len(A) - lengthOfLIS(A) else 'Fail', A, MD(A), len(A) - lengthOfLIS(A)))
A = [-4, 10, 3, 7, 15]
print("Testcase 1 is {0} for sequence {1}, since it is calculated as {2} and it should be {3}"
.format('Pass' if MD(A) == len(A) - lengthOfLIS(A) else 'Fail', A, MD(A), len(A) - lengthOfLIS(A)))
if __name__ == '__main__':
main()
|
from django.contrib.auth.models import Group
from rest_framework import serializers
from apps.teachers.models import Teacher
from apps.users.models import User, House
from apps.users.serializers import BasicUserSerializer
class TeacherSerializer(serializers.Serializer):
id = serializers.IntegerField(source='user_id')
subject = serializers.ReadOnlyField()
class TeacherDotsSerializer(serializers.Serializer):
id = serializers.IntegerField()
subject = serializers.ReadOnlyField()
class BasicTeacherUserDetailsSerializer(serializers.Serializer):
user_details = BasicUserSerializer(source='user')
class TeacherListSerializer(serializers.Serializer):
user_details = BasicUserSerializer(source='user')
subject = serializers.CharField()
def create(self, validated_data):
# password is the same as the username if a password is not supplied
if 'password' not in validated_data['user']:
validated_data['user']['password'] = validated_data['user']['username']
# When creating a user we can't just provide the house name, the "create_user" method requires a house instance
validated_data['user']['house'] = House.objects.get(name=validated_data['user']['house'])
new_user = User.objects.create_user(**validated_data['user'])
teacher = Teacher.objects.create(user_id=new_user.id, subject=validated_data['subject'])
teacher_group = Group.objects.get(name='teachers')
new_user.groups.add(teacher_group)
return teacher
    def update(self, instance, validated_data):
        user_details = validated_data.pop('user', None)
        teacher_details = validated_data
        if user_details is not None:
            user = User.objects.get(id=instance.user_id)
            user.username = user_details.get('username', user.username)
            # Only re-hash the password when a new one is actually supplied,
            # otherwise the stored hash would be hashed again.
            if 'password' in user_details:
                user.set_password(user_details['password'])
            user.first_name = user_details.get('first_name', user.first_name)
            user.last_name = user_details.get('last_name', user.last_name)
            user.email = user_details.get('email', user.email)
            user.dob = user_details.get('dob', user.dob)
            # The user model stores a House instance, not just the house name
            user_house_instance = House.objects.get(name=user_details.get('house', user.house))
            user.house = user_house_instance
            user.comments = user_details.get('comments', user.comments)
            user.save()
        if teacher_details is not None:
            instance.subject = teacher_details.get('subject', instance.subject)
            instance.save()
        return instance
|
# Generated by Django 3.0.2 on 2020-01-17 14:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('cookbook', '0007_auto_20191226_0852'),
]
operations = [
migrations.CreateModel(
name='MealPlan',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('meal', models.CharField(choices=[('BREAKFAST', 'Breakfast'), ('LUNCH', 'Lunch'), ('DINNER', 'Dinner'), ('OTHER', 'Other')], default='BREAKFAST', max_length=128)),
('note', models.TextField(blank=True)),
('date', models.DateField()),
('recipe', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cookbook.Recipe')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
# Licensed under an MIT open source license - see LICENSE
'''
Test functions for PSpec
'''
from unittest import TestCase
import numpy as np
import numpy.testing as npt
from ..statistics import PowerSpectrum, PSpec_Distance
from ._testing_data import \
dataset1, dataset2, computed_data, computed_distances
class testPSpec(TestCase):
def setUp(self):
self.dataset1 = dataset1
self.dataset2 = dataset2
def test_PSpec_method(self):
self.tester = \
PowerSpectrum(dataset1["integrated_intensity"][0] *
dataset1["integrated_intensity_error"][0] ** 2.,
dataset1["integrated_intensity"][1])
self.tester.run()
assert np.allclose(self.tester.ps1D, computed_data['pspec_val'])
def test_PSpec_distance(self):
self.tester_dist = PSpec_Distance(dataset1, dataset2)
self.tester_dist.distance_metric()
npt.assert_almost_equal(self.tester_dist.distance,
computed_distances['pspec_distance'])
|
# TensorFlow & Keras
import tensorflow as tf
from tensorflow import keras
# Numpy & Matplot
import numpy as np
import matplotlib.pyplot as plt
# Version >= 1.12.0
print(tf.__version__)
# Download fashion dataset
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Classifications
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# Interesting print statements
print(train_images.shape) # (60000, 28, 28)
print(len(train_labels)) # 60000
print(train_labels) # array([9, 0, 0, ..., 3, 0, 5], dtype=uint8)
print(test_images.shape) # (10000, 28, 28)
print(len(test_labels)) # 10000
# Preprocess data
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
# View plot
# plt.show()
# Consistent preprocessing
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
# View plot
# plt.show()
# Setup the layers
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense(128, activation=tf.nn.relu), # We deep learning now, dawg
keras.layers.Dense(10, activation=tf.nn.softmax)
])
# Compile the model
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Train the model
model.fit(train_images, train_labels, epochs=5)
# Evaluate accuracy
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
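# The test accuracy typically comes out a little lower than the final training
# accuracy, a mild sign of overfitting; exact numbers vary between runs.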
# Make predictions
predictions = model.predict(test_images)
print(predictions[0]) # array of 10 numbers representing the confidence in each label
print(np.argmax(predictions[0])) # 9 -> most confident in label 9 (ankle boot), which we can check...
print(test_labels[0]) # also 9...
## Fancy prediction graphs
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = '#26b0ff'
else:
color = '#b7b0ff'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('#b7b0ff')
thisplot[true_label].set_color('#26b0ff')
## The famous Ankle boot
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
# View our fancy Ankle boot graphic
# plt.show() # Editor's note: this seems to also render all previous plt graphs.
## The infamous ~bag~ sandal sneaker
i = 12 # This seems redundant, DRY it up?
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
# View our terrible prediction
# plt.show()
## Once more, with feeling
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions, test_labels)
# View best graphic yet
#plt.show()
## Finally, predict a single image
# Grab an image from the test dataset
i = 0 # I heard you like useless assignments to `i`, so here's another on the house
img = test_images[i]
print(img.shape) # (28, 28)
# Convert image to batch of 1 (optimizations expect batch as params)
img = (np.expand_dims(img,0))
print(img.shape) # (1, 28, 28)
# Predict the image
predictions_single = model.predict(img)
print(predictions_single) # Array of prediction values for 10 labels...
plt.figure(figsize=(6,3))
plot_value_array(0, predictions_single, test_labels)
plt.xticks(range(10), class_names, rotation=45)
plt.show()
print(np.argmax(predictions_single[0])) # 9
# Fin.
|
"""Curvature computation for a circular droplet. (5 seconds)
For particles distributed in a box, an initial circular interface is
distinguished by coloring the particles within a circle. The resulting
equations can then be used to check for the numerical curvature and
discretized dirac-delta function based on this artificial interface.
"""
import numpy
# Particle generator
from pysph.base.utils import get_particle_array
from pysph.base.kernels import QuinticSpline
# SPH Equations and Group
from pysph.sph.equation import Group
from pysph.sph.wc.viscosity import ClearyArtificialViscosity
from pysph.sph.wc.transport_velocity import SummationDensity, MomentumEquationPressureGradient,\
SolidWallPressureBC, SolidWallNoSlipBC, \
StateEquation, MomentumEquationArtificialStress, MomentumEquationViscosity
from pysph.sph.surface_tension import ColorGradientUsingNumberDensity, \
InterfaceCurvatureFromNumberDensity, ShadlooYildizSurfaceTensionForce,\
SmoothedColor, AdamiColorGradient, AdamiReproducingDivergence,\
MorrisColorGradient
from pysph.sph.gas_dynamics.basic import ScaleSmoothingLength
# PySPH solver and application
from pysph.solver.application import Application
from pysph.solver.solver import Solver
# Integrators and Steppers
from pysph.sph.integrator_step import TransportVelocityStep
from pysph.sph.integrator import PECIntegrator
# Domain manager for periodic domains
from pysph.base.nnps import DomainManager
# problem parameters
dim = 2
domain_width = 1.0
domain_height = 1.0
# numerical constants
wavelength = 1.0
wavenumber = 2*numpy.pi/wavelength
rho0 = rho1 = 1000.0
rho2 = 1*rho1
U = 0.5
sigma = 1.0
# discretization parameters
dx = dy = 0.0125
dxb2 = dyb2 = 0.5 * dx
hdx = 1.5
h0 = hdx * dx
rho0 = 1000.0
c0 = 20.0
p0 = c0*c0*rho0
nu = 0.01
# set factor1 to [0.5 ~ 1.0] to simulate a thick or thin
# interface. Larger values result in a thick interface. Set factor1 =
# 1 for the Morris Method I
factor1 = 1.0
factor2 = 1./factor1
# correction factor for Morris's Method I. Set with_morris_correction
# to True when using this correction.
epsilon = 0.01/h0
# time steps
dt_cfl = 0.25 * h0/( 1.1*c0 )
dt_viscous = 0.125 * h0**2/nu
dt_force = 1.0
dt = 0.9 * min(dt_cfl, dt_viscous, dt_force)
tf = 5*dt
staggered = True
class CircularDroplet(Application):
def create_domain(self):
return DomainManager(
xmin=0, xmax=domain_width, ymin=0, ymax=domain_height,
periodic_in_x=True, periodic_in_y=True)
def create_particles(self):
if staggered:
x1, y1 = numpy.mgrid[ dxb2:domain_width:dx, dyb2:domain_height:dy ]
x2, y2 = numpy.mgrid[ dx:domain_width:dx, dy:domain_height:dy ]
x1 = x1.ravel(); y1 = y1.ravel()
x2 = x2.ravel(); y2 = y2.ravel()
x = numpy.concatenate( [x1, x2] )
y = numpy.concatenate( [y1, y2] )
volume = dx*dx/2
else:
x, y = numpy.mgrid[ dxb2:domain_width:dx, dyb2:domain_height:dy ]
x = x.ravel(); y = y.ravel()
volume = dx*dx
m = numpy.ones_like(x) * volume * rho0
rho = numpy.ones_like(x) * rho0
h = numpy.ones_like(x) * h0
cs = numpy.ones_like(x) * c0
# additional properties required for the fluid.
additional_props = [
# volume inverse or number density
'V',
# color and gradients
'color', 'scolor', 'cx', 'cy', 'cz', 'cx2', 'cy2', 'cz2',
# discretized interface normals and dirac delta
'nx', 'ny', 'nz', 'ddelta',
# interface curvature
'kappa',
# filtered velocities
'uf', 'vf', 'wf',
# transport velocities
'uhat', 'vhat', 'what', 'auhat', 'avhat', 'awhat',
# imposed accelerations on the solid wall
'ax', 'ay', 'az', 'wij',
# velocity of magnitude squared needed for TVF
'vmag2',
# variable to indicate reliable normals and normalizing
# constant
'N', 'wij_sum'
]
# get the fluid particle array
fluid = get_particle_array(
name='fluid', x=x, y=y, h=h, m=m, rho=rho, cs=cs,
additional_props=additional_props)
# set the color of the inner circle
for i in range(x.size):
if ( ((fluid.x[i]-0.5)**2 + (fluid.y[i]-0.5)**2) <= 0.25**2 ):
fluid.color[i] = 1.0
# particle volume
fluid.V[:] = 1./volume
# set additional output arrays for the fluid
fluid.add_output_arrays(['V', 'color', 'cx', 'cy', 'nx', 'ny', 'ddelta', 'p',
'kappa', 'N', 'scolor'])
print("2D Circular droplet deformation with %d fluid particles"%(
fluid.get_number_of_particles()))
return [fluid,]
def create_solver(self):
kernel = QuinticSpline(dim=2)
integrator = PECIntegrator( fluid=TransportVelocityStep() )
solver = Solver(
kernel=kernel, dim=dim, integrator=integrator,
dt=dt, tf=tf, adaptive_timestep=False, pfreq=1)
return solver
def create_equations(self):
equations = [
# We first compute the mass and number density of the fluid
# phase. This is used in all force computations henceforth. The
# number density (1/volume) is explicitly set for the solid phase
# and this isn't modified for the simulation.
Group(equations=[
SummationDensity( dest='fluid', sources=['fluid'] ),
] ),
# Given the updated number density for the fluid, we can update
# the fluid pressure. Also compute the smoothed color based on the
# color index for a particle.
Group(equations=[
StateEquation(dest='fluid', sources=None, rho0=rho0,
p0=p0, b=1.0),
SmoothedColor( dest='fluid', sources=['fluid'], smooth=True ),
] ),
#################################################################
# Begin Surface tension formulation
#################################################################
# Scale the smoothing lengths to determine the interface
# quantities.
Group(equations=[
ScaleSmoothingLength(dest='fluid', sources=None, factor=factor1)
], update_nnps=False ),
# Compute the gradient of the color function with respect to the
# new smoothing length. At the end of this Group, we will have the
# interface normals and the discretized dirac delta function for
# the fluid-fluid interface.
Group(equations=[
MorrisColorGradient(dest='fluid', sources=['fluid'], epsilon=0.01/h0),
#ColorGradientUsingNumberDensity(dest='fluid', sources=['fluid'],
# epsilon=epsilon),
#AdamiColorGradient(dest='fluid', sources=['fluid']),
],
),
# Compute the interface curvature using the modified smoothing
# length and interface normals computed in the previous Group.
Group(equations=[
InterfaceCurvatureFromNumberDensity(dest='fluid', sources=['fluid'],
with_morris_correction=True),
#AdamiReproducingDivergence(dest='fluid', sources=['fluid'],
# dim=2),
], ),
# Now rescale the smoothing length to the original value for the
# rest of the computations.
Group(equations=[
ScaleSmoothingLength(dest='fluid', sources=None, factor=factor2)
], update_nnps=False,
),
#################################################################
# End Surface tension formulation
#################################################################
# The main acceleration block
Group(
equations=[
# Gradient of pressure for the fluid phase using the
# number density formulation.
MomentumEquationPressureGradient(
dest='fluid', sources=['fluid'], pb=p0),
# Artificial viscosity for the fluid phase.
MomentumEquationViscosity(
dest='fluid', sources=['fluid'], nu=nu),
# Surface tension force for the SY11 formulation
ShadlooYildizSurfaceTensionForce(dest='fluid', sources=None, sigma=sigma),
# Artificial stress for the fluid phase
MomentumEquationArtificialStress(dest='fluid', sources=['fluid']),
], )
]
return equations
if __name__ == '__main__':
app = CircularDroplet()
app.run()
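# Usage sketch (assumptions about PySPH tooling, not part of this script): running it
# writes results into a "<script name>_output" directory, which can be browsed with
# PySPH's viewer, e.g. `pysph view <script name>_output`.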
|
#!/usr/bin/env python
# Source: https://www.geeksforgeeks.org/python-image-classification-using-keras/
# Author: Nitish_Gangwar - https://auth.geeksforgeeks.org/user/Nitish_Gangwar/articles
# Importing all necessary libraries
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
import os
img_width, img_height = 224, 224
def load_data():
dirname = os.path.dirname(os.path.realpath(__file__))
train_data_dir = dirname + '/data/train'
validation_data_dir = dirname + '/data/test'
if K.image_data_format() == 'channels_first':
input_shape = (3, img_width, img_height)
else:
input_shape = (img_width, img_height, 3)
return train_data_dir, validation_data_dir, input_shape
def build_model(input_shape):
model = Sequential()
model.add(Conv2D(32, (2, 2), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
return model
def train_model(model, train_data_dir, validation_data_dir):
batch_size = 16
epochs = 10
    nb_train_samples = 400
nb_validation_samples = 100
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size)
model.save_weights('model_saved.h5')
train_data_dir, validation_data_dir, input_shape = load_data()
model = build_model(input_shape)
train_model(model, train_data_dir, validation_data_dir)
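# Note on the expected data layout: flow_from_directory requires data/train and
# data/test to each contain one sub-directory per class (two classes here, since the
# model ends in a single sigmoid unit trained with binary_crossentropy).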
|
"""OPM dataset."""
from .opm import data_path, get_version
|
"""Calculate maximum lyapunov exponent of different coupled physical system.
The region below zero is s."""
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
def lyap(cl):
def diff_chua(u):
k = 1
alpha = -0.1
beta = -1
gamma = 0.1
a = 1
b = -2.5
H = [1.8348, 10, 10,
1.1002, 0, 0,
-0.3254, 1, 0]
x,y,z = u
f = [(-k*alpha-k*alpha*b+cl*H[0])*x - 0*k*alpha*a*x**3 + (k*alpha+cl*H[1])*y + cl*H[2]*z,
(k+cl*H[3])*x+(-k+cl*H[4])*y+(k+cl*H[5])*z,
cl*H[6]*x +(-k*beta+cl*H[7])*y+(-k*gamma+cl*H[8])*z]
Df = [[-k*alpha-k*alpha*b+cl*H[0], k*alpha+cl*H[1], 0+cl*H[2]],
[k+cl*H[3], -k+cl*H[4], k+cl*H[5]],
[0+cl*H[6], -k*beta+cl*H[7], -k*gamma+cl*H[8]]]
return np.array(f), np.array(Df)
def diff_Lorenz(u):
H = [1, 0, 0,
0, 0, 0,
0, 0, 0]
sigma = 10
r = 28
bb = 8 / 3
x, y, z = u
f = [sigma * (y - x)+cl*H[0]*x + cl*H[1]*y + cl*H[2]*z,
r * x + cl*H[3]*x - y+cl*H[4]*y - x * z+ cl*H[5]*z,
x * y + cl*H[6]*x +cl*H[7]*y +cl*H[8]*z - bb * z]
Df = [[-sigma+cl*H[0], sigma+cl*H[1], 0+cl*H[2]],
[r - z+cl*H[3], -1+cl*H[4], -x+cl*H[5]],
[y+cl*H[6], x+cl*H[7], -bb+cl*H[8]]]
return np.array(f), np.array(Df)
def diff_rossler(u):
H = [1, 0, 0,
0, 0, 0,
0, 0, 0]
a = 0.2
b =0.2
c= 6.0
x, y, z =u
f = [-y-z + cl * H[0] * x + cl * H[1] * y + cl * H[2] * z,
x+a*y + cl * H[3] * x + cl * H[4] * y +cl * H[5] * z,
b+z*(x-c) + cl * H[6] * x + cl * H[7] * y + cl * H[8] * z]
Df = [[0 + cl * H[0], -1 + cl * H[1], -1 + cl * H[2]],
[1 + cl * H[3], a + cl * H[4], cl * H[5]],
[z + cl * H[6], 0 + cl * H[7], x-c + cl * H[8]]]
return np.array(f), np.array(Df)
def LEC_system(u):
#x,y,z = u[:3]
U = u[3:12].reshape([3,3])
L = u[12:15]
f,Df = diff_rossler(u[:3])
A = U.T.dot(Df.dot(U))
        dL = np.diag(A).copy()
for i in range(3):
A[i,i] = 0
for j in range(i+1,3): A[i,j] = -A[j,i]
dU = U.dot(A)
return np.concatenate([f,dU.flatten(),dL])
u0 = np.ones(3)
U0 = np.identity(3)
L0 = np.zeros(3)
u0 = np.concatenate([u0, U0.flatten(), L0])
t = np.linspace(0,100,2000)
u = odeint(lambda u,t:LEC_system(u),u0,t, hmax=0.05)
L = u[5:,12:15].T/t[5:]
LE = L[:,-1]
MLE = np.max(LE)
return MLE
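# Sweep the coupling strength over a range and record where the maximum Lyapunov
# exponent is negative, i.e. the stable region referred to in the module docstring.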
x = np.arange(-10, 0, 0.1)
stable_list = []   # coupling strengths whose maximum Lyapunov exponent is negative
LElist = []
for i in range(len(x)):
    a = lyap(x[i])
    if a < 0:
        stable_list.append(x[i])
    LElist.append(a)
print(stable_list)
plt.plot(x, LElist)
plt.plot(x, list([0 for i in range(len(x))]))
plt.ylabel('maximum lyapunov exponent')
plt.xlabel(r'$\gamma$')
plt.savefig('max_lyap.pdf')
plt.show()
|
from datetime import datetime, timedelta
from typing import List, Optional
import dateutil.relativedelta
from container import ServerContainer
from dependency_injector.wiring import Provide, inject
from fastapi import APIRouter, Depends
from starlette import status
from carbonserver.api.dependencies import get_token_header
from carbonserver.api.schemas import (
Organization,
OrganizationCreate,
OrganizationReport,
)
from carbonserver.api.services.organization_service import OrganizationService
from carbonserver.api.usecases.organization.organization_sum import (
OrganizationSumsUsecase,
)
ORGANIZATIONS_ROUTER_TAGS = ["Organizations"]
router = APIRouter(
dependencies=[Depends(get_token_header)],
)
@router.post(
"/organization",
tags=ORGANIZATIONS_ROUTER_TAGS,
status_code=status.HTTP_201_CREATED,
response_model=Organization,
)
@inject
def add_organization(
organization: OrganizationCreate,
organization_service: OrganizationService = Depends(
Provide[ServerContainer.organization_service]
),
) -> Organization:
return organization_service.add_organization(organization)
@router.get(
"/organization/{organization_id}",
tags=ORGANIZATIONS_ROUTER_TAGS,
status_code=status.HTTP_200_OK,
response_model=Organization,
)
@inject
def read_organization(
organization_id: str,
organization_service: OrganizationService = Depends(
Provide[ServerContainer.organization_service]
),
) -> Organization:
return organization_service.read_organization(organization_id)
@router.get(
"/organizations",
tags=ORGANIZATIONS_ROUTER_TAGS,
status_code=status.HTTP_200_OK,
response_model=List[Organization],
)
@inject
def list_organizations(
organization_service: OrganizationService = Depends(
Provide[ServerContainer.organization_service]
),
) -> List[Organization]:
return organization_service.list_organizations()
@router.get(
"/organization/{organization_id}/sums/",
tags=ORGANIZATIONS_ROUTER_TAGS,
status_code=status.HTTP_200_OK,
)
@inject
def read_organization_detailed_sums(
organization_id: str,
start_date: Optional[datetime] = None,
end_date: Optional[datetime] = None,
organization_global_sum_usecase: OrganizationSumsUsecase = Depends(
Provide[ServerContainer.organization_sums_usecase]
),
) -> OrganizationReport:
start_date = (
start_date
if start_date
else datetime.now() - dateutil.relativedelta.relativedelta(months=3)
)
end_date = end_date if end_date else datetime.now() + timedelta(days=1)
return organization_global_sum_usecase.compute_detailed_sum(
organization_id, start_date, end_date
)
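# Hypothetical request sketch (the concrete path prefix depends on how this router is
# mounted; the organization id is a made-up value):
#   GET /organization/<organization_id>/sums/?start_date=2021-01-01T00:00:00
# When start_date/end_date are omitted, the window defaults to roughly the last three
# months through tomorrow, as computed above.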
|
from .common import * # noqa
DEBUG = True
ALLOWED_HOSTS = ['*']
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite3',
}
}
# Security
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
|
import torch
import torch.nn as nn
from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
import numpy as np
import ctypes
try:
ctypes.CDLL('libtorch2trt_plugins.so')
def create_reflection_pad_2d_plugin(paddingLeft, paddingRight, paddingTop, paddingBottom):
registry = trt.get_plugin_registry()
creator = registry.get_plugin_creator('ReflectionPad2dPlugin', '1', '')
fc = trt.PluginFieldCollection([
trt.PluginField(
'paddingLeft',
np.array([paddingLeft]).astype(np.int32),
trt.PluginFieldType.INT32
),
trt.PluginField(
'paddingRight',
np.array([paddingRight]).astype(np.int32),
trt.PluginFieldType.INT32
),
trt.PluginField(
'paddingTop',
np.array([paddingTop]).astype(np.int32),
trt.PluginFieldType.INT32
),
trt.PluginField(
'paddingBottom',
np.array([paddingBottom]).astype(np.int32),
trt.PluginFieldType.INT32
)
])
return creator.create_plugin('', fc)
@tensorrt_converter(nn.ReflectionPad2d.forward)
def convert_reflection_pad(ctx):
module = get_arg(ctx, 'self', pos=0, default=None)
input = get_arg(ctx, 'x', pos=1, default=None)
output = ctx.method_return
input_trt = input._trt
plugin = create_reflection_pad_2d_plugin(
module.padding[0],
module.padding[1],
module.padding[2],
module.padding[3]
)
layer = ctx.network.add_plugin_v2([input_trt], plugin)
output._trt = layer.get_output(0)
@add_module_test(torch.float32, torch.device("cuda"), [(1, 1, 3, 3)])
@add_module_test(torch.float32, torch.device("cuda"), [(1, 2, 3, 3)])
def test_reflection_pad_2d_simple():
return nn.ReflectionPad2d(1)
    @add_module_test(torch.float32, torch.device("cuda"), [(1, 1, 3, 3)])
    @add_module_test(torch.float32, torch.device("cuda"), [(1, 2, 3, 3)])
    def test_reflection_pad_2d_padding2():
        return nn.ReflectionPad2d(2)
    @add_module_test(torch.float32, torch.device("cuda"), [(1, 1, 3, 3)])
    @add_module_test(torch.float32, torch.device("cuda"), [(1, 2, 3, 3)])
    def test_reflection_pad_2d_asymmetric():
        return nn.ReflectionPad2d((1, 0, 1, 0))
except:
    # The plugin library is optional: if libtorch2trt_plugins.so cannot be loaded,
    # skip registering the ReflectionPad2d converter and its tests.
    pass
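# Usage sketch (hedged; `model` and `example_input` are assumed user-provided): when
# libtorch2trt_plugins.so is available, converting a model containing nn.ReflectionPad2d,
# e.g. model_trt = torch2trt(model, [example_input]), routes the padding layer through
# the converter registered above.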
|