code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
capabilities = self._call_method('get_server_capabilities') # TODO(nisha): Assumption is that Redfish always see the pci_device # member name field populated similarly to IPMI. # If redfish is not able to get nic_capacity, we can fall back to # IPMI way of retrieving nic_capacity in the future. As of now # the IPMI is not tested on Gen10, hence assuming that # Redfish will always be able to give the data. if ('Gen10' not in self.model): major_minor = ( self._call_method('get_ilo_firmware_version_as_major_minor')) # NOTE(vmud213): Even if it is None, pass it on to get_nic_capacity # as we still want to try getting nic capacity through ipmitool # irrespective of what firmware we are using. nic_capacity = ipmi.get_nic_capacity(self.ipmi_host_info, major_minor) if nic_capacity: capabilities.update({'nic_capacity': nic_capacity}) if capabilities: return capabilities
def get_server_capabilities(self)
Get hardware properties which can be used for scheduling :return: a dictionary of server capabilities. :raises: IloError, on an error from iLO. :raises: IloCommandNotSupportedError, if the command is not supported on the server.
9.910078
9.745813
1.016855
f=open(filename,'r') vv=[] v=[] lines = [] line = '' for i in range(0,6): line = f.readline() lines.extend([line]) hval = lines[2].split() hlist = lines[1].split() header_attr = {} for a,b in zip(hlist,hval): header_attr[a] = float(b) if only is 'header_attr': return header_attr cols = {} colnum = lines[4].split() colname = lines[5].split() for a,b in zip(colname,colnum): cols[a] = int(b) data = [] old_percent = 0 for i in range(data_rows): # writing reading status percent = int(i*100/np.max([1, data_rows-1])) if percent >= old_percent + 5: sys.stdout.flush() sys.stdout.write("\r reading " + "...%d%%" % percent) old_percent = percent line = f.readline() v=line.split() try: vv=np.array(v,dtype='float64') except ValueError: for item in v: if item.__contains__('.') and not item.__contains__('E'): v[v.index(item)]='0' data.append(vv) print(' \n') f.close() a=np.array(data) data = [] return header_attr, cols, a
def _read_mesafile(filename,data_rows=0,only='all')
private routine that is not directly called by the user
3.562223
3.554247
1.002244
file_out=file_in+'sa' f = open(file_in) lignes = f.readlines() f.close() nb = np.array([],dtype=int) # model number nb = np.concatenate((nb ,[ int(lignes[len(lignes)-1].split()[ 0])])) nbremove = np.array([],dtype=int) # model number i=-1 for i in np.arange(len(lignes)-1,0,-1): line = lignes[i-1] if i > 6 and line != "" : if int(line.split()[ 0])>=nb[-1]: nbremove = np.concatenate((nbremove,[i-1])) else: nb = np.concatenate((nb ,[ int(line.split()[ 0])])) i=-1 for j in nbremove: lignes.remove(lignes[j]) fout = open(file_out,'w') for j in np.arange(len(lignes)): fout.write(lignes[j]) fout.close()
def _cleanstarlog(file_in)
cleaning history.data or star.log file, e.g. to take care of repetitive restarts. private, should not be called by user directly Parameters ---------- file_in : string Typically the filename of the mesa output history.data or star.log file, creates a clean file called history.datasa or star.logsa. (thanks to Raphael for providing this tool)
3.01287
2.912328
1.034523
prof_ind_name = self.prof_ind_name f = open(self.sldir+'/'+prof_ind_name,'r') line = f.readline() numlines=int(line.split()[0]) print(str(numlines)+' in profiles.index file ...') model=[] log_file_num=[] for line in f: model.append(int(line.split()[0])) log_file_num.append(int(line.split()[2])) log_ind={} # profile.data number from model for a,b in zip(model,log_file_num): log_ind[a] = b self.log_ind=log_ind self.model=model
def _profiles_index(self)
read profiles.index and make hash array Notes ----- sets the attributes. log_ind : hash array that returns profile.data or log.data file number from model number. model : the models for which profile.data or log.data is available
4.721164
3.652645
1.292533
self._profiles_index() if inum <= 0: print("Smallest argument is 1") return inum_max = len(self.log_ind) inum -= 1 if inum > inum_max: print('There are only '+str(inum_max)+' profile file available.') log_data_number = -1 return log_data_number else: log_data_number=self.log_ind[self.model[inum]] print('The '+str(inum+1)+'. profile.data file is '+ \ str(log_data_number)) return log_data_number
def _log_file_ind(self,inum)
Information about available profile.data or log.data files. Parameters ---------- inum : integer Attempt to get number of inum's profile.data file. inum_max: max number of profile.data or log.data files available
4.714219
4.089327
1.15281
column_array = self.data[:,self.cols[str_name]-1].astype('float') return column_array
def get(self,str_name)
return a column of data with the name str_name. Parameters ---------- str_name : string Is the name of the column as printed in the profilennn.data or lognnn.data file; get the available columns from self.cols (where you replace self with the name of your instance)
10.278289
7.545165
1.362235
try: from ProgenitorHotb_new import ProgenitorHotb_new except ImportError: print('Module ProgenitorHotb_new not found.') return nz=len(self.get('mass')) prog=ProgenitorHotb_new(nz) prog.header = '#'+description+'\n' prog.xzn = self.get('rmid')[::-1]*ast.rsun_cm prog.massb = self.get('mass')[::-1] prog.r_ob = max(self.get('radius'))*ast.rsun_cm prog.temp = 10.**self.get('logT')[::-1]*8.620689655172413e-11 # in MeV prog.stot = self.get('entropy')[::-1] prog.ye = self.get('ye')[::-1] prog.densty = 10.**self.get('logRho')[::-1] prog.press = 10.**self.get('logP')[::-1] prog.eint = self.get('energy')[::-1] prog.velx = self.get('velocity')[::-1] nuclei=['neut','h1','he4','c12','o16','ne20','mg24','si28','s32', 'ar36','ca40','ti44','cr48','fe52','fake'] for i in range(len(nuclei)): if nuclei[i] == 'fake': ni56 = self.get('fe56')+self.get('cr56') prog.xnuc[:,i] = ni56[::-1] else: prog.xnuc[:,i] = self.get(nuclei[i])[::-1] prog.write(name)
def write_PROM_HOTB_progenitor(self,name,description)
Write a progenitor file for the PROMETHEUS/HBOT supernova code. Parameters ---------- name : string File name for the progenitor file description : string Information to be written into the file header.
3.996502
4.059219
0.984549
from scipy import interpolate ye = self.get('ye') newye=[] rho = 10.**self.get('logRho')[::-1] # centre to surface # get index to strip all but the core: idx = np.abs(rho - rhostrip).argmin() + 1 rho = rho[:idx] rhoc = rho[0] rad = 10.**self.get('logR') * ast.rsun_cm rad = rad[::-1][:idx] ye = ye[::-1][:idx] print('there will be about ',old_div(rad[-1], dr), 'mass cells...') # add r = 0 point to all arrays rad = np.insert(rad,0,0) ye = np.insert(ye,0,ye[0]) rho = np.insert(rho,0,rho[0]) print(rad) # interpolate fye = interpolate.interp1d(rad,ye) frho = interpolate.interp1d(rad,rho) newye = [] newrho = [] newrad = [] Tc = 10.**self.get('logT')[-1] for i in range(nzn): if i * dr > rad[-1]: break newye.append(fye( i * dr )) newrho.append(frho( i * dr )) newrad.append( i * dr ) f = open('M875.inimod','w') f.write(str(Tc)+' \n') f.write(str(rhoc)+' \n') for i in range(len(newye)): f.write(str(i+1)+' '+str(newrad[i])+' '+\ str(newrho[i])+' '+str(newye[i])+' \n') f.close()
def write_LEAFS_model(self,nzn=30000000,dr=5.e4, rhostrip=5.e-4)
write an ascii file that will be read by Sam's version of inimod.F90 in order to make an initial model for LEAFS
4.170022
4.03461
1.033563
mass = self.get('mass') radius = self.get('radius') * ast.rsun_cm eps_nuc = self.get('eps_nuc') eps_neu = self.get('non_nuc_neu') if ixaxis == 'mass': xaxis = mass xlab = 'Mass / M$_\odot$' else: xaxis = old_div(radius, 1.e8) # Mm xlab = 'Radius (Mm)' pl.plot(xaxis, np.log10(eps_nuc), 'k-', label='$\epsilon_\mathrm{nuc}>0$') pl.plot(xaxis, np.log10(-eps_nuc), 'k--', label='$\epsilon_\mathrm{nuc}<0$') pl.plot(xaxis, np.log10(eps_neu), 'r-', label='$\epsilon_\\nu$') pl.xlabel(xlab) pl.ylabel('$\log(\epsilon_\mathrm{nuc},\epsilon_\\nu)$') pl.legend(loc='best').draw_frame(False)
def energy_profile(self,ixaxis)
Plot radial profile of key energy generations eps_nuc, eps_neu etc. Parameters ---------- ixaxis : 'mass' or 'radius'
3.245037
2.918875
1.111742
sldir = self.sldir slname = self.slname slaname = slname+'sa' if not os.path.exists(sldir+'/'+slaname): print('No '+self.slname+'sa file found, create new one from '+self.slname) _cleanstarlog(sldir+'/'+slname) else: if self.clean_starlog: print('Requested new '+self.slname+'sa; create new from '+self.slname) _cleanstarlog(sldir+'/'+slname) else: print('Using old '+self.slname+'sa file ...') cmd=os.popen('wc '+sldir+'/'+slaname) cmd_out=cmd.readline() cnum_cycles=cmd_out.split()[0] num_cycles=int(cnum_cycles) - 6 filename=sldir+'/'+slaname header_attr,cols,data = _read_mesafile(filename,data_rows=num_cycles) self.cols = cols self.header_attr = header_attr self.data = data
def _read_starlog(self)
read history.data or star.log file again
4.280209
4.151151
1.03109
def C_O(model): surface_c12=model.get('surface_c12') surface_o16=model.get('surface_o16') CORatio=old_div((surface_c12*4.),(surface_o16*3.)) return CORatio if ixaxis=='time': xax=self.get('star_age') elif ixaxis=='model': xax=self.get('model_number') else: raise IOError("ixaxis not recognised") pl.figure(ifig) pl.plot(xax,C_O(self))
def CO_ratio(self,ifig,ixaxis)
plot surface C/O ratio in Figure ifig with x-axis quantity ixaxis Parameters ---------- ifig : integer Figure number in which to plot ixaxis : string what quantity is to be on the x-axis, either 'time' or 'model' The default is 'model'
4.902298
3.874119
1.265397
# fsize=18 # # params = {'axes.labelsize': fsize, # # 'font.family': 'serif', # 'font.family': 'Times New Roman', # 'figure.facecolor': 'white', # 'text.fontsize': fsize, # 'legend.fontsize': fsize, # 'xtick.labelsize': fsize*0.8, # 'ytick.labelsize': fsize*0.8, # 'text.usetex': False} # # try: # pl.rcParams.update(params) # except: # pass if ifig is not None: pl.figure(ifig) if s2ms: h1=self.get('center_h1') idx=np.where(h1[0]-h1>=3.e-3)[0][0] skip=idx else: skip=0 x = self.get('log_Teff')[skip:] y = self.get('log_L')[skip:] if label is not None: if colour is not None: line,=pl.plot(x,y,label=label,color=colour,**kwargs) else: line,=pl.plot(x,y,label=label,**kwargs) else: if colour is not None: line,=pl.plot(x,y,color=colour,**kwargs) else: line,=pl.plot(x,y,**kwargs) if dashes is not None: line.set_dashes(dashes) if label is not None: pl.legend(loc='best').draw_frame(False) # pyl.plot(self.data[:,self.cols['log_Teff']-1],\ # self.data[:,self.cols['log_L']-1],\ # label = "M="+str(self.header_attr['initial_mass'])+", Z="\ # +str(self.header_attr['initial_z'])) pyl.xlabel('$\log T_{\\rm eff}$') pyl.ylabel('$\log L$') x1,x2=pl.xlim() if x2 > x1: ax=pl.gca() ax.invert_xaxis()
def hrd(self,ifig=None,label=None,colour=None,s2ms=False, dashes=None,**kwargs)
Plot an HR diagram Parameters ---------- ifig : integer or string Figure label, if None the current figure is used The default value is None. lims : list [x_lower, x_upper, y_lower, y_upper] label : string Label for the model The default value is None colour : string The colour of the line The default value is None s2ms : boolean, optional "Skip to Main Sequence"? The default is False. dashes : list, optional Custom dashing style. If None, ignore. The default is None.
2.399864
2.422115
0.990813
pyl.plot(self.data[:,self.cols['log_Teff']-1],\ self.data[:,self.cols['log_L']-1],label = key_str) pyl.legend() pyl.xlabel('log Teff') pyl.ylabel('log L') x1,x2=pl.xlim() if x2 > x1: self._xlimrev()
def hrd_key(self, key_str)
plot an HR diagram Parameters ---------- key_str : string A label string
5.545943
5.228163
1.060782
xl_old=pyl.gca().get_xlim() if input_label == "": my_label="M="+str(self.header_attr['initial_mass'])+", Z="+str(self.header_attr['initial_z']) else: my_label="M="+str(self.header_attr['initial_mass'])+", Z="+str(self.header_attr['initial_z'])+"; "+str(input_label) pyl.plot(self.data[skip:,self.cols['log_Teff']-1],self.data[skip:,self.cols['log_L']-1],label = my_label) pyl.legend(loc=0) xl_new=pyl.gca().get_xlim() pyl.xlabel('log Teff') pyl.ylabel('log L') if any(array(xl_old)==0): pyl.gca().set_xlim(max(xl_new),min(xl_new)) elif any(array(xl_new)==0): pyl.gca().set_xlim(max(xl_old),min(xl_old)) else: pyl.gca().set_xlim([max(xl_old+xl_new),min(xl_old+xl_new)])
def hrd_new(self, input_label="", skip=0)
plot an HR diagram with options to skip the first N lines and add a label string Parameters ---------- input_label : string, optional Diagram label. The default is "". skip : integer, optional Skip the first n lines. The default is 0.
2.492282
2.504507
0.995119
fsize=18 params = {'axes.labelsize': fsize, # 'font.family': 'serif', 'font.family': 'Times New Roman', 'figure.facecolor': 'white', 'text.fontsize': fsize, 'legend.fontsize': fsize, 'xtick.labelsize': fsize*0.8, 'ytick.labelsize': fsize*0.8, 'text.usetex': False} try: pl.rcParams.update(params) except: pass if s2ms: h1=self.get('center_h1') idx=np.where(h1[0]-h1>=1.e-3)[0][0] skip=idx else: skip=0 x = self.get('center_he4')[skip:] y = self.get('log_Teff')[skip:] if ifig is not None: pl.figure(ifig) if label is not None: if colour is not None: line,=pl.plot(x,y,label=label,color=colour) else: line,=pl.plot(x,y,label=label) pl.legend(loc='best').draw_frame(False) else: if colour is not None: line,=pl.plot(x,y,color=colour) else: line,=pl.plot(x,y) if dashes is not None: line.set_dashes(dashes) if label is not None: pl.legend(loc='best').draw_frame(False) pl.xlim(lims[:2]) pl.ylim(lims[2:]) pl.xlabel('$X_{\\rm c}(\,^4{\\rm He}\,)$') pl.ylabel('$\log\,T_{\\rm eff}$')
def xche4_teff(self,ifig=None,lims=[1.,0.,3.4,4.7],label=None,colour=None, s2ms=True,dashes=None)
Plot effective temperature against central helium abundance. Parameters ---------- ifig : integer or string Figure label, if None the current figure is used The default value is None. lims : list [x_lower, x_upper, y_lower, y_upper] label : string Label for the model The default value is None colour : string The colour of the line The default value is None s2ms : boolean, optional "Skip to Main Sequence" The default is True dashes : list, optional Custom dashing style. If None, ignore. The default is None.
2.485339
2.499893
0.994178
# fsize=18 # # params = {'axes.labelsize': fsize, # # 'font.family': 'serif', # 'font.family': 'Times New Roman', # 'figure.facecolor': 'white', # 'text.fontsize': fsize, # 'legend.fontsize': fsize, # 'xtick.labelsize': fsize*0.8, # 'ytick.labelsize': fsize*0.8, # 'text.usetex': False} # # try: # pl.rcParams.update(params) # except: # pass if ifig is not None: pl.figure(ifig) if label is not None: if colour is not None: line,=pl.plot(self.get('log_center_Rho'),self.get('log_center_T'),label=label, color=colour) else: line,=pl.plot(self.get('log_center_Rho'),self.get('log_center_T'),label=label) else: if colour is not None: line,=pl.plot(self.get('log_center_Rho'),self.get('log_center_T'), color=colour) else: line,=pl.plot(self.get('log_center_Rho'),self.get('log_center_T')) if dashes is not None: line.set_dashes(dashes) if label is not None: pl.legend(loc='best').draw_frame(False) pl.xlim(lims[:2]) pl.ylim(lims[2:]) pl.xlabel('log $\\rho_{\\rm c}$') pl.ylabel('log $T_{\\rm c}$')
def tcrhoc(self,ifig=None,lims=[3.,10.,8.,10.],label=None,colour=None, dashes=None)
Central temperature again central density plot Parameters ---------- ifig : integer or string Figure label, if None the current figure is used The default value is None. lims : list [x_lower, x_upper, y_lower, y_upper] label : string Label for the model The default value is None colour : string The colour of the line The default value is None dashes : list, optional Custom dashing style. If None, ignore. The default is None.
1.913882
1.90783
1.003172
fsize=18 params = {'axes.labelsize': fsize, # 'font.family': 'serif', 'font.family': 'Times New Roman', 'figure.facecolor': 'white', 'text.fontsize': fsize, 'legend.fontsize': fsize, 'xtick.labelsize': fsize*0.8, 'ytick.labelsize': fsize*0.8, 'text.usetex': False} try: pl.rcParams.update(params) except: pass if ifig is not None: pl.figure(ifig) if s2ms: h1=self.get('center_h1') idx=np.where(h1[0]-h1>=3.e-3)[0][0] skip=idx else: skip=0 gage= self.get('star_age') lage=np.zeros(len(gage)) agemin = max(old_div(abs(gage[-1]-gage[-2]),5.),1.e-10) for i in np.arange(len(gage)): if gage[-1]-gage[i]>agemin: lage[i]=np.log10(gage[-1]-gage[i]+agemin) else : lage[i]=np.log10(agemin) x = lage[skip:] y = self.get('log_abs_mdot')[skip:] if ifig is not None: pl.figure(ifig) if label is not None: if colour is not None: line,=pl.plot(x,y,label=label,color=colour) else: line,=pl.plot(x,y,label=label) else: if colour is not None: line,=pl.plot(x,y,color=colour) else: line,=pl.plot(x,y) if dashes is not None: line.set_dashes(dashes) if label is not None: pl.legend(loc='best').draw_frame(False) pl.xlim(lims[:2]) pl.ylim(lims[2:]) pl.ylabel('$\mathrm{log}_{10}(\|\dot{M}\|/M_\odot\,\mathrm{yr}^{-1})$') pl.xlabel('$\mathrm{log}_{10}(t^*/\mathrm{yr})$')
def mdot_t(self,ifig=None,lims=[7.4,2.6,-8.5,-4.5],label=None,colour=None,s2ms=False, dashes=None)
Plot mass loss history as a function of log-time-left Parameters ---------- ifig : integer or string Figure label, if None the current figure is used The default value is None. lims : list [x_lower, x_upper, y_lower, y_upper] label : string Label for the model The default value is None colour : string The colour of the line The default value is None s2ms : boolean, optional "skip to main sequence" dashes : list, optional Custom dashing style. If None, ignore. The default is None.
2.479932
2.549734
0.972624
fsize=18 params = {'axes.labelsize': fsize, # 'font.family': 'serif', 'font.family': 'Times New Roman', 'figure.facecolor': 'white', 'text.fontsize': fsize, 'legend.fontsize': fsize, 'xtick.labelsize': fsize*0.8, 'ytick.labelsize': fsize*0.8, 'text.usetex': False} try: pl.rcParams.update(params) except: pass if ifig is not None: pl.figure(ifig) if s2ms: h1=self.get('center_h1') idx=np.where(h1[0]-h1>=3.e-3)[0][0] skip=idx else: skip=0 age= self.get('star_age') x1 = old_div(age, 1.e6) x2 = old_div(age, 1.e6) y1 = self.get('mix_qtop_1')*self.get('star_mass') y2 = self.get('mix_qtop_2')*self.get('star_mass') mt1 = self.get('mix_type_1') mt2 = self.get('mix_type_2') x1 = x1[skip:] x2 = x2[skip:] y1 = y1[skip:] y2 = y2[skip:] mt1 = mt1[skip:] mt2 = mt2[skip:] # Mask spikes... if mask: x1 = np.ma.masked_where(mt1 != 1, x1) x2 = np.ma.masked_where(mt2 != 1, x2) y1 = np.ma.masked_where(mt1 != 1, y1) y2 = np.ma.masked_where(mt2 != 1, y2) if ifig is not None: pl.figure(ifig) if label is not None: if colour is not None: line,=pl.plot(x1,y1,label=label,color=colour) line,=pl.plot(x2,y2,color=colour) else: line,=pl.plot(x1,y1,label=label) line,=pl.plot(x2,y2) else: if colour is not None: line,=pl.plot(x1,y1,color=colour) line,=pl.plot(x2,y2,color=colour) else: line,=pl.plot(x1,y1) line,=pl.plot(x2,y2) if dashes is not None: line.set_dashes(dashes) if label is not None: pl.legend(loc='best').draw_frame(False) pl.xlim(lims[:2]) pl.ylim(lims[2:]) pl.ylabel('$M/M_\odot}$') pl.xlabel('$t/{\\rm Myr}$')
def mcc_t(self,ifig=None,lims=[0,15,0,25],label=None,colour=None, mask=False,s2ms=False,dashes=None)
Plot mass of [oclark01@scandium 15M_led_f_print_nets]$ cp ../15M_led_f_ppcno/ective core as a function of time. Parameters ---------- ifig : integer or string Figure label, if None the current figure is used The default value is None. lims : list [x_lower, x_upper, y_lower, y_upper] label : string Label for the model The default value is None colour : string The colour of the line The default value is None mask : boolean, optional Do you want to try to hide numerical spikes in the plot? The default is False s2ms : boolean, optional skip to main squence? dashes : list, optional Custom dashing style. If None, ignore. The default is None.
1.994677
2.016373
0.98924
if num_frame >= 0: pyl.figure(num_frame) if xax == 'time': xaxisarray = self.get('star_age')[t0_model:] elif xax == 'model': xaxisarray = self.get('model_number')[t0_model:] elif xax == 'logrevtime': xaxisarray = self.get('star_age') xaxisarray=np.log10(max(xaxisarray[t0_model:])+t_eps-xaxisarray[t0_model:]) else: print('t-surfabu error: invalid string for x-axis selction.'+ \ ' needs to be "time" or "model"') star_mass = self.get('star_mass') surface_c12 = self.get('surface_c12') surface_c13 = self.get('surface_c13') surface_n14 = self.get('surface_n14') surface_o16 = self.get('surface_o16') target_n14 = -3.5 COratio=old_div((surface_c12*4.),(surface_o16*3.)) t0_mod=xaxisarray[t0_model] log10_c12=np.log10(surface_c12[t0_model:]) symbs=['k:','-','--','-.','b:','-','--','k-.',':','-','--','-.'] pyl.plot(xaxisarray,log10_c12,\ symbs[0],label='$^{12}\mathrm{C}$') pyl.plot(xaxisarray,np.log10(surface_c13[t0_model:]),\ symbs[1],label='$^{13}\mathrm{C}$') pyl.plot(xaxisarray,np.log10(surface_n14[t0_model:]),\ symbs[2],label='$^{14}\mathrm{N}$') pyl.plot(xaxisarray,np.log10(surface_o16[t0_model:]),\ symbs[3],label='$^{16}\mathrm{O}$') # pyl.plot([min(xaxisarray[t0_model:]-t0_mod),max(xaxisarray[t0_model:]-t0_mod)],[target_n14,target_n14]) pyl.ylabel('mass fraction $\log X$') pyl.legend(loc=2) if xax == 'time': pyl.xlabel('t / yrs') elif xax == 'model': pyl.xlabel('model number') elif xax == 'logrevtime': pyl.xlabel('$\\log t-tfinal$') if plot_CO_ratio: pyl.twinx() pyl.plot(xaxisarray,COratio[t0_model:],'-k',label='CO ratio') pyl.ylabel('C/O ratio') pyl.legend(loc=4) pyl.title(title) if xax == 'logrevtime': self._xlimrev()
def t_surfabu(self, num_frame, xax, t0_model=0, title='surface abundance', t_eps=1.e-3, plot_CO_ratio=False)
t_surfabu plots surface abundance evolution as a function of time. Parameters ---------- num_frame : integer Number of frame to plot this plot into, if <0 don't open figure. xax : string Either model, time or logrevtime to indicate what is to be used on the x-axis. t0_model : integer, optional Model for the zero point in time, for AGB plots this would be usually the model of the 1st TP, which can be found with the Kippenhahn plot. The default is 0. title : string, optional Figure title. The default is "surface abundance". t_eps : float, optional Time eps at end for logrevtime. The default is 1.e-3. plot_CO_ratio : boolean, optional On second axis True/False. The default is False.
2.841265
2.709448
1.048651
pyl.figure(num_frame) if xax == 'time': xaxisarray = self.get('star_age') elif xax == 'model': xaxisarray = self.get('model_number') else: print('kippenhahn_error: invalid string for x-axis selction. needs to be "time" or "model"') logLH = self.get('log_LH') logLHe = self.get('log_LHe') pyl.plot(xaxisarray,logLH,label='L_(H)') pyl.plot(xaxisarray,logLHe,label='L(He)') pyl.ylabel('log L') pyl.legend(loc=2) if xax == 'time': pyl.xlabel('t / yrs') elif xax == 'model': pyl.xlabel('model number')
def t_lumi(self,num_frame,xax)
Luminosity evolution as a function of time or model. Parameters ---------- num_frame : integer Number of frame to plot this plot into. xax : string Either model or time to indicate what is to be used on the x-axis
3.959393
3.682775
1.075111
pyl.figure(num_frame) if xax == 'time': xaxisarray = self.get('star_age') elif xax == 'model': xaxisarray = self.get('model_number') else: print('kippenhahn_error: invalid string for x-axis selction. needs to be "time" or "model"') logL = self.get('log_L') logTeff = self.get('log_Teff') pyl.plot(xaxisarray,logL,'-k',label='log L') pyl.plot(xaxisarray,logTeff,'-k',label='log Teff') pyl.ylabel('log L, log Teff') pyl.legend(loc=2) if xax == 'time': pyl.xlabel('t / yrs') elif xax == 'model': pyl.xlabel('model number')
def t_surf_parameter(self, num_frame, xax)
Surface parameter evolution as a function of time or model. Parameters ---------- num_frame : integer Number of frame to plot this plot into. xax : string Either model or time to indicate what is to be used on the x-axis
3.428636
3.283291
1.044268
star_mass = self.get('star_mass') he_lumi = self.get('log_LHe') h_lumi = self.get('log_LH') mx2_bot = self.get('mx2_bot')*star_mass try: h1_boundary_mass = self.get('h1_boundary_mass') he4_boundary_mass = self.get('he4_boundary_mass') except: try: h1_boundary_mass = self.get('he_core_mass') he4_boundary_mass = self.get('c_core_mass') except: pass TP_bot=np.array(self.get('conv_mx2_bot'))*np.array(self.get('star_mass')) TP_top=np.array(self.get('conv_mx2_top'))*np.array(self.get('star_mass')) lum_array=[] activate=False models=[] pdcz_size=[] for i in range(len(h1_boundary_mass)): if (h1_boundary_mass[i]-he4_boundary_mass[i] <0.2) and (he4_boundary_mass[i]>0.2): if (mx2_bot[i]>he4_boundary_mass[i]) and (he_lumi[i]>h_lumi[i]): if TP_top[i]>he4_boundary_mass[i]: pdcz_size.append(TP_top[i]-TP_bot[i]) activate=True lum_array.append(he_lumi[i]) models.append(i) #print(TP_bot[i],TP_top[i]) if (activate == True) and (he_lumi[i]<h_lumi[i]): #if fake tp if max(pdcz_size)<1e-5: active=False lum_array=[] models=[] print('fake tp') else: break t0_model = models[np.argmax(lum_array)] return t0_model
def find_first_TP(self)
Find first TP of the TPAGB phase and returns the model number at its LHe maximum. Parameters ----------
3.311531
3.307885
1.001102
number_DUP=(old_div(len(modeln),2) -1) #START WITH SECOND try: h1_bnd_m=self.get('h1_boundary_mass') except: try: h1_bnd_m=self.get('he_core_mass') except: pass star_mass=self.get('star_mass') age=self.get("star_age") firstTP=h1_bnd_m[modeln[0]] first_m_dredge=h1_bnd_m[modeln[1]] DUP_parameter=np.zeros(number_DUP) DUP_xaxis=np.zeros(number_DUP) j=0 for i in np.arange(2,len(modeln),2): TP=h1_bnd_m[modeln[i]] m_dredge=h1_bnd_m[modeln[i+1]] if i ==2: last_m_dredge=first_m_dredge #print "testest" #print modeln[i] if h_core_mass==True: DUP_xaxis[j]=h1_bnd_m[modeln[i]] #age[modeln[i]] - age[modeln[0]] else: DUP_xaxis[j]=star_mass[modeln[i]] #DUP_xaxis[j]=modeln[i] DUP_parameter[j]=old_div((TP-m_dredge),(TP-last_m_dredge)) last_m_dredge=m_dredge j+=1 pl.figure(fig) pl.rcParams.update({'font.size': 18}) pl.rc('xtick', labelsize=18) pl.rc('ytick', labelsize=18) pl.plot(DUP_xaxis,DUP_parameter,marker=marker_type,markersize=12,mfc=color,color='k',linestyle='-',label=label) if h_core_mass==True: pl.xlabel("$M_H$",fontsize=20) else: pl.xlabel("M/M$_{\odot}$",fontsize=24) pl.ylabel("$\lambda_{DUP}$",fontsize=24) pl.minorticks_on() pl.legend()
def calc_DUP_parameter(self, modeln, label, fig=10, color='r', marker_type='*', h_core_mass=False)
Method to calculate the DUP parameter evolution for different TPs specified specified by their model number. Parameters ---------- fig : integer Figure number to plot. modeln : list Array containing pairs of models each corresponding to a TP. First model where h boundary mass will be taken before DUP, second model where DUP reaches lowest mass. leg : string Plot label. color : string Color of the plot. marker_type : string marker type. h_core_mass : boolean, optional If True: plot dependence from h free core , else star mass. The default is False.
2.88257
2.902599
0.9931
auth_protocol = snmp_cred.get('auth_protocol') priv_protocol = snmp_cred.get('priv_protocol') auth_user = snmp_cred.get('auth_user') auth_prot_pp = snmp_cred.get('auth_prot_pp') auth_priv_pp = snmp_cred.get('auth_priv_pp') if ((not auth_protocol) and priv_protocol): priv_protocol = ( MAPPED_SNMP_ATTRIBUTES['privProtocol'][priv_protocol]) usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp, auth_priv_pp, privProtocol=priv_protocol) elif ((not priv_protocol) and auth_protocol): auth_protocol = ( MAPPED_SNMP_ATTRIBUTES['authProtocol'][auth_protocol]) usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp, auth_priv_pp, authProtocol=auth_protocol) elif not all([priv_protocol and auth_protocol]): usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp, auth_priv_pp) else: auth_protocol = ( MAPPED_SNMP_ATTRIBUTES['authProtocol'][auth_protocol]) priv_protocol = ( MAPPED_SNMP_ATTRIBUTES['privProtocol'][priv_protocol]) usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp, auth_priv_pp, authProtocol=auth_protocol, privProtocol=priv_protocol) return usm_user_obj
def _create_usm_user_obj(snmp_cred)
Creates the UsmUserData obj for the given credentials. This method creates an instance for the method hlapi.UsmUserData. The UsmUserData() allows the 'auth_protocol' and 'priv_protocol' to be undefined by user if their pass phrases are provided. :param snmp_cred: Dictionary of SNMP credentials. auth_user: SNMP user auth_protocol: Auth Protocol auth_prot_pp: Pass phrase value for AuthProtocol. priv_protocol:Privacy Protocol. auth_priv_pp: Pass phrase value for Privacy Protocol. :returns UsmUserData object as per given credentials.
1.637006
1.545177
1.059429
result = {} usm_user_obj = _create_usm_user_obj(snmp_credentials) try: for(errorIndication, errorStatus, errorIndex, varBinds) in hlapi.nextCmd( hlapi.SnmpEngine(), usm_user_obj, hlapi.UdpTransportTarget((iLOIP, 161), timeout=3, retries=3), hlapi.ContextData(), # cpqida cpqDaPhyDrvTable Drive Array Physical Drive Table hlapi.ObjectType( hlapi.ObjectIdentity('1.3.6.1.4.1.232.3.2.5.1')), # cpqscsi SCSI Physical Drive Table hlapi.ObjectType( hlapi.ObjectIdentity('1.3.6.1.4.1.232.5.2.4.1')), # cpqscsi SAS Physical Drive Table hlapi.ObjectType( hlapi.ObjectIdentity('1.3.6.1.4.1.232.5.5.2.1')), lexicographicMode=False, ignoreNonIncreasingOid=True): if errorIndication: LOG.error(errorIndication) msg = "SNMP failed to traverse MIBs %s", errorIndication raise exception.IloSNMPInvalidInputFailure(msg) else: if errorStatus: msg = ('Parsing MIBs failed. %s at %s' % ( errorStatus.prettyPrint(), errorIndex and varBinds[-1][int(errorIndex)-1] or '?' ) ) LOG.error(msg) raise exception.IloSNMPInvalidInputFailure(msg) else: for varBindTableRow in varBinds: name, val = tuple(varBindTableRow) oid, label, suffix = ( mibViewController.getNodeName(name)) key = name.prettyPrint() # Don't traverse outside the tables we requested if not (key.find("SNMPv2-SMI::enterprises.232.3") >= 0 or (key.find( "SNMPv2-SMI::enterprises.232.5") >= 0)): break if key not in result: result[key] = {} result[key][label[-1]] = {} result[key][label[-1]][suffix] = val except Exception as e: msg = "SNMP library failed with error %s", e LOG.error(msg) raise exception.IloSNMPExceptionFailure(msg) return result
def _parse_mibs(iLOIP, snmp_credentials)
Parses the MIBs. :param iLOIP: IP address of the server on which SNMP discovery has to be executed. :param snmp_credentials: a Dictionary of SNMP credentials. auth_user: SNMP user auth_protocol: Auth Protocol auth_prot_pp: Pass phrase value for AuthProtocol. priv_protocol:Privacy Protocol. auth_priv_pp: Pass phrase value for Privacy Protocol. :returns the dictionary of parsed MIBs. :raises exception.InvalidInputError if pysnmp is unable to get SNMP data due to wrong inputs provided. :raises exception.IloError if pysnmp raises any exception.
3.408019
3.411112
0.999093
# '1.3.6.1.4.1.232.5.5.1.1', # cpqscsi SAS HBA Table # '1.3.6.1.4.1.232.3.2.3.1', # cpqida Drive Array Logical Drive Table result = _parse_mibs(iLOIP, cred) disksize = {} for uuid in sorted(result): for key in result[uuid]: # We only track the Physical Disk Size if key.find('PhyDrvSize') >= 0: disksize[uuid] = dict() for suffix in sorted(result[uuid][key]): size = result[uuid][key][suffix] disksize[uuid][key] = str(size) return disksize
def _get_disksize_MiB(iLOIP, cred)
Reads the dictionary of parsed MIBs and gets the disk size. :param iLOIP: IP address of the server on which SNMP discovery has to be executed. :param snmp_credentials in a dictionary having following mandatory keys. auth_user: SNMP user auth_protocol: Auth Protocol auth_prot_pp: Pass phrase value for AuthProtocol. priv_protocol:Privacy Protocol. auth_priv_pp: Pass phrase value for Privacy Protocol. :returns the dictionary of disk sizes of all physical drives.
5.349825
5.487485
0.974914
disk_sizes = _get_disksize_MiB(iLOIP, snmp_credentials) max_size = 0 for uuid in disk_sizes: for key in disk_sizes[uuid]: if int(disk_sizes[uuid][key]) > max_size: max_size = int(disk_sizes[uuid][key]) max_size_gb = max_size/1024 return max_size_gb
def get_local_gb(iLOIP, snmp_credentials)
Gets the maximum disk size among all disks. :param iLOIP: IP address of the server on which SNMP discovery has to be executed. :param snmp_credentials in a dictionary having following mandatory keys. auth_user: SNMP user auth_protocol: Auth Protocol auth_prot_pp: Pass phrase value for AuthProtocol. priv_protocol:Privacy Protocol. auth_priv_pp: Pass phrase value for Privacy Protocol.
2.619075
2.920131
0.896903
''' Simple error handler for azure: re-raise as AzureHttpError. '''
message = str(http_error)
body = http_error.respbody
if body is not None:
    # Append the decoded response body (BOM-aware) for diagnostics.
    message = message + '\n' + body.decode('utf-8-sig')
raise AzureHttpError(message, http_error.status)
def _http_error_handler(http_error)
Simple error handler for azure.
4.481025
3.577047
1.252716
mac_dict = {} for eth in self.get_members(): if eth.mac_address is not None: if (eth.status is not None and eth.status.health == sys_cons.HEALTH_OK and eth.status.state == sys_cons.HEALTH_STATE_ENABLED): mac_dict.update( {'Port ' + eth.identity: eth.mac_address}) return mac_dict
def summary(self)
property to return the summary MAC addresses and state This filters the MACs whose health is OK, and in 'Enabled' State would be returned. The returned format will be {<port_id>: <mac_address>}. This is because RIBCL returns the data in format {'Port 1': 'aa:bb:cc:dd:ee:ff'} and ironic ilo drivers inspection consumes the data in this format. Note: 'Id' is referred to as "Port number".
5.661604
3.593117
1.575681
# Attach the details of every physical drive to the RAID configuration.
raid_config['physical_disks'] = [
    drive.get_physical_drive_dict()
    for drive in server.get_physical_drives()]
def _update_physical_disk_details(raid_config, server)
Adds the physical disk details to the RAID configuration passed.
1.947671
1.853699
1.050694
# Validate the RAID configuration against the JSON schema, then check
# that each logical disk specifies at least the minimum number of
# physical disks its RAID level requires.
#
# :param raid_config: The RAID configuration to be validated.
# :raises: InvalidInputError, if validation of the input fails.
#
# BUG FIX: the schema file was opened without ever being closed; a
# 'with' block guarantees release of the file handle.
with open(RAID_CONFIG_SCHEMA, 'r') as raid_schema_fobj:
    raid_config_schema = json.load(raid_schema_fobj)
try:
    jsonschema.validate(raid_config, raid_config_schema)
except json_schema_exc.ValidationError as e:
    raise exception.InvalidInputError(e.message)

for logical_disk in raid_config['logical_disks']:
    # If user has provided 'number_of_physical_disks' or
    # 'physical_disks', validate that they have mentioned at least
    # minimum number of physical disks required for that RAID level.
    raid_level = logical_disk['raid_level']
    min_disks_reqd = constants.RAID_LEVEL_MIN_DISKS[raid_level]

    no_of_disks_specified = None
    if 'number_of_physical_disks' in logical_disk:
        no_of_disks_specified = logical_disk['number_of_physical_disks']
    elif 'physical_disks' in logical_disk:
        no_of_disks_specified = len(logical_disk['physical_disks'])

    if (no_of_disks_specified
            and no_of_disks_specified < min_disks_reqd):
        msg = ("RAID level %(raid_level)s requires at least %(number)s "
               "disks." % {'raid_level': raid_level,
                           'number': min_disks_reqd})
        raise exception.InvalidInputError(msg)
def validate(raid_config)
Validates the RAID configuration provided. This method validates the RAID configuration provided against a JSON schema. :param raid_config: The RAID configuration to be validated. :raises: InvalidInputError, if validation of the input fails.
2.22593
2.226623
0.999689
# Keep only the controllers that satisfy *select_condition*; raise if
# none of them qualify.
candidates = server.controllers
matching = [ctrl for ctrl in candidates if select_condition(ctrl)]
if not matching:
    reason = ("None of the available SSA controllers %(controllers)s "
              "have %(msg)s"
              % {'controllers': ', '.join(
                     [ctrl.id for ctrl in candidates]),
                 'msg': msg})
    raise exception.HPSSAOperationError(reason=reason)
server.controllers = matching
def _select_controllers_by(server, select_condition, msg)
Filters out the hpssa controllers based on the condition. This method updates the server with only the controller which satisfies the condition. The controllers which doesn't satisfies the selection condition will be removed from the list. :param server: The object containing all the supported hpssa controllers details. :param select_condition: A lambda function to select the controllers based on requirement. :param msg: A String which describes the controller selection. :raises exception.HPSSAOperationError, if all the controller are in HBA mode.
3.476323
3.051791
1.139109
# Order 'logical_disks' so that, when physical disks are shared,
# volumes needing more disks are allocated first (with special-case
# placement for RAID 1 and RAID 1+0 -- see the sharing rules below).
is_shared = (lambda x: True if ('share_physical_disks' in x
                                and x['share_physical_disks']) else False)
num_of_disks = (lambda x: x['number_of_physical_disks']
                if 'number_of_physical_disks' in x
                else constants.RAID_LEVEL_MIN_DISKS[x['raid_level']])

# Separate logical disks based on share_physical_disks value.
# 'logical_disks_shared' when share_physical_disks is True and
# 'logical_disks_nonshared' when share_physical_disks is False
logical_disks_shared = []
logical_disks_nonshared = []
for x in logical_disks:
    target = (logical_disks_shared if is_shared(x)
              else logical_disks_nonshared)
    target.append(x)

# Separate logical disks with raid 1 from the 'logical_disks_shared' into
# 'logical_disks_shared_raid1' and remaining as
# 'logical_disks_shared_excl_raid1'.
logical_disks_shared_raid1 = []
logical_disks_shared_excl_raid1 = []
for x in logical_disks_shared:
    target = (logical_disks_shared_raid1 if x['raid_level'] == '1'
              else logical_disks_shared_excl_raid1)
    target.append(x)

# Sort the 'logical_disks_shared' in reverse order based on
# 'number_of_physical_disks' attribute, if provided, otherwise minimum
# disks required to create the logical volume.
logical_disks_shared = sorted(logical_disks_shared_excl_raid1,
                              reverse=True,
                              key=num_of_disks)

# Move RAID 1+0 to first in 'logical_disks_shared' when number of physical
# disks needed to create logical volume cannot be shared with odd number of
# disks and disks higher than that of RAID 1+0.
check = True
for x in logical_disks_shared:
    if x['raid_level'] == "1+0":
        x_num = num_of_disks(x)
        for y in logical_disks_shared:
            if y['raid_level'] != "1+0":
                y_num = num_of_disks(y)
                if x_num < y_num:
                    check = (True if y_num % 2 == 0 else False)
                    if check:
                        break
        if not check:
            # NOTE(review): this removes and re-inserts an element of
            # the list being iterated, which can skip elements of the
            # loop -- confirm the reordering is intended to fire at
            # most effectively once per pass.
            logical_disks_shared.remove(x)
            logical_disks_shared.insert(0, x)
            check = True

# Final 'logical_disks_sorted' list should have non shared logical disks
# first, followed by shared logical disks with RAID 1, and finally by the
# shared logical disks sorted based on number of disks and RAID 1+0
# condition.
logical_disks_sorted = (logical_disks_nonshared
                        + logical_disks_shared_raid1
                        + logical_disks_shared)
return logical_disks_sorted
def _sort_shared_logical_disks(logical_disks)
Sort the logical disks based on the following conditions. When the share_physical_disks is True make sure we create the volume which needs more disks first. This avoids the situation of insufficient disks for some logical volume request. For example, - two logical disk with number of disks - LD1(3), LD2(4) - have 4 physical disks In this case, if we consider LD1 first then LD2 will fail since not enough disks available to create LD2. So follow a order for allocation when share_physical_disks is True. Also RAID1 can share only when there is logical volume with only 2 disks. So make sure we create RAID 1 first when share_physical_disks is True. And RAID 1+0 can share only when the logical volume with even number of disks. :param logical_disks: 'logical_disks' to be sorted for shared logical disks. :returns: the logical disks sorted based the above conditions.
2.763714
2.602452
1.061965
server = objects.Server() select_controllers = lambda x: not x.properties.get('HBA Mode Enabled', False) _select_controllers_by(server, select_controllers, 'RAID enabled') for controller in server.controllers: # Trigger delete only if there is some RAID array, otherwise # hpssacli/ssacli will fail saying "no logical drives found.". if controller.raid_arrays: controller.delete_all_logical_drives() return get_configuration()
def delete_configuration()
Delete a RAID configuration on this server. :returns: the current RAID configuration after deleting all the logical disks.
11.8599
10.925411
1.085533
server = objects.Server() logical_drives = server.get_logical_drives() raid_config = {} raid_config['logical_disks'] = [] for logical_drive in logical_drives: logical_drive_dict = logical_drive.get_logical_drive_dict() raid_config['logical_disks'].append(logical_drive_dict) _update_physical_disk_details(raid_config, server) return raid_config
def get_configuration()
Get the current RAID configuration. Get the RAID configuration from the server and return it as a dictionary. :returns: A dictionary of the below format. raid_config = { 'logical_disks': [{ 'size_gb': 100, 'raid_level': 1, 'physical_disks': [ '5I:0:1', '5I:0:2'], 'controller': 'Smart array controller' }, ] }
3.012997
2.484114
1.212906
# Start sanitize erase on every eligible unassigned drive, poll until
# all erases complete, then build a per-controller status report.
server = objects.Server()
for controller in server.controllers:
    # Only drives reporting erase_status 'OK' are candidates.
    drives = [x for x in controller.unassigned_physical_drives
              if (x.get_physical_drive_dict().get('erase_status', '')
                  == 'OK')]
    if drives:
        controller.erase_devices(drives)

# Poll every 5 minutes until the erase operation has finished.
while not has_erase_completed():
    time.sleep(300)

# Re-read hardware state so the report reflects post-erase status.
server.refresh()
status = {}
for controller in server.controllers:
    drive_status = {x.id: x.erase_status
                    for x in controller.unassigned_physical_drives}
    sanitize_supported = controller.properties.get(
        'Sanitize Erase Supported', 'False')
    if sanitize_supported == 'False':
        msg = ("Drives overwritten with zeros because sanitize erase "
               "is not supported on the controller.")
    else:
        msg = ("Sanitize Erase performed on the disks attached to "
               "the controller.")
    drive_status.update({'Summary': msg})
    status[controller.id] = drive_status
return status
def erase_devices()
Erase all the drives on this server. This method performs sanitize erase on all the supported physical drives in this server. This erase cannot be performed on logical drives. :returns: a dictionary of controllers with drives and the erase status. :raises exception.HPSSAException, if none of the drives support sanitize erase.
5.136266
4.497766
1.141959
''' Extracts approximate messages count header. '''
metadata = _parse_metadata(response)
headers = _parse_response_for_dict(response)
# NOTE(review): _int_to_str suggests the count ends up as a string,
# while callers are documented to receive an int property -- confirm
# the intended type of approximate_message_count.
metadata.approximate_message_count = _int_to_str(headers.get('x-ms-approximate-messages-count'))
return metadata
def _parse_metadata_and_message_count(response)
Extracts approximate messages count header.
6.494436
3.60677
1.800624
''' Extracts pop receipt and time next visible from headers. ''' headers = _parse_response_for_dict(response) message = QueueMessage() message.pop_receipt = headers.get('x-ms-popreceipt') message.time_next_visible = parser.parse(headers.get('x-ms-time-next-visible')) return message
def _parse_queue_message_from_headers(response)
Extracts pop receipt and time next visible from headers.
4.169701
2.546925
1.637151
''' <?xml version="1.0" encoding="utf-8"?> <QueueMessagesList> <QueueMessage> <MessageId>string-message-id</MessageId> <InsertionTime>insertion-time</InsertionTime> <ExpirationTime>expiration-time</ExpirationTime> <PopReceipt>opaque-string-receipt-data</PopReceipt> <TimeNextVisible>time-next-visible</TimeNextVisible> <DequeueCount>integer</DequeueCount> <MessageText>message-body</MessageText> </QueueMessage> </QueueMessagesList> ''' if response is None or response.body is None: return response messages = list() list_element = ETree.fromstring(response.body) for message_element in list_element.findall('QueueMessage'): message = QueueMessage() message.id = message_element.findtext('MessageId') message.dequeue_count = message_element.findtext('DequeueCount') message.content = decode_function(message_element.findtext('MessageText')) message.insertion_time = parser.parse(message_element.findtext('InsertionTime')) message.expiration_time = parser.parse(message_element.findtext('ExpirationTime')) message.pop_receipt = message_element.findtext('PopReceipt') time_next_visible = message_element.find('TimeNextVisible') if time_next_visible is not None: message.time_next_visible = parser.parse(time_next_visible.text) # Add message to list messages.append(message) return messages
def _convert_xml_to_queue_messages(response, decode_function)
<?xml version="1.0" encoding="utf-8"?> <QueueMessagesList> <QueueMessage> <MessageId>string-message-id</MessageId> <InsertionTime>insertion-time</InsertionTime> <ExpirationTime>expiration-time</ExpirationTime> <PopReceipt>opaque-string-receipt-data</PopReceipt> <TimeNextVisible>time-next-visible</TimeNextVisible> <DequeueCount>integer</DequeueCount> <MessageText>message-body</MessageText> </QueueMessage> </QueueMessagesList>
1.905837
1.512496
1.260061
return sys_volumes.VolumeCollection( self._conn, utils.get_subresource_path_by(self, 'Volumes'), redfish_version=self.redfish_version)
def volumes(self)
This property prepares the list of volumes :return a list of volumes.
6.239212
7.713558
0.808863
drives_list = [] for member in self.drives: drives_list.append(sys_drives.Drive( self._conn, member.get('@odata.id'), self.redfish_version)) return drives_list
def _drives_list(self)
Gets the list of drives :return a list of drives.
5.448664
5.772398
0.943917
for member in self._drives_list(): if member.media_type == constants.MEDIA_TYPE_SSD: return True return False
def has_ssd(self)
Return true if any of the drive is ssd
6.770114
4.891886
1.383948
for member in self._drives_list(): if member.media_type == constants.MEDIA_TYPE_HDD: return True return False
def has_rotational(self)
Return true if any of the drive is HDD
10.052729
5.264463
1.909545
for member in self._drives_list(): if (member.media_type == constants.MEDIA_TYPE_SSD and member.protocol == constants.PROTOCOL_NVMe): return True return False
def has_nvme_ssd(self)
Return True if the drive is SSD and protocol is NVMe
5.144842
3.775244
1.362784
drv_rot_speed_rpm = set() for member in self._drives_list(): if member.rotation_speed_rpm is not None: drv_rot_speed_rpm.add(member.rotation_speed_rpm) return drv_rot_speed_rpm
def drive_rotational_speed_rpm(self)
Gets set of rotational speed of the disks
3.276561
2.872361
1.14072
return utils.max_safe([member.volumes.maximum_size_bytes for member in self.get_members()])
def volumes_maximum_size_bytes(self)
Gets the biggest logical drive :returns the size in MiB.
10.639565
14.032075
0.758232
drv_rot_speed_rpm = set() for member in self.get_members(): drv_rot_speed_rpm.update(member.drive_rotational_speed_rpm) return drv_rot_speed_rpm
def drive_rotational_speed_rpm(self)
Gets set of rotational speed of the disks
3.011449
2.608294
1.154567
''' Creates a TableService object with the settings specified in the CloudStorageAccount. :return: A service object. :rtype: :class:`~azure.storage.table.tableservice.TableService` ''' try: from ..table.tableservice import TableService return TableService(self.account_name, self.account_key, sas_token=self.sas_token, is_emulated=self.is_emulated) except ImportError: raise Exception('The package azure-storage-table is required. ' + 'Please install it using "pip install azure-storage-table"')
def create_table_service(self)
Creates a TableService object with the settings specified in the CloudStorageAccount. :return: A service object. :rtype: :class:`~azure.storage.table.tableservice.TableService`
2.839884
2.043556
1.389677
''' Gets the properties of a storage account's Queue service, including logging, analytics and CORS rules. :param int timeout: The server timeout, expressed in seconds. :return: The queue service properties. :rtype: :class:`~azure.storage.models.ServiceProperties` ''' request = HTTPRequest() request.method = 'GET' request.host = self._get_host() request.path = _get_path() request.query = [ ('restype', 'service'), ('comp', 'properties'), ('timeout', _int_to_str(timeout)), ] response = self._perform_request(request) return _convert_xml_to_service_properties(response.body)
def get_queue_service_properties(self, timeout=None)
Gets the properties of a storage account's Queue service, including logging, analytics and CORS rules. :param int timeout: The server timeout, expressed in seconds. :return: The queue service properties. :rtype: :class:`~azure.storage.models.ServiceProperties`
2.252386
1.608676
1.400149
''' Returns a generator to list the queues. The generator will lazily follow the continuation tokens returned by the service and stop when all queues have been returned or num_results is reached. If num_results is specified and the account has more than that number of queues, the generator will have a populated next_marker field once it finishes. This marker can be used to create a new generator if more results are desired. :param str prefix: Filters the results to return only queues with names that begin with the specified prefix. :param int num_results: The maximum number of queues to return. :param bool include_metadata: Specifies that container metadata be returned in the response. :param str marker: An opaque continuation token. This value can be retrieved from the next_marker field of a previous generator object if num_results was specified and that generator has finished enumerating results. If specified, this generator will begin returning results from the point where the previous generator stopped. :param int timeout: The server timeout, expressed in seconds. This function may make multiple calls to the service in which case the timeout value specified will be applied to each individual call. ''' include = 'metadata' if include_metadata else None kwargs = {'prefix': prefix, 'max_results': num_results, 'include': include, 'marker': marker, 'timeout': timeout} resp = self._list_queues(**kwargs) return ListGenerator(resp, self._list_queues, (), kwargs)
def list_queues(self, prefix=None, num_results=None, include_metadata=False, marker=None, timeout=None)
Returns a generator to list the queues. The generator will lazily follow the continuation tokens returned by the service and stop when all queues have been returned or num_results is reached. If num_results is specified and the account has more than that number of queues, the generator will have a populated next_marker field once it finishes. This marker can be used to create a new generator if more results are desired. :param str prefix: Filters the results to return only queues with names that begin with the specified prefix. :param int num_results: The maximum number of queues to return. :param bool include_metadata: Specifies that container metadata be returned in the response. :param str marker: An opaque continuation token. This value can be retrieved from the next_marker field of a previous generator object if num_results was specified and that generator has finished enumerating results. If specified, this generator will begin returning results from the point where the previous generator stopped. :param int timeout: The server timeout, expressed in seconds. This function may make multiple calls to the service in which case the timeout value specified will be applied to each individual call.
2.846747
1.3627
2.089048
''' Returns a list of queues under the specified account. Makes a single list request to the service. Used internally by the list_queues method. :param str prefix: Filters the results to return only queues with names that begin with the specified prefix. :param str marker: A token which identifies the portion of the query to be returned with the next query operation. The operation returns a next_marker element within the response body if the list returned was not complete. This value may then be used as a query parameter in a subsequent call to request the next portion of the list of queues. The marker value is opaque to the client. :param int max_results: The maximum number of queues to return. A single list request may return up to 1000 queues and potentially a continuation token which should be followed to get additional resutls. :param str include: Include this parameter to specify that the container's metadata be returned as part of the response body. :param int timeout: The server timeout, expressed in seconds. ''' request = HTTPRequest() request.method = 'GET' request.host = self._get_host() request.path = _get_path() request.query = [ ('comp', 'list'), ('prefix', _to_str(prefix)), ('marker', _to_str(marker)), ('maxresults', _int_to_str(max_results)), ('include', _to_str(include)), ('timeout', _int_to_str(timeout)) ] response = self._perform_request(request) return _convert_xml_to_queues(response)
def _list_queues(self, prefix=None, marker=None, max_results=None, include=None, timeout=None)
Returns a list of queues under the specified account. Makes a single list request to the service. Used internally by the list_queues method. :param str prefix: Filters the results to return only queues with names that begin with the specified prefix. :param str marker: A token which identifies the portion of the query to be returned with the next query operation. The operation returns a next_marker element within the response body if the list returned was not complete. This value may then be used as a query parameter in a subsequent call to request the next portion of the list of queues. The marker value is opaque to the client. :param int max_results: The maximum number of queues to return. A single list request may return up to 1000 queues and potentially a continuation token which should be followed to get additional results. :param str include: Include this parameter to specify that the container's metadata be returned as part of the response body. :param int timeout: The server timeout, expressed in seconds.
2.712293
1.350907
2.007756
''' Creates a queue under the given account. :param str queue_name: The name of the queue to create. A queue name must be from 3 through 63 characters long and may only contain lowercase letters, numbers, and the dash (-) character. The first and last letters in the queue must be alphanumeric. The dash (-) character cannot be the first or last character. Consecutive dash characters are not permitted in the queue name. :param metadata: A dict containing name-value pairs to associate with the queue as metadata. Note that metadata names preserve the case with which they were created, but are case-insensitive when set or read. :type metadata: a dict mapping str to str :param bool fail_on_exist: Specifies whether to throw an exception if the queue already exists. :param int timeout: The server timeout, expressed in seconds. :return: A boolean indicating whether the queue was created. If fail_on_exist was set to True, this will throw instead of returning false. :rtype: bool ''' _validate_not_none('queue_name', queue_name) request = HTTPRequest() request.method = 'PUT' request.host = self._get_host() request.path = _get_path(queue_name) request.query = [('timeout', _int_to_str(timeout))] request.headers = [('x-ms-meta-name-values', metadata)] if not fail_on_exist: try: response = self._perform_request(request) if response.status == _HTTP_RESPONSE_NO_CONTENT: return False return True except AzureHttpError as ex: _dont_fail_on_exist(ex) return False else: response = self._perform_request(request) if response.status == _HTTP_RESPONSE_NO_CONTENT: raise AzureConflictHttpError( _ERROR_CONFLICT.format(response.message), response.status) return True
def create_queue(self, queue_name, metadata=None, fail_on_exist=False, timeout=None)
Creates a queue under the given account. :param str queue_name: The name of the queue to create. A queue name must be from 3 through 63 characters long and may only contain lowercase letters, numbers, and the dash (-) character. The first and last letters in the queue must be alphanumeric. The dash (-) character cannot be the first or last character. Consecutive dash characters are not permitted in the queue name. :param metadata: A dict containing name-value pairs to associate with the queue as metadata. Note that metadata names preserve the case with which they were created, but are case-insensitive when set or read. :type metadata: a dict mapping str to str :param bool fail_on_exist: Specifies whether to throw an exception if the queue already exists. :param int timeout: The server timeout, expressed in seconds. :return: A boolean indicating whether the queue was created. If fail_on_exist was set to True, this will throw instead of returning false. :rtype: bool
2.794127
1.449068
1.928223
''' Retrieves user-defined metadata and queue properties on the specified queue. Metadata is associated with the queue as name-value pairs. :param str queue_name: The name of an existing queue. :param int timeout: The server timeout, expressed in seconds. :return: A dictionary representing the queue metadata with an approximate_message_count int property on the dict estimating the number of messages in the queue. :rtype: a dict mapping str to str ''' _validate_not_none('queue_name', queue_name) request = HTTPRequest() request.method = 'GET' request.host = self._get_host() request.path = _get_path(queue_name) request.query = [ ('comp', 'metadata'), ('timeout', _int_to_str(timeout)), ] response = self._perform_request(request) return _parse_metadata_and_message_count(response)
def get_queue_metadata(self, queue_name, timeout=None)
Retrieves user-defined metadata and queue properties on the specified queue. Metadata is associated with the queue as name-value pairs. :param str queue_name: The name of an existing queue. :param int timeout: The server timeout, expressed in seconds. :return: A dictionary representing the queue metadata with an approximate_message_count int property on the dict estimating the number of messages in the queue. :rtype: a dict mapping str to str
3.053969
1.433897
2.129838
'''
Sets user-defined metadata (name-value pairs) on the specified
queue.

:param str queue_name: The name of an existing queue.
:param dict metadata: name-value pairs to store as queue metadata.
:param int timeout: The server timeout, expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
req = HTTPRequest()
req.method = 'PUT'
req.host = self._get_host()
req.path = _get_path(queue_name)
req.query = [('comp', 'metadata'),
             ('timeout', _int_to_str(timeout))]
req.headers = [('x-ms-meta-name-values', metadata)]
self._perform_request(req)
def set_queue_metadata(self, queue_name, metadata=None, timeout=None)
Sets user-defined metadata on the specified queue. Metadata is associated with the queue as name-value pairs. :param str queue_name: The name of an existing queue. :param dict metadata: A dict containing name-value pairs to associate with the queue as metadata. :param int timeout: The server timeout, expressed in seconds.
2.004082
1.618873
1.23795
''' Returns details about any stored access policies specified on the queue that may be used with Shared Access Signatures. :param str queue_name: The name of an existing queue. :param int timeout: The server timeout, expressed in seconds. :return: A dictionary of access policies associated with the queue. :rtype: dict of str to :class:`~azure.storage.models.AccessPolicy` ''' _validate_not_none('queue_name', queue_name) request = HTTPRequest() request.method = 'GET' request.host = self._get_host() request.path = _get_path(queue_name) request.query = [ ('comp', 'acl'), ('timeout', _int_to_str(timeout)), ] response = self._perform_request(request) return _convert_xml_to_signed_identifiers(response.body)
def get_queue_acl(self, queue_name, timeout=None)
Returns details about any stored access policies specified on the queue that may be used with Shared Access Signatures. :param str queue_name: The name of an existing queue. :param int timeout: The server timeout, expressed in seconds. :return: A dictionary of access policies associated with the queue. :rtype: dict of str to :class:`~azure.storage.models.AccessPolicy`
2.383312
1.469861
1.621454
'''
Replaces the stored access policies on the queue (for use with
Shared Access Signatures). Existing permissions are overwritten, so
fetch them with :func:`~get_queue_acl`, modify, and pass back the
complete set. A newly established policy may take up to 30 seconds
to become active; until then associated SAS requests raise
:class:`AzureHttpError`.

:param str queue_name: The name of an existing queue.
:param signed_identifiers: up to 5 access policies to associate with
    the queue; an empty dict clears all policies on the service.
:type signed_identifiers: dict of str to
    :class:`~azure.storage.models.AccessPolicy`
:param int timeout: The server timeout, expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
req = HTTPRequest()
req.method = 'PUT'
req.host = self._get_host()
req.path = _get_path(queue_name)
req.query = [('comp', 'acl'),
             ('timeout', _int_to_str(timeout))]
req.body = _get_request_body(
    _convert_signed_identifiers_to_xml(signed_identifiers))
self._perform_request(req)
def set_queue_acl(self, queue_name, signed_identifiers=None, timeout=None)
Sets stored access policies for the queue that may be used with Shared Access Signatures. When you set permissions for a queue, the existing permissions are replaced. To update the queue’s permissions, call :func:`~get_queue_acl` to fetch all access policies associated with the queue, modify the access policy that you wish to change, and then call this function with the complete set of data to perform the update. When you establish a stored access policy on a queue, it may take up to 30 seconds to take effect. During this interval, a shared access signature that is associated with the stored access policy will throw an :class:`AzureHttpError` until the access policy becomes active. :param str queue_name: The name of an existing queue. :param signed_identifiers: A dictionary of access policies to associate with the queue. The dictionary may contain up to 5 elements. An empty dictionary will clear the access policies set on the service. :type signed_identifiers: dict of str to :class:`~azure.storage.models.AccessPolicy` :param int timeout: The server timeout, expressed in seconds.
3.253508
1.217906
2.671396
'''
Appends a new message to the back of the queue.

The message stays invisible for visibility_timeout seconds (default
0) and is deleted from the queue once its time-to-live expires.

:param str queue_name: The name of the queue to put the message into.
:param obj content: message content; allowed type is determined by
    the encode_function set on the service (default str). The
    encoded message can be up to 64KB in size.
:param int visibility_timeout: seconds, relative to server time;
    must be >= 0 and at most 7 days, must not be later than the
    expiry time, and should be smaller than the time-to-live.
:param int time_to_live: seconds the message remains in the queue;
    maximum and default are 7 days.
:param int timeout: The server timeout, expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('content', content)
req = HTTPRequest()
req.method = 'POST'
req.host = self._get_host()
req.path = _get_path(queue_name, True)
req.query = [('visibilitytimeout', _to_str(visibility_timeout)),
             ('messagettl', _to_str(time_to_live)),
             ('timeout', _int_to_str(timeout))]
req.body = _get_request_body(
    _convert_queue_message_xml(content, self.encode_function))
self._perform_request(req)
def put_message(self, queue_name, content, visibility_timeout=None, time_to_live=None, timeout=None)
Adds a new message to the back of the message queue. The visibility timeout specifies the time that the message will be invisible. After the timeout expires, the message will become visible. If a visibility timeout is not specified, the default value of 0 is used. The message time-to-live specifies how long a message will remain in the queue. The message will be deleted from the queue when the time-to-live period expires. :param str queue_name: The name of the queue to put the message into. :param obj content: Message content. Allowed type is determined by the encode_function set on the service. Default is str. The encoded message can be up to 64KB in size. :param int visibility_timeout: If not specified, the default value is 0. Specifies the new visibility timeout value, in seconds, relative to server time. The value must be larger than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message cannot be set to a value later than the expiry time. visibility_timeout should be set to a value smaller than the time-to-live value. :param int time_to_live: Specifies the time-to-live interval for the message, in seconds. The maximum time-to-live allowed is 7 days. If this parameter is omitted, the default time-to-live is 7 days. :param int timeout: The server timeout, expressed in seconds.
2.566079
1.289724
1.989634
'''
Retrieves one or more messages from the front of the queue.

When a message is retrieved from the queue, the response includes the
message content and a pop_receipt value, which is required to delete
the message. The message is not automatically deleted from the queue,
but after it has been retrieved, it is not visible to other clients for
the time interval specified by the visibility_timeout parameter.

:param str queue_name:
    The name of the queue to get messages from.
:param int num_messages:
    A nonzero integer value that specifies the number of messages to
    retrieve from the queue, up to a maximum of 32. If fewer are
    visible, the visible messages are returned. By default, a single
    message is retrieved from the queue with this operation.
:param int visibility_timeout:
    Specifies the new visibility timeout value, in seconds, relative
    to server time. The new value must be larger than or equal to 1
    second, and cannot be larger than 7 days. The visibility timeout of
    a message can be set to a value later than the expiry time.
:param int timeout:
    The server timeout, expressed in seconds.
:return:
    A list of :class:`~azure.storage.queue.models.QueueMessage` objects.
:rtype: list of :class:`~azure.storage.queue.models.QueueMessage`
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = _get_path(queue_name, True)
# None values are serialized by _to_str and simply omit the option
# server-side; the defaults are then applied by the service.
request.query = [
    ('numofmessages', _to_str(num_messages)),
    ('visibilitytimeout', _to_str(visibility_timeout)),
    ('timeout', _int_to_str(timeout))
]
response = self._perform_request(request)
# Message bodies are decoded with the service's decode_function.
return _convert_xml_to_queue_messages(response, self.decode_function)
def get_messages(self, queue_name, num_messages=None, visibility_timeout=None, timeout=None)
Retrieves one or more messages from the front of the queue. When a message is retrieved from the queue, the response includes the message content and a pop_receipt value, which is required to delete the message. The message is not automatically deleted from the queue, but after it has been retrieved, it is not visible to other clients for the time interval specified by the visibility_timeout parameter. :param str queue_name: The name of the queue to get messages from. :param int num_messages: A nonzero integer value that specifies the number of messages to retrieve from the queue, up to a maximum of 32. If fewer are visible, the visible messages are returned. By default, a single message is retrieved from the queue with this operation. :param int visibility_timeout: Specifies the new visibility timeout value, in seconds, relative to server time. The new value must be larger than or equal to 1 second, and cannot be larger than 7 days. The visibility timeout of a message can be set to a value later than the expiry time. :param int timeout: The server timeout, expressed in seconds. :return: A list of :class:`~azure.storage.queue.models.QueueMessage` objects. :rtype: list of :class:`~azure.storage.queue.models.QueueMessage`
2.267197
1.300958
1.742713
'''
Deletes all messages from the specified queue.

:param str queue_name:
    The name of the queue whose messages to clear.
:param int timeout:
    The server timeout, expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
# DELETE on the queue's messages sub-resource clears every message.
request.method = 'DELETE'
request.host = self._get_host()
request.path = _get_path(queue_name, True)
request.query = [('timeout', _int_to_str(timeout))]
self._perform_request(request)
def clear_messages(self, queue_name, timeout=None)
Deletes all messages from the specified queue. :param str queue_name: The name of the queue whose messages to clear. :param int timeout: The server timeout, expressed in seconds.
2.179227
1.743852
1.249663
# Enable/disable secure boot for the next boot; a server reset is
# needed for the change to take effect.
# Reject anything that is not a strict boolean so we never send an
# unexpected value type to the BMC.
if not isinstance(secure_boot_enable, bool):
    msg = ('The parameter "%(parameter)s" value "%(value)s" is '
           'invalid. Valid values are: True/False.' %
           {'parameter': 'secure_boot_enable',
            'value': secure_boot_enable})
    raise exception.InvalidInputError(msg)
# PATCH the secure-boot resource with the requested state.
self._conn.patch(self.path,
                 data={'SecureBootEnable': secure_boot_enable})
def enable_secure_boot(self, secure_boot_enable)
Enable/Disable secure boot on the server. Caller needs to reset the server after issuing this command to bring this into effect. :param secure_boot_enable: True, if secure boot needs to be enabled for next boot, else False. :raises: InvalidInputError, if the validation of the input fails :raises: SushyError, on an error from iLO.
3.215369
2.793211
1.151137
# Return the set of reset-keys values supported for secure boot.
reset_keys_action = self._get_reset_keys_action_element()
if not reset_keys_action.allowed_values:
    # The BMC did not advertise allowed values; fall back to every
    # value our mapping knows about.
    LOG.warning('Could not figure out the allowed values for the '
                'reset keys in secure boot %s', self.path)
    return set(mappings.SECUREBOOT_RESET_KEYS_MAP_REV)
# Otherwise expose only values supported by both the BMC and our
# mapping (intersection of the two sets), translated to the
# caller-facing constants.
return set([mappings.SECUREBOOT_RESET_KEYS_MAP[v] for v in
            set(mappings.SECUREBOOT_RESET_KEYS_MAP).
            intersection(reset_keys_action.allowed_values)])
def get_allowed_reset_keys_values(self)
Get the allowed values for resetting the system. :returns: A set with the allowed values.
4.398382
4.635808
0.948784
# Validate the requested value against what the BMC actually allows
# before translating and POSTing it.
valid_keys_resets = self.get_allowed_reset_keys_values()
if target_value not in valid_keys_resets:
    msg = ('The parameter "%(parameter)s" value "%(target_value)s" is '
           'invalid. Valid values are: %(valid_keys_reset_values)s' %
           {'parameter': 'target_value',
            'target_value': target_value,
            'valid_keys_reset_values': valid_keys_resets})
    raise exception.InvalidInputError(msg)
# Translate the caller-facing constant to the Redfish value.
value = mappings.SECUREBOOT_RESET_KEYS_MAP_REV[target_value]
target_uri = (
    self._get_reset_keys_action_element().target_uri)
self._conn.post(target_uri, data={'ResetKeysType': value})
def reset_keys(self, target_value)
Resets the secure boot keys. :param target_value: The target value to be set. :raises: InvalidInputError, if the target value is not allowed. :raises: SushyError, on an error from iLO.
4.117058
3.22372
1.277114
'''
Extracts out resource properties and metadata information.
Ignores the standard http headers.
'''
if response is None or response.headers is None:
    return None
props = result_class()
for key, value in response.headers:
    # NOTE(review): each map entry appears to be a 3-tuple of
    # (nested-attribute-name-or-None, attribute-name, converter) --
    # confirm against GET_PROPERTIES_ATTRIBUTE_MAP's definition.
    info = GET_PROPERTIES_ATTRIBUTE_MAP.get(key)
    if info:
        if info[0] is None:
            # Top-level attribute: set directly on the result object.
            setattr(props, info[1], info[2](value))
        else:
            # Nested attribute: set on the named child object.
            attr = getattr(props, info[0])
            setattr(attr, info[1], info[2](value))
return props
def _parse_properties(response, result_class)
Extracts out resource properties and metadata information. Ignores the standard http headers.
3.976872
2.551752
1.558487
''' Extracts name-values from response header. Filter out the standard http headers.''' if response is None: return None http_headers = ['server', 'date', 'location', 'host', 'via', 'proxy-connection', 'connection'] return_dict = _HeaderDict() if response.headers: for name, value in response.headers: if not name.lower() in http_headers: return_dict[name] = value return return_dict
def _parse_response_for_dict(response)
Extracts name-values from response header. Filter out the standard http headers.
5.907051
3.534986
1.671025
'''
Adds an insert entity operation to the batch. See
:func:`~azure.storage.table.tableservice.TableService.insert_entity`
for more information on inserts.

The operation will not be executed until the batch is committed.

:param entity:
    The entity to insert. Could be a dict or an entity object.
    Must contain a PartitionKey and a RowKey.
:type entity: a dict or :class:`azure.storage.table.models.Entity`
'''
request = _insert_entity(entity)
# Queue the request; _add_to_batch enforces the single-partition /
# unique-row constraints of a batch.
self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request)
def insert_entity(self, entity)
Adds an insert entity operation to the batch. See :func:`~azure.storage.table.tableservice.TableService.insert_entity` for more information on inserts. The operation will not be executed until the batch is committed. :param entity: The entity to insert. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: a dict or :class:`azure.storage.table.models.Entity`
4.625636
1.677972
2.756682
'''
Adds an update entity operation to the batch. See
:func:`~azure.storage.table.tableservice.TableService.update_entity`
for more information on updates.

The operation will not be executed until the batch is committed.

:param entity:
    The entity to update. Could be a dict or an entity object.
    Must contain a PartitionKey and a RowKey.
:type entity: a dict or :class:`azure.storage.table.models.Entity`
:param str if_match:
    The client may specify the ETag for the entity on the request in
    order to compare to the ETag maintained by the service for the
    purpose of optimistic concurrency. The update operation will be
    performed only if the ETag sent by the client matches the value
    maintained by the server, indicating that the entity has not been
    modified since it was retrieved by the client. To force an
    unconditional update, set If-Match to the wildcard character (*).
'''
request = _update_entity(entity, if_match)
# Queue the request; executed when the batch is committed.
self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request)
def update_entity(self, entity, if_match='*')
Adds an update entity operation to the batch. See :func:`~azure.storage.table.tableservice.TableService.update_entity` for more information on updates. The operation will not be executed until the batch is committed. :param entity: The entity to update. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: a dict or :class:`azure.storage.table.models.Entity` :param str if_match: The client may specify the ETag for the entity on the request in order to compare to the ETag maintained by the service for the purpose of optimistic concurrency. The update operation will be performed only if the ETag sent by the client matches the value maintained by the server, indicating that the entity has not been modified since it was retrieved by the client. To force an unconditional update, set If-Match to the wildcard character (*).
3.706991
1.349122
2.747706
'''
Adds a merge entity operation to the batch. See
:func:`~azure.storage.table.tableservice.TableService.merge_entity`
for more information on merges.

The operation will not be executed until the batch is committed.

:param entity:
    The entity to merge. Could be a dict or an entity object.
    Must contain a PartitionKey and a RowKey.
:type entity: a dict or :class:`azure.storage.table.models.Entity`
:param str if_match:
    The client may specify the ETag for the entity on the request in
    order to compare to the ETag maintained by the service for the
    purpose of optimistic concurrency. The merge operation will be
    performed only if the ETag sent by the client matches the value
    maintained by the server, indicating that the entity has not been
    modified since it was retrieved by the client. To force an
    unconditional merge, set If-Match to the wildcard character (*).
'''
request = _merge_entity(entity, if_match)
# Queue the request; executed when the batch is committed.
self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request)
def merge_entity(self, entity, if_match='*')
Adds a merge entity operation to the batch. See :func:`~azure.storage.table.tableservice.TableService.merge_entity` for more information on merges. The operation will not be executed until the batch is committed. :param entity: The entity to merge. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: a dict or :class:`azure.storage.table.models.Entity` :param str if_match: The client may specify the ETag for the entity on the request in order to compare to the ETag maintained by the service for the purpose of optimistic concurrency. The merge operation will be performed only if the ETag sent by the client matches the value maintained by the server, indicating that the entity has not been modified since it was retrieved by the client. To force an unconditional merge, set If-Match to the wildcard character (*).
3.881847
1.350107
2.875215
'''
Adds an insert or replace entity operation to the batch. See
:func:`~azure.storage.table.tableservice.TableService.insert_or_replace_entity`
for more information on insert or replace operations.

The operation will not be executed until the batch is committed.

:param entity:
    The entity to insert or replace. Could be a dict or an entity
    object. Must contain a PartitionKey and a RowKey.
:type entity: a dict or :class:`azure.storage.table.models.Entity`
'''
request = _insert_or_replace_entity(entity)
# Queue the request; executed when the batch is committed.
self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request)
def insert_or_replace_entity(self, entity)
Adds an insert or replace entity operation to the batch. See :func:`~azure.storage.table.tableservice.TableService.insert_or_replace_entity` for more information on insert or replace operations. The operation will not be executed until the batch is committed. :param entity: The entity to insert or replace. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: a dict or :class:`azure.storage.table.models.Entity`
4.207181
1.706857
2.46487
'''
Adds an insert or merge entity operation to the batch. See
:func:`~azure.storage.table.tableservice.TableService.insert_or_merge_entity`
for more information on insert or merge operations.

The operation will not be executed until the batch is committed.

:param entity:
    The entity to insert or merge. Could be a dict or an entity
    object. Must contain a PartitionKey and a RowKey.
:type entity: a dict or :class:`azure.storage.table.models.Entity`
'''
request = _insert_or_merge_entity(entity)
# Queue the request; executed when the batch is committed.
self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request)
def insert_or_merge_entity(self, entity)
Adds an insert or merge entity operation to the batch. See :func:`~azure.storage.table.tableservice.TableService.insert_or_merge_entity` for more information on insert or merge operations. The operation will not be executed until the batch is committed. :param entity: The entity to insert or merge. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: a dict or :class:`azure.storage.table.models.Entity`
4.020687
1.605367
2.504528
# Resolve the iSCSI settings sub-resource advertised under
# @Redfish.Settings/SettingsObject.
# NOTE(review): looks like this is wrapped by a caching property that
# is reset on refresh -- confirm with the enclosing class definition.
return ISCSISettings(
    self._conn, utils.get_subresource_path_by(
        self, ["@Redfish.Settings", "SettingsObject"]),
    redfish_version=self.redfish_version)
def iscsi_settings(self)
Property to provide reference to iSCSI settings instance It is calculated once when the first time it is queried. On refresh, this property gets reset.
7.527366
8.325448
0.904139
# Apply the caller-supplied iSCSI configuration by PATCHing this
# resource's own path; iscsi_data is sent as-is.
self._conn.patch(self.path, data=iscsi_data)
def update_iscsi_settings(self, iscsi_data)
Update iscsi data :param data: default iscsi config data
9.493635
11.278797
0.841724
# Build the array-controller collection from the resource path found
# under Links/ArrayControllers.
return array_controller.HPEArrayControllerCollection(
    self._conn, utils.get_subresource_path_by(
        self, ['Links', 'ArrayControllers']),
    redfish_version=self.redfish_version)
def array_controllers(self)
This property gets the list of instances for array controllers This property gets the list of instances for array controllers :returns: a list of instances of array controllers.
7.281698
6.694935
1.087643
retry_count = retries # Delay for ``delay_before_attempts`` secs, before beginning any attempt time.sleep(delay_before_attempts) while retry_count: try: LOG.debug("Calling '%s', retries left: %d", has_operation_completed.__name__, retry_count) if has_operation_completed(): break except exception.IloError: pass time.sleep(delay_bw_retries) retry_count -= 1 else: LOG.debug("Max retries exceeded with: '%s'", has_operation_completed.__name__) if not is_silent_loop_exit: raise failover_exc(failover_msg)
def wait_for_operation_to_complete( has_operation_completed, retries=10, delay_bw_retries=5, delay_before_attempts=10, failover_exc=exception.IloError, failover_msg=("Operation did not complete even after multiple " "attempts."), is_silent_loop_exit=False)
Attempts the provided operation for a specified number of times. If it runs out of attempts, then it raises an exception. On success, it breaks out of the loop. :param has_operation_completed: the method to retry and it needs to return a boolean to indicate success or failure. :param retries: number of times the operation to be (re)tried, default 10 :param delay_bw_retries: delay in seconds before attempting after each failure, default 5. :param delay_before_attempts: delay in seconds before beginning any operation attempt, default 10. :param failover_exc: the exception which gets raised in case of failure upon exhausting all the attempts, default IloError. :param failover_msg: the msg with which the exception gets raised in case of failure upon exhausting all the attempts. :param is_silent_loop_exit: decides if exception has to be raised (in case of failure upon exhausting all the attempts) or not, default False (will be raised). :raises: failover_exc, if failure happens even after all the attempts, default IloError.
2.716232
2.762523
0.983243
# Poll until iLO answers a trivial query again after a reset.
def is_ilo_up_after_reset():
    # Any successful product-name query proves iLO is responsive.
    return ilo_object.get_product_name() is not None

# A def (instead of the previous named lambda, PEP 8 E731) gives the
# predicate its __name__ automatically, which
# wait_for_operation_to_complete uses in its debug logs.
wait_for_operation_to_complete(
    is_ilo_up_after_reset,
    failover_exc=exception.IloConnectionError,
    failover_msg='iLO is not up after reset.'
)
def wait_for_ilo_after_reset(ilo_object)
Continuously polls for iLO to come up after reset.
3.749083
3.340057
1.122461
# Previous and current flash states, kept in one-element lists so the
# closure below can rebind them (pre-nonlocal idiom).
p_state = ['IDLE']
c_state = ['IDLE']

def has_firmware_flash_completed():
    # Completion is detected from the state *transition*, not a single
    # state: PROGRESSING -> terminal state, or IDLE -> terminal state
    # (the update may finish between two polls).
    curr_state, curr_percent = ris_object.get_firmware_update_progress()
    p_state[0] = c_state[0]
    c_state[0] = curr_state
    if (((p_state[0] == 'PROGRESSING') and (c_state[0] in
         ['COMPLETED', 'ERROR', 'UNKNOWN', 'IDLE'])) or
            (p_state[0] == 'IDLE' and (c_state[0] in
                                       ['COMPLETED', 'ERROR']))):
        return True
    return False

wait_for_operation_to_complete(
    has_firmware_flash_completed,
    delay_bw_retries=30,
    failover_msg='iLO firmware update has failed.'
)
# The flash resets iLO; wait until it is reachable again.
wait_for_ilo_after_reset(ris_object)
def wait_for_ris_firmware_update_to_complete(ris_object)
Continuously polls for iLO firmware update to complete.
4.427957
4.235718
1.045385
def is_ilo_reset_initiated():
    # While the firmware update is resetting iLO, any RIBCL call
    # fails; a failure therefore means the reset has started.
    try:
        LOG.debug(ribcl_object._('Checking for iLO reset...'))
        ribcl_object.get_product_name()
        return False
    except exception.IloError:
        LOG.debug(ribcl_object._('iLO is being reset...'))
        return True

# Note(deray): wait for 5 secs, before checking if iLO reset got triggered
# at every interval of 6 secs. This looping call happens for 10 times.
# Once it comes out of the wait of iLO reset trigger, then it starts
# waiting for iLO to be up again after reset.
wait_for_operation_to_complete(
    is_ilo_reset_initiated,
    delay_bw_retries=6,
    delay_before_attempts=5,
    is_silent_loop_exit=True
)
wait_for_ilo_after_reset(ribcl_object)
def wait_for_ribcl_firmware_update_to_complete(ribcl_object)
Continuously checks for iLO firmware update to complete.
7.001441
6.930871
1.010182
base_target_filename = os.path.basename(target_file) file_name, file_ext_with_dot = os.path.splitext(base_target_filename) return file_name, file_ext_with_dot
def get_filename_and_extension_of(target_file)
Gets the base filename and extension of the target file. :param target_file: the complete path of the target file :returns: base filename and extension
2.27219
2.731306
0.831906
# Add the owner-execute bit while preserving every other mode bit.
current_mode = os.stat(target_file).st_mode
os.chmod(target_file, current_mode | stat.S_IXUSR)
def add_exec_permission_to(target_file)
Add executable permissions to the file :param target_file: the target file whose permission to be changed
2.139511
3.048939
0.701723
if not ilo_ver_str: return None try: # Note(vmud213):This logic works for all strings # that contain the version info as <major>.<minor> # Formats of the strings: # Release version -> "2.50 Feb 18 2016" # Debug version -> "iLO 4 v2.50" # random version -> "XYZ ABC 2.30" pattern = re.search(ILO_VER_STR_PATTERN, ilo_ver_str) if pattern: matched = pattern.group(0) if matched: return matched return None except Exception: return None
def get_major_minor(ilo_ver_str)
Extract the major and minor number from the passed string :param ilo_ver_str: the string that contains the version information :returns: String of the form "<major>.<minor>" or None
7.33842
7.631774
0.961561
boot_mode_bios = 'false' boot_mode_uefi = 'false' if (supported_boot_mode_constant == constants.SUPPORTED_BOOT_MODE_LEGACY_BIOS_ONLY): boot_mode_bios = 'true' elif (supported_boot_mode_constant == constants.SUPPORTED_BOOT_MODE_UEFI_ONLY): boot_mode_uefi = 'true' elif (supported_boot_mode_constant == constants.SUPPORTED_BOOT_MODE_LEGACY_BIOS_AND_UEFI): boot_mode_bios = 'true' boot_mode_uefi = 'true' return SupportedBootModes(boot_mode_bios=boot_mode_bios, boot_mode_uefi=boot_mode_uefi)
def get_supported_boot_modes(supported_boot_mode_constant)
Retrieves the server supported boot modes It retrieves the server supported boot modes as a namedtuple containing 'boot_mode_bios' as 'true'/'false' (in string format) and 'boot_mode_uefi' again as true'/'false'. :param supported_boot_mode_constant: supported boot_mode constant :returns: A namedtuple containing ``boot_mode_bios`` and ``boot_mode_uefi`` with 'true'/'false' set accordingly for legacy BIOS and UEFI boot modes.
1.754733
1.626767
1.078663
# Normally all properties look like this: # Unique Identifier: 600508B1001CE4ACF473EE9C826230FF # Disk Name: /dev/sda # Mount Points: None key = '' value = '' try: key, value = string.split(': ') except ValueError: # This handles the case when the property of a logical drive # returned is as follows. Here we cannot split by ':' because # the disk id has colon in it. So if this is about disk, # then strip it accordingly. # Mirror Group 0: physicaldrive 6I:1:5 string = string.lstrip(' ') if string.startswith('physicaldrive'): fields = string.split(' ') # Include fields[1] to key to avoid duplicate pairs # with the same 'physicaldrive' key key = fields[0] + " " + fields[1] value = fields[1] else: # TODO(rameshg87): Check if this ever occurs. return string.strip(' '), None return key.strip(' '), value.strip(' ')
def _get_key_value(string)
Return the (key, value) as a tuple from a string.
9.212882
8.936385
1.030941
# Recursive parser: builds a nested dict from indentation-structured
# hpssacli/ssacli output.  Returns (dict, last-consumed-line-index).
info = {}
current_item = None
i = start_index
while i < len(lines):
    current_line = lines[i]
    current_line_indentation = _get_indentation(current_line)
    # Check for multi-level returns: a dedent below our own level
    # means this line belongs to an ancestor -- hand it back.
    if current_line_indentation < indentation:
        return info, i-1
    if current_line_indentation == indentation:
        # Sibling heading at our level: open a new sub-dict for it.
        current_item = current_line.lstrip(' ')
        info[current_item] = {}
        i = i + 1
        continue
    if i < len(lines) - 1:
        next_line_indentation = _get_indentation(lines[i+1])
    else:
        # Last line: pretend the next indentation equals this one so
        # the key/value branch below runs.
        next_line_indentation = current_line_indentation
    if next_line_indentation > current_line_indentation:
        # Deeper block follows: recurse and merge its result into the
        # current heading (merging preserves keys seen earlier).
        ret_dict, i = _get_dict(lines, i, current_line_indentation,
                                deep+1)
        for key in ret_dict.keys():
            if key in info[current_item]:
                info[current_item][key].update(ret_dict[key])
            else:
                info[current_item][key] = ret_dict[key]
    else:
        # Leaf line: plain "key: value" pair under the current heading.
        key, value = _get_key_value(current_line)
        if key:
            info[current_item][key] = value
    # Do not return if it's the top level of recursion
    if next_line_indentation < current_line_indentation and deep > 0:
        return info, i
    i = i + 1
return info, i
def _get_dict(lines, start_index, indentation, deep)
Recursive function for parsing hpssacli/ssacli output.
2.124718
2.107826
1.008014
lines = stdout.split("\n") lines = list(filter(None, lines)) info_dict, j = _get_dict(lines, 0, 0, 0) return info_dict
def _convert_to_dict(stdout)
Wrapper function for parsing hpssacli/ssacli command. This function gets the output from hpssacli/ssacli command and calls the recursive function _get_dict to return the complete dictionary containing the RAID information.
5.190015
3.835874
1.35302
# Pop our private kwarg so it is not forwarded to processutils.
dont_transform_to_hpssa_exception = kwargs.get(
    'dont_transform_to_hpssa_exception', False)
kwargs.pop('dont_transform_to_hpssa_exception', None)
try:
    # Prefer the newer 'ssacli' binary; fall back to 'hpssacli'.
    if os.path.exists("/usr/sbin/ssacli"):
        stdout, stderr = processutils.execute("ssacli",
                                              *args,
                                              **kwargs)
    else:
        stdout, stderr = processutils.execute("hpssacli",
                                              *args,
                                              **kwargs)
except (OSError, processutils.ProcessExecutionError) as e:
    if 'No controllers detected' in str(e):
        # A missing controller is always reported as an HPSSA error,
        # regardless of the transform flag.
        msg = ("SSA controller not found. Enable ssa controller"
               " to continue with the desired operation")
        raise exception.HPSSAOperationError(reason=msg)
    elif not dont_transform_to_hpssa_exception:
        raise exception.HPSSAOperationError(reason=e)
    else:
        # Caller asked for the raw exception (e.g. to inspect the
        # process exit code).
        raise
return stdout, stderr
def _ssacli(*args, **kwargs)
Wrapper function for executing hpssacli/ssacli command. This function executes ssacli command if it exists, else it falls back to hpssacli. :param args: args to be provided to hpssacli/ssacli command :param kwargs: kwargs to be sent to processutils except the following: - dont_transform_to_hpssa_exception - Set to True if this method shouldn't transform other exceptions to hpssa exceptions only when hpssa controller is available. This is useful when the return code from hpssacli/ssacli is useful for analysis. :returns: a tuple containing the stdout and stderr after running the process. :raises: HPSSAOperationError, if some error was encountered and dont_dont_transform_to_hpssa_exception was set to False. :raises: OSError or processutils.ProcessExecutionError if execution failed and dont_transform_to_hpssa_exception was set to True.
3.232406
2.196067
1.471907
# Re-read the full RAID configuration from the tool and rebuild the
# controller objects from scratch, discarding any cached state.
raid_info = _convert_to_dict(self._get_all_details())
self.controllers = [Controller(name, details, self)
                    for name, details in raid_info.items()]
# Record when this snapshot was taken.
self.last_updated = time.time()
def refresh(self)
Refresh the server and it's child objects. This method removes all the cache information in the server and it's child objects, and fetches the information again from the server using hpssacli/ssacli command. :raises: HPSSAOperationError, if hpssacli/ssacli operation failed.
5.56327
6.18342
0.899708
for controller in self.controllers: if controller.id == id: return controller return None
def get_controller_by_id(self, id)
Get the controller object given the id. This method returns the controller object for given id. :param id: id of the controller, for example 'Smart Array P822 in Slot 2' :returns: Controller object which has the id or None if the controller is not found.
2.797159
4.541491
0.615912
logical_drives = [] for controller in self.controllers: for array in controller.raid_arrays: for logical_drive in array.logical_drives: logical_drives.append(logical_drive) return logical_drives
def get_logical_drives(self)
Get all the RAID logical drives in the Server. This method returns all the RAID logical drives on the server by examining all the controllers. :returns: a list of LogicalDrive objects.
2.743936
2.426391
1.130871
physical_drives = [] for controller in self.controllers: # First add unassigned physical drives. for physical_drive in controller.unassigned_physical_drives: physical_drives.append(physical_drive) # Now add physical drives part of RAID arrays. for array in controller.raid_arrays: for physical_drive in array.physical_drives: physical_drives.append(physical_drive) return physical_drives
def get_physical_drives(self)
Get all the RAID physical drives on the Server. This method returns all the physical drives on the server by examining all the controllers. :returns: a list of PhysicalDrive objects.
2.765632
2.545333
1.08655
disk = [x for x in self.get_logical_drives() if x.wwn == wwn] if disk: return disk[0] return None
def get_logical_drive_by_wwn(self, wwn)
Get the logical drive object given the wwn. This method returns the logical drive object with the given wwn. :param wwn: wwn of the logical drive :returns: LogicalDrive object which has the wwn or None if logical drive is not found.
2.694011
3.588518
0.750731
for phy_drive in self.unassigned_physical_drives: if phy_drive.id == id: return phy_drive for array in self.raid_arrays: for phy_drive in array.physical_drives: if phy_drive.id == id: return phy_drive return None
def get_physical_drive_by_id(self, id)
Get a PhysicalDrive object for given id. This method examines both assigned and unassigned physical drives of the controller and returns the physical drive. :param id: id of physical drive, for example '5I:1:1'. :returns: PhysicalDrive object having the id, or None if physical drive is not found.
2.44203
2.495353
0.978631
slot = self.properties['Slot'] base_cmd = ("controller", "slot=%s" % slot) cmd = base_cmd + args return _ssacli(*cmd, **kwargs)
def execute_cmd(self, *args, **kwargs)
Execute a given hpssacli/ssacli command on the controller. This method executes a given command on the controller. :params args: a tuple consisting of sub-commands to be appended after specifying the controller in hpssacli/ssacli command. :param kwargs: kwargs to be passed to execute() in processutils :raises: HPSSAOperationError, if hpssacli/ssacli operation failed.
10.91806
7.747467
1.409243
# Build up the ssacli 'create type=logicaldrive' argument list from
# the raid-config dictionary, then run it on this controller.
cmd_args = []
if 'array' in logical_drive_info:
    # Target an existing array when one is named.
    cmd_args.extend(['array', logical_drive_info['array']])
cmd_args.extend(['create', "type=logicaldrive"])
if 'physical_disks' in logical_drive_info:
    phy_drive_ids = ','.join(logical_drive_info['physical_disks'])
    cmd_args.append("drives=%s" % phy_drive_ids)
raid_level = logical_drive_info['raid_level']
# For RAID levels (like 5+0 and 6+0), HPSSA names them differently.
# Check if we have mapping stored, otherwise use the same.
raid_level = constants.RAID_LEVEL_INPUT_TO_HPSSA_MAPPING.get(
    raid_level, raid_level)
cmd_args.append("raid=%s" % raid_level)
# If size_gb is MAX, then don't pass size argument. HPSSA will
# automatically allocate the maximum # disks size possible to the
# logical disk.
if logical_drive_info['size_gb'] != "MAX":
    size_mb = logical_drive_info['size_gb'] * 1024
    cmd_args.append("size=%s" % size_mb)
# process_input='y' answers the tool's confirmation prompt.
self.execute_cmd(*cmd_args, process_input='y')
def create_logical_drive(self, logical_drive_info)
Create a logical drive on the controller. This method creates a logical drive on the controller when the logical drive details and physical drive ids are passed to it. :param logical_drive_info: a dictionary containing the details of the logical drive as specified in raid config. :raises: HPSSAOperationError, if hpssacli/ssacli operation failed.
3.797273
3.597708
1.05547
cmd_args = [] cmd_args.append("pd %s" % drive) cmd_args.extend(['modify', 'erase', pattern]) if pattern != 'erasepattern=zero': cmd_args.append('unrestricted=off') cmd_args.append('forced') return cmd_args
def _get_erase_command(self, drive, pattern)
Return the command arguments based on the pattern. Erase command examples: 1) Sanitize: "ssacli ctrl slot=0 pd 1I:1:1 modify erase erasepattern=overwrite unrestricted=off forced" 2) Zeros: "ssacli ctrl slot=0 pd 1I:1:1 modify erase erasepattern=zero forced" :param drive: A string with comma separated list of drives. :param pattern: A string which defines the type of erase. :returns: A list of ssacli command arguments.
8.229786
3.512159
2.343227
def erase_devices(self, drives):
    """Perform erase on all the given drives in the controller.

    HDD drives are erased by overwriting with a pattern and SSD drives
    by erasing storage blocks. The drives are unavailable until the
    erase operation completes or fails. If sanitize erase is not
    supported on a disk, a zero-fill erase is attempted instead.

    :param drives: A list of drive objects in the controller.
    :raises: HPSSAOperationError, if sanitize erase is not supported.
    """
    for drive in drives:
        if drive.disk_type == constants.DISK_TYPE_HDD:
            pattern = 'overwrite'
        else:
            pattern = 'block'

        erase_args = self._get_erase_command(
            drive.id, 'erasepattern=%s' % pattern)
        stdout = self.execute_cmd(*erase_args)
        LOG.debug("Sanitize disk erase invoked with erase pattern as "
                  "'%(pattern)s' on disk type: %(disk_type)s."
                  % {'pattern': pattern, 'disk_type': drive.disk_type})

        if "not supported" in str(stdout):
            # Sanitize erase was rejected for this disk; fall back to
            # populating zeros on the drive.
            new_pattern = 'zero'
            fallback_args = self._get_erase_command(
                drive.id, 'erasepattern=zero')
            self.execute_cmd(*fallback_args)
            LOG.debug("Sanitize disk erase invoked with erase pattern as "
                      "'%(pattern)s' is not supported on disk type: "
                      "%(disk_type)s. Now its invoked with erase pattern "
                      "as %(new_pattern)s."
                      % {'pattern': pattern,
                         'disk_type': drive.disk_type,
                         'new_pattern': new_pattern})
3.39204
3.286395
1.032146
def can_accomodate(self, logical_disk):
    """Check if this RAID array can accomodate the logical disk.

    This method uses the hpssacli/ssacli "size=?" query to find the
    maximum size a logical drive with the desired RAID level could
    have on this array, and compares it with the requested size.

    :param logical_disk: Dictionary of logical disk to be created;
        the 'raid_level' and 'size_gb' keys are consulted ('size_gb'
        may be the string "MAX").
    :returns: True, if logical disk can be created on the RAID array.
        False, otherwise.
    :raises: exception.HPSSAOperationError, if the hpssacli/ssacli
        call fails for a reason other than an unsupported RAID level.
    """
    raid_level = constants.RAID_LEVEL_INPUT_TO_HPSSA_MAPPING.get(
        logical_disk['raid_level'], logical_disk['raid_level'])
    args = ("array", self.id, "create", "type=logicaldrive",
            "raid=%s" % raid_level, "size=?")

    if logical_disk['size_gb'] != "MAX":
        desired_disk_size = logical_disk['size_gb']
    else:
        # "MAX" only needs the array to fit the smallest allowed disk.
        desired_disk_size = constants.MINIMUM_DISK_SIZE

    try:
        stdout, stderr = self.parent.execute_cmd(
            *args, dont_transform_to_hpssa_exception=True)
    except processutils.ProcessExecutionError as ex:
        # hpssacli/ssacli returns exit code 1 when the RAID level of
        # the logical disk is not supported on the array. If that's
        # the case, just return saying the logical disk cannot be
        # accommodated in the array.
        # If exit_code is not 1, then it's some other error that we
        # don't expect to appear and hence raise it back.
        if ex.exit_code == 1:
            return False
        else:
            raise exception.HPSSAOperationError(reason=ex)
    except Exception as ex:
        raise exception.HPSSAOperationError(reason=ex)

    # TODO(rameshg87): This always returns in MB, but confirm with
    # HPSSA folks.
    # NOTE: raw string for the regex; '\d' in a plain string is an
    # invalid escape sequence on modern Python.
    match = re.search(r'Max: (\d+)', stdout)
    if not match:
        return False

    max_size_gb = int(match.group(1)) / 1024
    return desired_disk_size <= max_size_gb
4.474746
4.17766
1.071113
def get_physical_drive_dict(self):
    """Return a dictionary with the details of the physical drive.

    The 'controller' id and 'status' depend on whether the drive is
    part of a RAID array ('active') or directly attached to the
    controller ('ready').
    """
    part_of_array = isinstance(self.parent, RaidArray)
    if part_of_array:
        controller = self.parent.parent.id
    else:
        controller = self.parent.id
    return {'size_gb': self.size_gb,
            'controller': controller,
            'id': self.id,
            'disk_type': self.disk_type,
            'interface_type': self.interface_type,
            'model': self.model,
            'firmware': self.firmware,
            'status': 'active' if part_of_array else 'ready',
            'erase_status': self.erase_status}
3.097323
3.011714
1.028425
def logical_raid_levels(self):
    """Gets the raid level for each logical volume.

    :returns the set of list of raid levels configured.
    """
    # Map each member's raid attribute through the reverse RAID-level
    # mapping and collect the distinct results.
    return {mappings.RAID_LEVEL_MAP_REV.get(member.raid)
            for member in self.get_members()}
4.786927
5.220164
0.917007
def sanitize_mimetype(mimetype, filename=None):
    """Sanitize a MIME type so the browser does not render the file."""
    # Whitelisted types (e.g. plain text, images, audio) pass through.
    if mimetype in MIMETYPE_WHITELIST:
        return mimetype

    # Renderable content (HTML, JavaScript, CSS, ...) and known text
    # filenames are rewritten to plain text.
    known_textfile = bool(filename) and filename.lower() in MIMETYPE_TEXTFILES
    if mimetype in MIMETYPE_PLAINTEXT or known_textfile:
        return 'text/plain'

    # Anything else is served as an opaque binary download.
    return 'application/octet-stream'
5.919997
5.869635
1.00858
def make_path(base_uri, path, filename, path_dimensions, split_length):
    """Generate a path as base location for file instance.

    The first ``path_dimensions`` chunks of ``split_length`` characters
    are split off ``path`` as intermediate directories; the remainder
    of ``path`` and ``filename`` form the last components.

    :param base_uri: The base URI.
    :param path: The relative path.
    :param filename: The filename appended as the final component.
    :param path_dimensions: Number of chunks the path should be split
        into.
    :param split_length: The length of any chunk.
    :returns: A string representing the full path.
    :raises AssertionError: If ``path`` is not longer than
        ``path_dimensions * split_length`` characters.
    """
    # Explicit raise (instead of a bare ``assert``) so the validation
    # is not stripped when Python runs with optimizations (-O), while
    # keeping the exception type callers may already catch.
    if len(path) <= path_dimensions * split_length:
        raise AssertionError(
            "Path '%s' is too short to be split into %d chunk(s) of "
            "length %d." % (path, path_dimensions, split_length))
    uri_parts = [path[i * split_length:(i + 1) * split_length]
                 for i in range(path_dimensions)]
    uri_parts.append(path[path_dimensions * split_length:])
    uri_parts.append(filename)
    return os.path.join(base_uri, *uri_parts)
2.121756
2.349634
0.903015
def compute_checksum(stream, algo, message_digest, chunk_size=None,
                     progress_callback=None):
    """Get helper method to compute checksum from a stream.

    :param stream: File-like object.
    :param algo: Identifier for checksum algorithm; used as the prefix
        of the returned string.
    :param message_digest: A message digest instance.
    :param chunk_size: Read at most size bytes from the file at a time.
    :param progress_callback: Function accepting one argument with
        number of bytes read; called once per chunk and once more with
        the final total. (Default: ``None``)
    :returns: The checksum as ``"<algo>:<hexdigest>"``.
    """
    chunk_size = chunk_size_or_default(chunk_size)
    bytes_read = 0
    while True:
        chunk = stream.read(chunk_size)
        if not chunk:
            break
        message_digest.update(chunk)
        bytes_read += len(chunk)
        if progress_callback:
            progress_callback(bytes_read)
    # Report the final total once more after the stream is exhausted.
    if progress_callback:
        progress_callback(bytes_read)
    return "{0}:{1}".format(algo, message_digest.hexdigest())
1.899856
2.298001
0.826743