code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
firmware_file_path = _get_firmware_file(searching_path)
if not firmware_file_path:
return None
# Note(deray): the path of the new firmware file will be of the form:
#
# [TEMP_DIR]/xxx-xxx_actual_firmware_filename
#
# e.g. /tmp/77e8f689-f32c-4727-9fc3-a7dacefe67e4_ilo4_210.bin
file_name, file_ext_with_dot = common.get_filename_and_extension_of(
firmware_file_path)
new_firmware_file_path = os.path.join(
tempfile.gettempdir(), str(uuid.uuid4()) + '_' +
file_name + file_ext_with_dot)
# create a hard link to the raw firmware file
os.link(firmware_file_path, new_firmware_file_path)
return new_firmware_file_path | def _get_firmware_file_in_new_path(searching_path) | Gets the raw firmware file in a new path
Gets the raw firmware file from the extracted directory structure
and creates a hard link to that in a file path and cleans up the
lookup extract path.
:param searching_path: the directory structure to search for
:returns: the raw firmware file with the complete new path | 4.29519 | 4.371674 | 0.982505 |
self.hostname, self.port = addressinfo
self.timeout = timeout
filename = self.fw_file
firmware = open(filename, 'rb').read()
# generate boundary
boundary = b('------hpiLO3t' +
str(random.randint(100000, 1000000)) + 'z')
while boundary in firmware:
boundary = b('------hpiLO3t' +
str(random.randint(100000, 1000000)) + 'z')
# generate body parts
parts = [
# body1
b("--") + boundary +
b(
),
# body2
b("\r\n--") + boundary +
b('''\r\nContent-Disposition: form-data; name="fwimgfile"; '''
'''filename="''') +
b(filename) +
b('''"\r\nContent-Type: application/octet-stream\r\n\r\n'''),
# firmware image
firmware,
# body3
b("\r\n--") + boundary + b("--\r\n"),
]
total_bytes = sum([len(x) for x in parts])
sock = self._get_socket()
# send the firmware image
sock.write(b(self.HTTP_UPLOAD_HEADER %
(total_bytes, boundary.decode('ascii'))))
for part in parts:
sock.write(part)
data = ''
try:
while True:
d = sock.read()
data += d.decode('latin-1')
if not d:
break
except socket.sslerror: # Connection closed
e = sys.exc_info()[1]
if not data:
raise exception.IloConnectionError(
"Communication with %(hostname)s:%(port)d failed: "
"%(error)s" % {'hostname': self.hostname,
'port': self.port, 'error': str(e)})
# Received len(data) bytes
cookie_match = re.search('Set-Cookie: *(.*)', data)
if not cookie_match:
raise exception.IloError("Uploading of file: %s failed due "
"to unknown reason." % filename)
# return the cookie
return cookie_match.group(1) | def upload_file_to(self, addressinfo, timeout) | Uploads the raw firmware file to iLO
Uploads the raw firmware file (already set as attribute in
FirmwareImageControllerBase constructor) to iLO, whose address
information is passed to this method.
:param addressinfo: tuple of hostname and port of the iLO
:param timeout: timeout in secs, used for connecting to iLO
:raises: IloInvalidInputError, if raw firmware file not found
:raises: IloError, for other internal problems
:returns: the cookie so sent back from iLO on successful upload | 3.230479 | 3.113221 | 1.037664 |
err = None
sock = None
try:
for res in socket.getaddrinfo(
self.hostname, self.port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
sock = socket.socket(af, socktype, proto)
sock.settimeout(self.timeout)
# Connecting to {self.hostname} at port {self.port}
sock.connect(sa)
except socket.timeout:
if sock is not None:
sock.close()
err = exception.IloConnectionError(
"Timeout connecting to %(hostname)s:%(port)d"
% {'hostname': self.hostname, 'port': self.port})
except socket.error:
if sock is not None:
sock.close()
e = sys.exc_info()[1]
err = exception.IloConnectionError(
"Error connecting to %(hostname)s:%(port)d : %(error)s"
% {'hostname': self.hostname, 'port': self.port,
'error': str(e)})
except Exception:
raise exception.IloConnectionError(
"Unable to resolve %s" % self.hostname)
if err is not None:
raise err
# wrapping the socket over ssl session
try:
return ssl.wrap_socket(sock, ssl_version=sslversion)
except socket.sslerror:
e = sys.exc_info()[1]
msg = (getattr(e, 'reason', None) or
getattr(e, 'message', None))
# Some older iLO s don't support TLSv1, retry with SSLv3
if ('wrong version number' in msg) and (
sslversion == ssl.PROTOCOL_TLSv1):
return self._get_socket(ssl.PROTOCOL_SSLv3)
raise exception.IloConnectionError(
"Cannot establish ssl session with %(hostname)s:%(port)d : "
"%(error)s" % {'hostname': self.hostname, 'port': self.port,
'error': str(e)}) | def _get_socket(self, sslversion=ssl.PROTOCOL_TLSv1) | Sets up an https connection and do an HTTP/raw socket request
:param sslversion: version of ssl session
:raises: IloConnectionError, for connection failures
:returns: ssl wrapped socket object | 2.206896 | 2.165963 | 1.018898 |
target_file = self.fw_file
common.add_exec_permission_to(target_file)
# create a temp directory where the extraction will occur
temp_dir = tempfile.mkdtemp()
extract_path = os.path.join(temp_dir, self.fw_filename)
try:
self._do_extract(target_file, extract_path)
except exception.ImageExtractionFailed:
# clean up the partial extracted content, if any,
# along with temp dir and re-raise the exception
shutil.rmtree(temp_dir, ignore_errors=True)
raise
# creating a new hard link to the core firmware file
firmware_file_path = _get_firmware_file_in_new_path(extract_path)
# delete the entire extracted content along with temp dir.
shutil.rmtree(temp_dir, ignore_errors=True)
if not firmware_file_path:
raise exception.InvalidInputError(
"Raw firmware file not found in: '%s'" % target_file)
return firmware_file_path, True | def extract(self) | Extracts the raw firmware file from its compact format
Extracts the raw firmware file from its compact file format (already
set as attribute in FirmwareImageControllerBase constructor).
:raises: InvalidInputError, if raw firmware file not found
:raises: ImageExtractionFailed, for extraction related issues
:returns: the raw firmware file with the complete path
:returns: boolean(True) to indicate that a new file got generated
after successful extraction. | 4.561529 | 4.02705 | 1.132722 |
logical_drives = raid_config["LogicalDrives"]
logical_disks = []
controller = controller
for ld in logical_drives:
prop = {'size_gb': ld['CapacityGiB'],
'raid_level': ld['Raid'].strip('Raid'),
'root_device_hint': {
'wwn': '0x' + ld['VolumeUniqueIdentifier']},
'controller': controller,
'physical_disks': ld['DataDrives'],
'volume_name': ld['LogicalDriveName']}
logical_disks.append(prop)
return logical_disks | def _generic_format(self, raid_config, controller=None) | Convert redfish data of current raid config to generic format.
:param raid_config: Raid configuration dictionary
:param controller: Array controller model in post_create read else
None
:returns: current raid config. | 4.439833 | 4.404794 | 1.007955 |
ssc_mesg = self.smart_storage_config_message
result = True
raid_message = ""
for element in ssc_mesg:
if "Success" not in element['MessageId']:
result = False
raid_message = element['MessageId']
return result, raid_message | def _check_smart_storage_message(self) | Check for smart storage message.
:returns: result, raid_message | 5.248734 | 3.949549 | 1.328945 |
if controller:
if not self.logical_drives:
msg = ('No logical drives found on the controller')
LOG.debug(msg)
raise exception.IloLogicalDriveNotFoundError(msg)
raid_op = 'create_raid'
else:
raid_op = 'delete_raid'
result, raid_message = self._check_smart_storage_message()
if result:
configured_raid_settings = self._conn.get(self.settings_uri)
raid_data = {
'logical_disks': self._generic_format(
configured_raid_settings.json(), controller=controller)}
return raid_data
else:
if self.physical_drives is None or not raid_message:
# This controller is not configured or controller
# not used in raid operation
return
else:
msg = ('Failed to perform the %(opr)s operation '
'successfully. Error - %(error)s'
% {'opr': raid_op, 'error': str(raid_message)})
raise exception.IloError(msg) | def read_raid(self, controller=None) | Get the current RAID configuration from the system.
:param controller: If controller model its post-create read else
post-delete
:returns: current raid config. | 5.30203 | 5.250813 | 1.009754 |
if not self.logical_drives:
msg = ('No logical drives found on the controller '
'%(controller)s' % {'controller': str(self.controller_id)})
LOG.debug(msg)
raise exception.IloLogicalDriveNotFoundError(msg)
lds = [{
'Actions': [{"Action": "LogicalDriveDelete"}],
'VolumeUniqueIdentifier':
logical_drive.volume_unique_identifier}
for logical_drive in self.logical_drives]
data = {'LogicalDrives': lds, 'DataGuard': 'Permissive'}
self._conn.put(self.settings_uri, data=data) | def delete_raid(self) | Clears the RAID configuration from the system. | 5.398935 | 5.208737 | 1.036515 |
manager.validate(raid_config)
logical_drives = raid_config['logical_disks']
redfish_logical_disk = []
for ld in logical_drives:
ld_attr = {"Raid": "Raid" + ld["raid_level"]}
ld_attr[
"CapacityGiB"] = -1 if ld[
"size_gb"] == "MAX" else int(ld["size_gb"])
if 'physical_disks' in ld:
ld_attr["DataDrives"] = ld["physical_disks"]
else:
datadrives = {}
if 'number_of_physical_disks' in ld:
datadrives["DataDriveCount"] = (
ld["number_of_physical_disks"])
else:
datadrives["DataDriveCount"] = (constants.
RAID_LEVEL_MIN_DISKS
[ld["raid_level"]])
if 'disk_type' in ld:
datadrives["DataDriveMediaType"] = ld["disk_type"]
if 'interface_type' in ld:
datadrives["DataDriveInterfaceType"] = ld["interface_type"]
ld_attr["DataDrives"] = datadrives
if 'volume_name' in ld:
ld_attr["LogicalDriveName"] = ld["volume_name"]
redfish_logical_disk.append(ld_attr)
data = {
"DataGuard": "Disabled",
"LogicalDrives": redfish_logical_disk
}
self._conn.put(self.settings_uri, data=data) | def create_raid(self, raid_config) | Create the raid configuration on the hardware.
:param raid_config: A dictionary containing target raid configuration
data. This data structure should be as follows:
raid_config = {'logical_disks': [{'raid_level': 1,
'size_gb': 100, 'physical_disks': ['6I:1:5'],
'controller': 'HPE Smart Array P408i-a SR Gen10'},
<info-for-logical-disk-2>]} | 2.939524 | 2.769675 | 1.061325 |
'''
Private method that reads in the data file and organizes it
within this object.
'''
if sldir.endswith('/'):
fname = str(sldir)+str(fname)
else:
fname = str(sldir)+'/'+str(fname)
f=open(fname,'r')
# read header line
line=f.readline()
cols = []
ispec = 0
for i in range(1,len(line.split('|'))):
col = line.split('|')[i].strip()
if '-' in col:
ispec += 1
col = col.split('-')[1]
cols.append(col)
col_num={}
col_tot = len(cols)
print('number of species: ', str(ispec))
print('number of cols: ', str(col_tot))
col_num={}
for a,b in zip(cols,list(range(col_tot))):
col_num[a]=b
# read remainder of the file
lines=f.readlines()
data=[]
for i in range(len(lines)):
v=lines[i].split()
vv=array(v,dtype='float')
data.append(vv)
ilines=i
print("There are "+str(ilines)+" time steps found.")
return data,col_num,cols,col_tot,ilines | def _readFile(self, fname, sldir) | Private method that reads in the data file and organizes it
within this object. | 3.841095 | 3.339646 | 1.15015 |
'''
get one data column with the data
Parameters
----------
col_str : string
One of the column strings in self.cols.
'''
data_column=zeros(self.ilines)
for i in range(self.ilines):
data_column[i]=self.data[i][self.col_num[col_str]]
return data_column | def get(self, col_str) | get one data column with the data
Parameters
----------
col_str : string
One of the column strings in self.cols. | 5.559365 | 2.874601 | 1.933961 |
'''
make a simple plot of two columns against each other.
An example would be instance.plot_xtime('PB206', label='PB206 vs t_y'
Recomend using the plot function DataPlot.plot() it has more
functionality.
Parameters
----------
Y : string
Column on Y-axis.
X : string, optional
Column on X-axis. The default is "time".
label : string, optional
Legend label. The default is "default".
labelX : string, optional
The label on the X axis. The default is None.
labelY : string, optional
The label on the Y axis. The default is None.
title : string, optional
The Title of the Graph. The default is None.
shape : string, optional
What shape and colour the user would like their plot in.
The default is '.'.
logX : boolean, optional
A boolean of weather the user wants the x axis
logarithmically. The default is False.
logY : boolean, optional
A boolean of weather the user wants the Y axis
logarithmically. The default is True.
base : integer, optional
The base of the logarithm. The default is 10.
Notes
-----
For all possable choices visit,
<http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.plot>
'''
if label is 'default':
lab_str=y
else:
lab_str=label
try:
self.get(x)
except KeyError:
x='age'
DataPlot.plot(self,x,y,legend=lab_str,labelx=labelx, labely=labely,
title=title, shape=shape,logx=logx, logy=logy, base=base)
'''
print X,Y
xdat=self.get(X)
ydat=self.get(Y)
self.xdat = xdat
self.ydat = ydat
plot(xdat,log10(ydat),label=lab_str)
legend()
''' | def plot_xtime(self, y, x='time', label='default', labelx=None,
labely=None ,title=None, shape='.', logx=False,
logy=True, base=10) | make a simple plot of two columns against each other.
An example would be instance.plot_xtime('PB206', label='PB206 vs t_y').
Recommend using the plot function DataPlot.plot(); it has more
functionality.
Parameters
----------
Y : string
Column on Y-axis.
X : string, optional
Column on X-axis. The default is "time".
label : string, optional
Legend label. The default is "default".
labelX : string, optional
The label on the X axis. The default is None.
labelY : string, optional
The label on the Y axis. The default is None.
title : string, optional
The Title of the Graph. The default is None.
shape : string, optional
What shape and colour the user would like their plot in.
The default is '.'.
logX : boolean, optional
A boolean of whether the user wants the x axis
logarithmically. The default is False.
logY : boolean, optional
A boolean of whether the user wants the Y axis
logarithmically. The default is True.
base : integer, optional
The base of the logarithm. The default is 10.
Notes
-----
For all possible choices visit,
<http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.plot> | 3.655049 | 1.541092 | 2.371726 |
fname=self.findFile(fname,numtype)
if self.inputdir == '':
self.inputdir = self.sldir # This chunk of code changes into the directory where fname is,
os.chdir(self.inputdir) # and appends a '/' to the directory title so it accesses the
self.sldir=os.getcwd() + '/' # file correctly
f=open(fname,'r')
lines=f.readlines()
if self.inputdir != './': #This chunk of code changes back into the directory you started in.
os.chdir(self.startdir)
self.sldir = self.inputdir
for i in range(len(lines)):
lines[i]=lines[i].strip()
for i in range(len(lines)):
if lines[i].startswith('#'):
lines[i]=lines[i].strip('#')
tmp=lines[i].split()
tmp1=[]
for j in range(len(tmp)):
if tmp[j] != '=' or '':
tmp1.append(tmp[j])
tmp=tmp1
for j in range(len(tmp)):
if tmp[j]== attri:
try:
if '.' in tmp[j+1]:
return float(tmp[j+1])
else:
return int(tmp[j+1])
except ValueError:
return str(tmp[j+1])
elif lines[i].startswith('H'):
continue
else:
print('This cycle attribute does not exist')
print('Returning None')
return None | def getCycleData(self, attri, fname, numtype='cycNum') | In this method a column of data for the associated cycle
attribute is returned.
Parameters
----------
attri : string
The name of the attribute we are looking for.
fname : string
The name of the file we are getting the data from or the
cycle number found in the filename.
numtype : string, optional
Determines whether fname is the name of a file or, the
cycle number. If it is 'file' it will then interpret it as
a file, if it is 'cycNum' it will then interpret it as a
cycle number. The default is "cycNum". | 3.82342 | 3.876476 | 0.986313 |
fname=self.findFile(fname,numtype)
f=open(fname,'r')
for i in range(self.index+1):
f.readline()
lines=f.readlines()
for i in range(len(lines)):
lines[i]=lines[i].strip()
lines[i]=lines[i].split()
index=0
data=[]
while index < len (self.dcols):
if attri== self.dcols[index]:
break
index+=1
for i in range(len(lines)):
if index==5 and len(lines[i])==7:
data.append(str(lines[i][index].capitalize())+'-'\
+str(lines[i][index+1]))
elif index==5 and len(lines[i])!=7:
tmp=str(lines[i][index])
if tmp[len(tmp)-1].isdigit():
tmp1=tmp[0]+tmp[1]
tmp1=tmp1.capitalize()
tmp2=''
for j in range(len(tmp)):
if j == 0 or j == 1:
continue
tmp2+=tmp[j]
data.append(tmp1+'-'+tmp2)
elif tmp=='PROT':
data.append('H-1')
elif tmp==('NEUT'or'NEUTR'or'nn'or'N 1'or'N-1'):
data.append('N-1')
else:
data.append(tmp)
elif index==0:
data.append(int(lines[i][index]))
else:
data.append(float(lines[i][index]))
return array(data) | def getColData(self, attri, fname, numtype='cycNum') | In this method a column of data for the associated column
attribute is returned.
Parameters
----------
attri : string
The name of the attribute we are looking for.
fname : string
The name of the file we are getting the data from or the
cycle number found in the filename.
numtype : string, optional
Determines whether fname is the name of a file or, the
cycle number. If it is 'file' it will then interpret it as
a file, if it is 'cycNum' it will then interpret it as a
cycle number. The default is "cycNum". | 2.966337 | 3.09461 | 0.958549 |
'''
In this method instead of getting a particular column of data,
the program gets a particular row of data for a particular
element name.
attri : string
The name of the attribute we are looking for. A complete
list of them can be obtained by calling
>>> get('element_name')
fname : string
The name of the file we are getting the data from or the
cycle number found in the filename.
numtype : string, optional
Determines whether fname is the name of a file or, the
cycle number. If it is 'file' it will then interpret it as
a file, if it is 'cycNum' it will then interpret it as a
cycle number. The default is "cycNum".
Returns
-------
array
A numpy array of the four element attributes, number, Z, A
and abundance, in that order.
Notes
-----
Warning
'''
element=[] #Variable for holding the list of element names
number=[] #Variable for holding the array of numbers
z=[] #Variable for holding the array of z
a=[] #Variable for holding the array of a
abd=[] #Variable for holding the array of Abundance
data=[] #variable for the final list of data
fname=self.findFile(fname,numtype)
f=open(fname,'r')
for i in range(self.index+1):
f.readline()
lines=f.readlines()
for i in range(len(lines)):
lines[i]=lines[i].strip()
lines[i]=lines[i].split()
index=0
data=[]
while index < len (self.dcols):
if attri== self.dcols[index]:
break
index+=1
element=self.get(self.dcols[5],fname,numtype)
number=[]
z=[]
a=[]
isom=[]
abd=[]
for i in range(len(lines)):
number.append(int(lines[i][0]))
z.append(float(lines[i][1]))
isom.append(float(lines[i][2]))
abd.append(float(lines[i][1]))
index=0 #Variable for determing the index in the data columns
while index < len(element):
if attri == element[index]:
break
index+=1
data.append(number[index])
data.append(z[index])
data.append(a[index])
data.append(isom[index])
data.append(abd[index])
return array(data) | def getElement(self, attri, fname, numtype='cycNum') | In this method instead of getting a particular column of data,
the program gets a particular row of data for a particular
element name.
attri : string
The name of the attribute we are looking for. A complete
list of them can be obtained by calling
>>> get('element_name')
fname : string
The name of the file we are getting the data from or the
cycle number found in the filename.
numtype : string, optional
Determines whether fname is the name of a file or, the
cycle number. If it is 'file' it will then interpret it as
a file, if it is 'cycNum' it will then interpret it as a
cycle number. The default is "cycNum".
Returns
-------
array
A numpy array of the four element attributes, number, Z, A
and abundance, in that order.
Notes
-----
Warning | 3.781422 | 2.038869 | 1.854666 |
''' Private method for getting a cycle, called from get.'''
yps=self.get('ABUNDANCE_MF', cycle)
z=self.get('Z', cycle) #charge
a=self.get('A', cycle) #mass
isomers=self.get('ISOM', cycle)
a_iso_to_plot,z_iso_to_plot,abunds,isotope_to_plot,el_iso_to_plot,isom=\
self._process_abundance_vector(a,z,isomers,yps)
self.a_iso_to_plot=a_iso_to_plot
self.isotope_to_plot=isotope_to_plot
self.z_iso_to_plot=z_iso_to_plot
self.el_iso_to_plot=el_iso_to_plot
self.abunds=array(abunds)
self.isom=isom
if decayed:
try:
self.decay_idp
except AttributeError:
print("WARNING: decayed in _getcycle ignores isomers " \
"and will decay alpha-unstable p-rich nuclei as if they were beta+ stable.")
print("Initialising decay index pointers ....")
self.decay_indexpointer() # provides self.decay_idp and
ind_tmp=self.idp_to_stables_in_isostoplot
isotope_decay=array(isotope_to_plot)[ind_tmp]
z_iso_decay=array(z_iso_to_plot)[ind_tmp]
a_iso_decay=array(a_iso_to_plot)[ind_tmp]
el_iso_decay=array(el_iso_to_plot)[ind_tmp]
abunds_decay=zeros(len(ind_tmp), dtype='float64')
for i in range(len(isotope_to_plot)):
idp=where(isotope_decay==isotope_to_plot[self.decay_idp[i]])[0] # points from
# i on isotope_to_plot scale to decay target_on_decayed array scale
abunds_decay[idp] += abunds[i]
if self.debug:
print("Decayed array:")
for i in range(len(ind_tmp)):
print(isotope_decay[i], z_iso_decay[i], a_iso_decay[i], el_iso_decay[i], abunds_decay[i])
self.a_iso_to_plot=a_iso_decay
self.isotope_to_plot=isotope_decay
self.z_iso_to_plot=z_iso_decay
self.el_iso_to_plot=el_iso_decay
self.abunds=abunds_decay | def _getcycle(self, cycle, decayed=False) | Private method for getting a cycle, called from get. | 3.838504 | 3.713577 | 1.033641 |
''' Private method for getting an attribute, called from get.'''
if str(fname.__class__)=="<type 'list'>":
isList=True
else:
isList=False
data=[]
if fname==None:
fname=self.files
numtype='file'
isList=True
if isList:
for i in range(len(fname)):
if attri in self.cattrs:
data.append(self.getCycleData(attri,fname[i],numtype))
elif attri in self.dcols:
data.append(self.getColData(attri,fname[i],numtype))
elif attri in self.get('ISOTP',fname,numtype):
data.append(self.getElement(attri,fname[i],numtype))
else:
print('Attribute '+attri+ ' does not exist')
print('Returning none')
return None
else:
if attri in self.cattrs:
return self.getCycleData(attri,fname,numtype)
elif attri in self.dcols:
return self.getColData(attri,fname,numtype)
elif attri in self.get('ISOTP',fname,numtype):
return self.getElement(attri,fname,numtype)
else:
print('Attribute '+attri+ ' does not exist')
print('Returning none')
return None
return data | def _getattr(self, attri, fname=None, numtype='cycNum') | Private method for getting an attribute, called from get. | 2.537511 | 2.376815 | 1.06761 |
'''
Private method that reads in and organizes the .ppn file
Loads the data of the .ppn file into the variable cols.
'''
if sldir.endswith(os.sep):
#Making sure fname will be formatted correctly
fname = str(sldir)+str(fname)
else:
fname = str(sldir)+os.sep+str(fname)
self.sldir+=os.sep
f=open(fname,'r')
lines=f.readlines()
for i in range(len(lines)):
lines[i]=lines[i].strip()
cols = ['ISOTP', 'ABUNDANCE_MF'] #These are constant, .ppn files have no header to read from
for i in range(len(lines)):
if not lines[i].startswith('H'):
index = i-1
break
return cols, index | def _readPPN(self, fname, sldir) | Private method that reads in and organizes the .ppn file
Loads the data of the .ppn file into the variable cols. | 6.676397 | 4.522584 | 1.476235 |
'''
private method that reads in and organizes the .DAT file
Loads the data of the .DAT File into the variables cattrs and cols.
In both these cases they are dictionaries, but in the case of cols,
it is a dictionary of numpy array exect for the element ,
element_name where it is just a list
'''
cattrs=[]
if sldir.endswith(os.sep):
#Making sure fname will be formatted correctly
fname = str(sldir)+str(fname)
else:
fname = str(sldir)+os.sep+str(fname)
self.sldir+=os.sep
f=open(fname,'r')
lines=f.readlines()
for i in range(len(lines)):
lines[i]=lines[i].strip()
cols=lines[0].strip('H')
cols=cols.strip()
cols=cols.split()
for i in range(len(lines)):
if lines[i].startswith('#'):
# if it is a cycle attribute line
lines[i]=lines[i].strip('#')
tmp=lines[i].split()
tmp1=[]
for j in range(len(tmp)):
if tmp[j] != '=' or '':
tmp1.append(tmp[j])
tmp=tmp1
j=0
while j <len(tmp):
cattrs.append(tmp[j])
j+=2
elif not lines[i].startswith('H'):
index = i-1
break
return cattrs,cols, index | def _readFile(self, fname, sldir) | private method that reads in and organizes the .DAT file
Loads the data of the .DAT File into the variables cattrs and cols.
In both these cases they are dictionaries, but in the case of cols,
it is a dictionary of numpy arrays, except for the element,
element_name where it is just a list | 5.803196 | 2.90784 | 1.995707 |
numType=numtype.upper()
if numType == 'FILE':
#do nothing
return fname
elif numType == 'CYCNUM':
try:
fname = int(fname)
except ValueError:
print('Improper choice:'+ str(fname))
print('Reselecting as 0')
fname = 0
print('Using '+self.files[fname])
try:
return self.files[self.indexp_cyc2filels[fname]]
except IndexError:
mods = array(self.get('mod'), dtype=int)
if fname not in mods:
print('You seem to try to plot a cycle that is not present: '+str(fname))
fname = mods[-1]
print('I will assume you want to plot the last cycle in the run: '+str(fname))
print('[I am not 100% sure this escape is debugged. You better do this again with')
print('the correct input.]')
return self.files[fname] | def findFile(self, fname, numtype) | Function that finds the associated file for fname when Fname is
time or NDump.
Parameters
----------
fname : string
The name of the file we are looking for.
numType : string
Designates how this function acts and how it interprets
fname. If numType is 'file', this function will get the
desired attribute from that file. If numType is 'cycNum',
this function will get the desired attribute from that file
with fname's model number. | 8.850872 | 8.603757 | 1.028722 |
'''
A function which determines whether and how to retry.
:param ~azure.storage.models.RetryContext context:
The retry context. This contains the request, response, and other data
which can be used to determine whether or not to retry.
:param function() backoff:
A function which returns the backoff time if a retry is to be performed.
:return:
An integer indicating how long to wait before retrying the request,
or None to indicate no retry should be performed.
:rtype: int or None
'''
# If the context does not contain a count parameter, this request has not
# been retried yet. Add the count parameter to track the number of retries.
if not hasattr(context, 'count'):
context.count = 0
# Determine whether to retry, and if so increment the count, modify the
# request as desired, and return the backoff.
if self._should_retry(context):
context.count += 1
# If retry to secondary is enabled, attempt to change the host if the
# request allows it
if self.retry_to_secondary:
self._set_next_host_location(context)
return backoff(context)
return None | def _retry(self, context, backoff) | A function which determines whether and how to retry.
:param ~azure.storage.models.RetryContext context:
The retry context. This contains the request, response, and other data
which can be used to determine whether or not to retry.
:param function() backoff:
A function which returns the backoff time if a retry is to be performed.
:return:
An integer indicating how long to wait before retrying the request,
or None to indicate no retry should be performed.
:rtype: int or None | 4.773135 | 2.723309 | 1.752697 |
'''
<?xml version="1.0" encoding="utf-8"?>
<StorageServiceStats>
<GeoReplication>
<Status>live|bootstrap|unavailable</Status>
<LastSyncTime>sync-time|<empty></LastSyncTime>
</GeoReplication>
</StorageServiceStats>
'''
if response is None or response.body is None:
return None
service_stats_element = ETree.fromstring(response.body)
geo_replication_element = service_stats_element.find('GeoReplication')
geo_replication = GeoReplication()
geo_replication.status = geo_replication_element.find('Status').text
geo_replication.last_sync_time = parser.parse(geo_replication_element.find('LastSyncTime').text)
service_stats = ServiceStats()
service_stats.geo_replication = geo_replication
return service_stats | def _convert_xml_to_service_stats(response) | <?xml version="1.0" encoding="utf-8"?>
<StorageServiceStats>
<GeoReplication>
<Status>live|bootstrap|unavailable</Status>
<LastSyncTime>sync-time|<empty></LastSyncTime>
</GeoReplication>
</StorageServiceStats> | 2.699086 | 1.630432 | 1.655442 |
fw_update_action = self._actions.update_firmware
if not fw_update_action:
raise (sushy.exceptions.
MissingActionError(action='#UpdateService.SimpleUpdate',
resource=self._path))
return fw_update_action | def _get_firmware_update_element(self) | Get the url for firmware update
:returns: firmware update url
:raises: Missing resource error on missing url | 8.690196 | 9.163855 | 0.948312 |
action_data = {
'ImageURI': file_url,
}
target_uri = self._get_firmware_update_element().target_uri
try:
self._conn.post(target_uri, data=action_data)
except sushy.exceptions.SushyError as e:
msg = (('The Redfish controller failed to update firmware '
'with file %(file)s Error %(error)s') %
{'file': file_url, 'error': str(e)})
LOG.debug(msg) # noqa
raise exception.IloError(msg)
self.wait_for_redfish_firmware_update_to_complete(redfish_inst)
try:
state, percent = self.get_firmware_update_progress()
except sushy.exceptions.SushyError as e:
msg = ('Failed to get firmware progress update '
'Error %(error)s' % {'error': str(e)})
LOG.debug(msg)
raise exception.IloError(msg)
if state == "Error":
msg = 'Unable to update firmware'
LOG.debug(msg) # noqa
raise exception.IloError(msg)
elif state == "Unknown":
msg = 'Status of firmware update not known'
LOG.debug(msg) # noqa
else: # "Complete" | "Idle"
LOG.info('Flashing firmware file: %s ... done', file_url) | def flash_firmware(self, redfish_inst, file_url) | Perform firmware flashing on a redfish system
:param file_url: url to firmware bits.
:param redfish_inst: redfish instance
:raises: IloError, on an error from iLO. | 2.897887 | 2.943515 | 0.984499 |
p_state = ['Idle']
c_state = ['Idle']
def has_firmware_flash_completed():
curr_state, curr_percent = self.get_firmware_update_progress()
p_state[0] = c_state[0]
c_state[0] = curr_state
if (((p_state[0] in ['Updating', 'Verifying',
'Uploading', 'Writing'])
and (c_state[0] in ['Complete', 'Error',
'Unknown', 'Idle']))
or (p_state[0] == 'Idle' and (c_state[0] in
['Complete', 'Error']))):
return True
return False
common.wait_for_operation_to_complete(
has_firmware_flash_completed,
delay_bw_retries=30,
failover_msg='iLO firmware update has failed.'
)
common.wait_for_ilo_after_reset(redfish_object) | def wait_for_redfish_firmware_update_to_complete(self, redfish_object) | Continuously polls for iLO firmware update to complete.
:param redfish_object: redfish instance | 4.321634 | 4.54024 | 0.951852 |
# perform refresh
try:
self.refresh()
except sushy.exceptions.SushyError as e:
msg = (('Progress of firmware update not known. '
'Error %(error)s') %
{'error': str(e)})
LOG.debug(msg)
return "Unknown", "Unknown"
# NOTE: Percentage is returned None after firmware flash is completed.
return (self.firmware_state, self.firmware_percentage) | def get_firmware_update_progress(self) | Get the progress of the firmware update.
:returns: firmware update state, one of the following values:
"Idle","Uploading","Verifying","Writing",
"Updating","Complete","Error".
If the update resource is not found, then "Unknown".
:returns: firmware update progress percent | 7.087871 | 6.791448 | 1.043647 |
return BIOSPendingSettings(
self._conn, utils.get_subresource_path_by(
self, ["@Redfish.Settings", "SettingsObject"]),
redfish_version=self.redfish_version) | def pending_settings(self) | Property to provide reference to bios_pending_settings instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset. | 11.98697 | 8.792079 | 1.363383 |
return BIOSBootSettings(
self._conn, utils.get_subresource_path_by(
self, ["Oem", "Hpe", "Links", "Boot"]),
redfish_version=self.redfish_version) | def boot_settings(self) | Property to provide reference to bios boot instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset. | 8.198648 | 7.252156 | 1.130512 |
return iscsi.ISCSIResource(
self._conn, utils.get_subresource_path_by(
self, ["Oem", "Hpe", "Links", "iScsi"]),
redfish_version=self.redfish_version) | def iscsi_resource(self) | Property to provide reference to bios iscsi resource instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset. | 7.042327 | 6.677861 | 1.054578 |
return BIOSMappings(
self._conn, utils.get_subresource_path_by(
self, ["Oem", "Hpe", "Links", "Mappings"]),
redfish_version=self.redfish_version) | def bios_mappings(self) | Property to provide reference to bios mappings instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset. | 7.18903 | 7.325027 | 0.981434 |
return BIOSBaseConfigs(
self._conn, utils.get_subresource_path_by(
self, ["Oem", "Hpe", "Links", "BaseConfigs"]),
redfish_version=self.redfish_version) | def _get_base_configs(self) | Method that returns object of bios base configs. | 7.694677 | 5.315897 | 1.447484 |
bios_properties = {
'BootMode': mappings.GET_BIOS_BOOT_MODE_MAP_REV.get(boot_mode)
}
if boot_mode == sys_cons.BIOS_BOOT_MODE_UEFI:
bios_properties['UefiOptimizedBoot'] = 'Enabled'
self.update_bios_data_by_patch(bios_properties) | def set_pending_boot_mode(self, boot_mode) | Sets the boot mode of the system for next boot.
:param boot_mode: either sys_cons.BIOS_BOOT_MODE_LEGACY_BIOS,
sys_cons.BIOS_BOOT_MODE_UEFI. | 5.114661 | 4.531526 | 1.128684 |
bios_settings_data = {
'Attributes': data
}
self._conn.post(self.path, data=bios_settings_data) | def update_bios_data_by_post(self, data) | Update bios data by post
:param data: default bios config data | 6.668968 | 8.170454 | 0.81623 |
bios_settings_data = {
'Attributes': data
}
self._conn.patch(self.path, data=bios_settings_data) | def update_bios_data_by_patch(self, data) | Update bios data by patch
:param data: default bios config data | 6.163855 | 7.856835 | 0.784521 |
boot_string = None
if not self.persistent_boot_config_order or not self.boot_sources:
msg = ('Boot sources or persistent boot config order not found')
LOG.debug(msg)
raise exception.IloError(msg)
preferred_boot_device = self.persistent_boot_config_order[0]
for boot_source in self.boot_sources:
if ((boot_source.get("StructuredBootString") is not None) and (
preferred_boot_device ==
boot_source.get("StructuredBootString"))):
boot_string = boot_source["BootString"]
break
else:
msg = (('Persistent boot device failed, as no matched boot '
'sources found for device: %(persistent_boot_device)s')
% {'persistent_boot_device': preferred_boot_device})
LOG.debug(msg)
raise exception.IloError(msg)
for key, value in BOOT_SOURCE_TARGET_TO_PARTIAL_STRING_MAP.items():
for val in value:
if val in boot_string:
return key
return sushy.BOOT_SOURCE_TARGET_NONE | def get_persistent_boot_device(self) | Get current persistent boot device set for the host
:returns: persistent boot device for the system
:raises: IloError, on an error from iLO. | 3.419533 | 3.439946 | 0.994066 |
boot_sources = self.boot_sources
if not boot_sources:
msg = ('Boot sources are not found')
LOG.debug(msg)
raise exception.IloError(msg)
for boot_source in boot_sources:
if (mac.upper() in boot_source['UEFIDevicePath'] and
'iSCSI' in boot_source['UEFIDevicePath']):
return boot_source['StructuredBootString']
else:
msg = ('MAC provided "%s" is Invalid' % mac)
raise exception.IloInvalidInputError(msg) | def get_uefi_boot_string(self, mac) | Get uefi iscsi boot string for the host
:returns: iscsi boot string for the system
:raises: IloError, on an error from iLO. | 4.33855 | 4.413901 | 0.982929 |
'''
Molecular plasma viscosity (Spitzer 1962)
Parameters
----------
X : float
H mass fraction
T : float
temperature in K
rho : float
density in cgs
Returns
-------
nu : float
molecular diffusivity in [cm**2/s]
Notes
-----
According to Eq 22 in Schatzman (1977). Assume log Lambda = 15.
(see Table 5.1), a H/He mix (for different mix use Eq. 5.54 in
Spitzer text book)
Examples
--------
see astronomy.visc_rad_kap_sc
'''
visc_mol = 1.84e-17*(1.+7.*X)*(old_div(T**2.5,rho))
return visc_mol | def visc_mol_sol(T,rho,X) | Molecular plasma viscosity (Spitzer 1962)
Parameters
----------
X : float
H mass fraction
T : float
temperature in K
rho : float
density in cgs
Returns
-------
nu : float
molecular diffusivity in [cm**2/s]
Notes
-----
According to Eq 22 in Schatzman (1977). Assume log Lambda = 15.
(see Table 5.1), a H/He mix (for different mix use Eq. 5.54 in
Spitzer text book)
Examples
--------
see astronomy.visc_rad_kap_sc | 11.193857 | 1.773695 | 6.311037 |
'''
Radiative viscosity (Thomas, 1930) for e- scattering opacity
Parameters
----------
X : float
H mass fraction
T : float
temperature in K
rho : float
density in cgs
Returns
-------
nu : float
radiative diffusivity in [cm**2/s]
Examples
--------
>>> In [1]: import astronomy as ast
>>> In [2]: l = 100*1.e5 # 100km
>>> In [3]: v = 1.e5 # typical velocity
>>> In [4]: T = 90.e6 # temperature
>>> In [5]: X = 0.001 # H mass fraction
>>> In [6]: rho = 100. # density
>>> In [7]: nu = ast.visc_rad_kap_sc(T,rho,X)
>>> In [8]: Re=v*l/nu
>>> In [9]: print "Re_rad = "+str('%g'%Re)
>>> Re_rad = 4.43512e+08
Notes
-----
Eqn. 14' in Schatzman, 1977, assume electron scattering opacity
kappa_sc = 0.2*(1+X), Kippenhahn (2nd edn, Eqn 17.2)
'''
kappa = 0.2*(1.+X)
nu_rad = 6.88e-26*(old_div(T**4,(kappa*rho**2)))
return nu_rad | def visc_rad_kap_sc(T,rho,X) | Radiative viscosity (Thomas, 1930) for e- scattering opacity
Parameters
----------
X : float
H mass fraction
T : float
temperature in K
rho : float
density in cgs
Returns
-------
nu : float
radiative diffusivity in [cm**2/s]
Examples
--------
>>> In [1]: import astronomy as ast
>>> In [2]: l = 100*1.e5 # 100km
>>> In [3]: v = 1.e5 # typical velocity
>>> In [4]: T = 90.e6 # temperature
>>> In [5]: X = 0.001 # H mass fraction
>>> In [6]: rho = 100. # density
>>> In [7]: nu = ast.visc_rad_kap_sc(T,rho,X)
>>> In [8]: Re=v*l/nu
>>> In [9]: print "Re_rad = "+str('%g'%Re)
>>> Re_rad = 4.43512e+08
Notes
-----
Eqn. 14' in Schatzman, 1977, assume electron scattering opacity
kappa_sc = 0.2*(1+X), Kippenhahn (2nd edn, Eqn 17.2) | 5.368656 | 1.335619 | 4.019601 |
'''
Gamma1 for a mix of ideal gas and radiation
Hansen & Kawaler, page 177, Eqn. 3.110
Parameters
----------
beta : float
Gas pressure fraction Pgas/(Pgas+Prad)
'''
Gamma3minus1 = (old_div(2.,3.))*(4.-3.*beta)/(8.-7.*beta)
Gamma1 = beta + (4.-3.*beta) * Gamma3minus1
return Gamma1 | def Gamma1_gasrad(beta) | Gamma1 for a mix of ideal gas and radiation
Hansen & Kawaler, page 177, Eqn. 3.110
Parameters
----------
beta : float
Gas pressure fraction Pgas/(Pgas+Prad) | 10.94864 | 3.861648 | 2.835224 |
'''
P = R/mu * rho * T
Parameters
----------
mu : float
Mean molecular weight
rho : float
Density [cgs]
T : float
Temperature [K]
'''
R = old_div(boltzmann_constant, atomic_mass_unit)
return (old_div(R,mu)) * rho * T | def Pgas(rho,T,mu) | P = R/mu * rho * T
Parameters
----------
mu : float
Mean molecular weight
rho : float
Density [cgs]
T : float
Temperature [K] | 5.555414 | 3.284561 | 1.691372 |
''' Curvature MiMf from Ferrario etal. 2005MNRAS.361.1131.'''
mf=-0.00012336*mi**6+0.003160*mi**5-0.02960*mi**4+\
0.12350*mi**3-0.21550*mi**2+0.19022*mi+0.46575
return mf | def mimf_ferrario(mi) | Curvature MiMf from Ferrario etal. 2005MNRAS.361.1131. | 6.202009 | 3.921379 | 1.581589 |
'''
Returns
-------
N(M)dM
for given mass according to Kroupa IMF, vectorization
available via vimf()
'''
m1 = 0.08; m2 = 0.50
a1 = 0.30; a2 = 1.30; a3 = 2.3
const2 = m1**-a1 -m1**-a2
const3 = m2**-a2 -m2**-a3
if m < 0.08:
alpha = 0.3
const = -const2 -const3
elif m < 0.50:
alpha = 1.3
const = -const3
else:
alpha = 2.3
const = 0.0
# print m,alpha, const, m**-alpha + const
return m**-alpha + const | def imf(m) | Returns
-------
N(M)dM
for given mass according to Kroupa IMF, vectorization
available via vimf() | 5.334055 | 3.180345 | 1.677194 |
'''
Integrate IMF between m1 and m2.
Parameters
----------
m1 : float
Min mass
m2 : float
Max mass
m : float
Mass array
imf : float
IMF array
bywhat : string, optional
'bymass' integrates the mass that goes into stars of
that mass interval; or 'bynumber' which integrates the number
of stars in that mass interval. The default is 'bymass'.
integrate : string, optional
'normal' uses sc.integrate.trapz; 'cum' returns cumulative
trapezoidal integral. The default is 'normal'.
'''
ind_m = (m >= min(m1,m2)) & (m <= max(m1,m2))
if integral is 'normal':
int_func = sc.integrate.trapz
elif integral is 'cum':
int_func = sc.integrate.cumtrapz
else:
print("Error in int_imf_dm: don't know how to integrate")
return 0
if bywhat is 'bymass':
return int_func(m[ind_m]*imf[ind_m],m[ind_m])
elif bywhat is 'bynumber':
return int_func(imf[ind_m],m[ind_m])
else:
print("Error in int_imf_dm: don't know by what to integrate")
return 0 | def int_imf_dm(m1,m2,m,imf,bywhat='bymass',integral='normal') | Integrate IMF between m1 and m2.
Parameters
----------
m1 : float
Min mass
m2 : float
Max mass
m : float
Mass array
imf : float
IMF array
bywhat : string, optional
'bymass' integrates the mass that goes into stars of
that mass interval; or 'bynumber' which integrates the number
of stars in that mass interval. The default is 'bymass'.
integrate : string, optional
'normal' uses sc.integrate.trapz; 'cum' returns cumulative
trapezoidal integral. The default is 'normal'. | 2.730355 | 1.595106 | 1.711708 |
'''
orbital angular momentum.
e.g Ge etal2010
Parameters
----------
m1, m2 : float
Masses of both stars in Msun.
A : float
Separation in Rsun.
e : float
Eccentricity
'''
a_cm = a * rsun_cm
m1_g = m1 * msun_g
m2_g = m2 * msun_g
J_orb=np.sqrt(grav_const*a_cm*(old_div((m1_g**2*m2_g**2),(m1_g+m2_g))))*(1-e**2)
return J_orb | def am_orb(m1,m2,a,e) | orbital angular momentum.
e.g Ge etal2010
Parameters
----------
m1, m2 : float
Masses of both stars in Msun.
A : float
Separation in Rsun.
e : float
Eccentricity | 5.570163 | 3.014339 | 1.847888 |
'''
mass loss rate van Loon etal (2005).
Parameters
----------
L : float
L in L_sun.
Teff : float
Teff in K.
Returns
-------
Mdot
Mdot in Msun/yr
Notes
-----
ref: van Loon etal 2005, A&A 438, 273
'''
Mdot = -5.65 + np.log10(old_div(L,10.**4)) -6.3*np.log10(old_div(Teff,3500.))
return Mdot | def mass_loss_loon05(L,Teff) | mass loss rate van Loon etal (2005).
Parameters
----------
L : float
L in L_sun.
Teff : float
Teff in K.
Returns
-------
Mdot
Mdot in Msun/yr
Notes
-----
ref: van Loon etal 2005, A&A 438, 273 | 5.08424 | 2.290086 | 2.220109 |
'''
Parameters
----------
m1, m2 : float
M in Msun.
r : float
Distance in Rsun.
Returns
-------
Epot
Epot in erg.
'''
epo = -grav_const * m1 * m2 * msun_g**2 / (r * rsun_cm)
return epo | def energ_orb(m1,m2,r) | Parameters
----------
m1, m2 : float
M in Msun.
r : float
Distance in Rsun.
Returns
-------
Epot
Epot in erg. | 7.170113 | 3.910461 | 1.833572 |
A *= rsun_cm
print(A)
velocity = np.sqrt(grav_const*msun_g*(M1+M2)/A)
print(old_div(velocity,1.e5))
p = 2.*np.pi * A / velocity
p /= (60*60*24.)
return p | def period(A,M1,M2) | calculate binary period from separation.
Parameters
----------
A : float
separation A Rsun.
M1, M2 : float
M in Msun.
Returns
-------
p
period in days. | 8.7481 | 7.819973 | 1.118687 |
ve = np.sqrt(2.*grav_const*M*msun_g/(R*rsun_cm))
ve = ve*1.e-5
return ve | def escape_velocity(M,R) | escape velocity.
Parameters
----------
M : float
Mass in solar masses.
R : float
Radius in solar radiu.
Returns
-------
v_escape
in km/s. | 9.207775 | 10.587363 | 0.869695 |
'''
Returns
-------
Na*<sigma v>
for MACS [mb] at T [K].
'''
Na = avogadro_constant
k = boltzmann_constant
vtherm=(2.*k*T/mass_H_atom)**0.5
s = macs*1.e-27
Nasv = s*vtherm*Na
return Nasv | def Nasv(macs,T) | Returns
-------
Na*<sigma v>
for MACS [mb] at T [K]. | 14.633921 | 6.593204 | 2.219546 |
'''
Returns
-------
MACS
[mb] at T [K] from Na*<sigma v>.
'''
Na = avogadro_constant
k = boltzmann_constant
vtherm=(2.*k*T/mass_H_atom)**0.5
s = old_div(nasv,(vtherm*Na))
macs = s*1.e27
return macs | def macs(nasv,T) | Returns
-------
MACS
[mb] at T [K] from Na*<sigma v>. | 16.277479 | 8.075722 | 2.015607 |
'''
mean molecular weight per free electron, assuming full ionisation, and
approximating mu_i/Z_i ~ 2 for all elements heavier then Helium.
(Kippenhahn & Weigert, Ch 13.1, Eq. 13.8)
Parameters
----------
X : float
Mass fraction of H.
'''
try:
mu_e = old_div(2.,(1.+X))
except TypeError:
X=np.array([X])
mu_e = old_div(2.,(1.+X))
return mu_e | def mu_e(X) | mean molecular weight per free electron, assuming full ionisation, and
approximating mu_i/Z_i ~ 2 for all elements heavier then Helium.
(Kippenhahn & Weigert, Ch 13.1, Eq. 13.8)
Parameters
----------
X : float
Mass fraction of H. | 9.212391 | 2.12584 | 4.333529 |
'''
mean molecular weight assuming full ionisation.
(Kippenhahn & Weigert, Ch 13.1, Eq. 13.6)
Parameters
----------
X : float
Mass fraction vector.
Z : float
Charge number vector.
A : float
Mass number vector.
'''
if not isinstance(Z,np.ndarray):
Z = np.array(Z)
if not isinstance(A,np.ndarray):
A = np.array(A)
if not isinstance(X,np.ndarray):
X = np.array(X)
try:
mu = old_div(1.,sum(X*(1.+Z)/A))
except TypeError:
X=np.array([X])
A=np.array([A])
Z=np.array([Z])
mu = old_div(1.,sum(X*(1.+Z)/A))
return mu | def mu(X,Z,A) | mean molecular weight assuming full ionisation.
(Kippenhahn & Weigert, Ch 13.1, Eq. 13.6)
Parameters
----------
X : float
Mass fraction vector.
Z : float
Charge number vector.
A : float
Mass number vector. | 3.70102 | 2.049546 | 1.805775 |
'''
T(rho) that separates ideal gas and degenerate pressure dominated regions.
Kippenhahn & Weigert, Eq. 16.6
Parameters
----------
rho : float
Density array [cgs].
mu : float
Mean molecular weight.
mu_e : float
Mean molecular weight per free electron.
'''
T = 1.207E5 * rho**(old_div(2.,3.)) * mu / mu_e**(old_div(5.,3.))
return T | def Trho_iddeg(rho,mu,mu_e) | T(rho) that separates ideal gas and degenerate pressure dominated regions.
Kippenhahn & Weigert, Eq. 16.6
Parameters
----------
rho : float
Density array [cgs].
mu : float
Mean molecular weight.
mu_e : float
Mean molecular weight per free electron. | 9.201894 | 2.689039 | 3.422001 |
matching_physical_drives = []
criteria_to_consider = [x for x in FILTER_CRITERIA
if x in logical_disk]
for physical_drive_object in physical_drives:
for criteria in criteria_to_consider:
logical_drive_value = logical_disk.get(criteria)
physical_drive_value = getattr(physical_drive_object, criteria)
if logical_drive_value != physical_drive_value:
break
else:
matching_physical_drives.append(physical_drive_object)
return matching_physical_drives | def _get_criteria_matching_disks(logical_disk, physical_drives) | Finds the physical drives matching the criteria of logical disk.
This method finds the physical drives matching the criteria
of the logical disk passed.
:param logical_disk: The logical disk dictionary from raid config
:param physical_drives: The physical drives to consider.
:returns: A list of physical drives which match the criteria | 2.185999 | 2.41667 | 0.90455 |
size_gb = logical_disk['size_gb']
raid_level = logical_disk['raid_level']
number_of_physical_disks = logical_disk.get(
'number_of_physical_disks', constants.RAID_LEVEL_MIN_DISKS[raid_level])
share_physical_disks = logical_disk.get('share_physical_disks', False)
# Try to create a new independent array for this request.
for controller in server.controllers:
physical_drives = controller.unassigned_physical_drives
physical_drives = _get_criteria_matching_disks(logical_disk,
physical_drives)
if size_gb != "MAX":
# If we want to allocate for a logical disk for which size_gb is
# mentioned, we take the smallest physical drives which is required
# to match the criteria.
reverse_sort = False
physical_drives = [x for x in physical_drives
if x.size_gb >= size_gb]
else:
# If we want to allocate for a logical disk for which size_gb is
# MAX, we take the largest physical drives available.
reverse_sort = True
if len(physical_drives) >= number_of_physical_disks:
selected_drives = sorted(physical_drives, key=lambda x: x.size_gb,
reverse=reverse_sort)
selected_drive_ids = [x.id for x in selected_drives]
logical_disk['controller'] = controller.id
physical_disks = selected_drive_ids[:number_of_physical_disks]
logical_disk['physical_disks'] = physical_disks
return
# We didn't find physical disks to create an independent array.
# Check if we can get some shared arrays.
if share_physical_disks:
sharable_disk_wwns = []
for sharable_logical_disk in raid_config['logical_disks']:
if (sharable_logical_disk.get('share_physical_disks', False) and
'root_device_hint' in sharable_logical_disk):
wwn = sharable_logical_disk['root_device_hint']['wwn']
sharable_disk_wwns.append(wwn)
for controller in server.controllers:
sharable_arrays = [x for x in controller.raid_arrays if
x.logical_drives[0].wwn in sharable_disk_wwns]
for array in sharable_arrays:
# Check if criterias for the logical disk match the ones with
# physical disks in the raid array.
criteria_matched_disks = _get_criteria_matching_disks(
logical_disk, array.physical_drives)
# Check if all disks in the array don't match the criteria
if len(criteria_matched_disks) != len(array.physical_drives):
continue
# Check if raid array can accomodate the logical disk.
if array.can_accomodate(logical_disk):
logical_disk['controller'] = controller.id
logical_disk['array'] = array.id
return
# We check both options and couldn't get any physical disks.
raise exception.PhysicalDisksNotFoundError(size_gb=size_gb,
raid_level=raid_level) | def allocate_disks(logical_disk, server, raid_config) | Allocate physical disks to a logical disk.
This method allocated physical disks to a logical
disk based on the current state of the server and
criteria mentioned in the logical disk.
:param logical_disk: a dictionary of a logical disk
from the RAID configuration input to the module.
:param server: An objects.Server object
:param raid_config: The target RAID configuration requested.
:raises: PhysicalDisksNotFoundError, if cannot find
physical disks for the request. | 2.943588 | 2.908868 | 1.011936 |
'''
setup trajectories for constant radiation entropy.
S_gamma/R where the radiation constant R = N_A*k
(Dave Arnett, Supernova book, p. 212)
This relates rho and T but the time scale for this
is independent.
Parameters
----------
Sg : float
S_gamma/R, values between 0.1 and 10. reflect conditions in
massive stars. The default is 0.1.
delta_logt_dex : float
Sets interval between time steps in dex of logtimerev. The
default is -0.01.
'''
# reverse logarithmic time
logtimerev=np.arange(5.,-6.,delta_logt_dex)
logrho=np.linspace(0,8.5,len(logtimerev))
logT = (old_div(1.,3.))*(logrho + 21.9161 + np.log10(Sg))
#rho_6=10**logrho/(0.1213*1.e6)
#T9=rho_6**(1./3.)
#logT_T3=np.log10(T9*1.e9)
pl.close(3);pl.figure(3);pl.plot(logrho,logT,label='$S/\mathrm{N_Ak}='+str(Sg)+'$')
pl.legend(loc=2);pl.xlabel('$\log \\rho$'); pl.ylabel('$\log T$')
pl.close(5);pl.figure(5);pl.plot(logtimerev, logrho)
pl.xlabel('$\log (t_\mathrm{final}-t)$'); pl.ylabel('$\log \\rho$')
pl.xlim(8,-6)
pl.close(6);pl.figure(6);pl.plot(logtimerev)
pl.ylabel('$\log (t_\mathrm{final}-t)$'); pl.xlabel('cycle')
# [t] logtimerev yrs
# [rho] cgs
# [T] K
T9=old_div(10**logT,1.e9)
data=[logtimerev,T9,logrho]
att.writeTraj(filename='trajectory.input', data=data, ageunit=2, tunit=1, rhounit=1, idNum=1) | def trajectory_SgConst(Sg=0.1, delta_logt_dex=-0.01) | setup trajectories for constant radiation entropy.
S_gamma/R where the radiation constant R = N_A*k
(Dave Arnett, Supernova book, p. 212)
This relates rho and T but the time scale for this
is independent.
Parameters
----------
Sg : float
S_gamma/R, values between 0.1 and 10. reflect conditions in
massive stars. The default is 0.1.
delta_logt_dex : float
Sets interval between time steps in dex of logtimerev. The
default is -0.01. | 7.008698 | 3.621405 | 1.935353 |
'''
provide default lists of elements to plot.
what_list : string
String name of species lists provided.
If what_list is "CNONe", then C, N, O and some other light
elements.
If what_list is "s-process", then s-process indicators.
'''
if what_list is "CNONe":
list_to_print = ['H-1','He-4','C-12','N-14','O-16','Ne-20']
elif what_list is "sprocess":
list_to_print = ['Fe-56','Ge-70','Zn-70','Se-76','Kr-80','Kr-82','Kr-86','Sr-88','Ba-138','Pb-208']
elif what_list is "burn_stages":
list_to_print = ['H-1','He-4','C-12','O-16','Ne-20','Si-28']
elif what_list is "list_marco_1":
list_to_print = ['C-12','O-16','Ne-20','Ne-22','Na-23','Fe-54','Fe-56','Zn-70','Ge-70','Se-76','Kr-80','Kr-82','Sr-88','Y-89','Zr-96','Te-124','Xe-130','Xe-134','Ba-138']
return list_to_print | def species_list(what_list) | provide default lists of elements to plot.
what_list : string
String name of species lists provided.
If what_list is "CNONe", then C, N, O and some other light
elements.
If what_list is "s-process", then s-process indicators. | 4.450519 | 2.733419 | 1.628188 |
'''
provide one out of 25 unique combinations of style, color and mark
use in combination with markevery=a+mod(i,b) to add spaced points,
here a would be the base spacing that would depend on the data
density, modulated with the number of lines to be plotted (b)
Parameters
----------
i : integer
Number of linestyle combination - there are many....
a : integer
Spacing of marks. The default is 5.
b : integer
Modulation in case of plotting many nearby lines. The default
is 3.
Examples
--------
>>> plot(x,sin(x),linestyle(7)[0], markevery=linestyle(7)[1])
(c) 2014 FH
'''
lines=['-','--','-.',':']
points=['v','^','<','>','1','2','3','4','s','p','*','h','H','+','x','D','d','o']
colors=['b','g','r','c','m','k']
ls_string = colors[sc.mod(i,6)]+lines[sc.mod(i,4)]+points[sc.mod(i,18)]
mark_i = a+sc.mod(i,b)
return ls_string,int(mark_i) | def linestyle(i,a=5,b=3) | provide one out of 25 unique combinations of style, color and mark
use in combination with markevery=a+mod(i,b) to add spaced points,
here a would be the base spacing that would depend on the data
density, modulated with the number of lines to be plotted (b)
Parameters
----------
i : integer
Number of linestyle combination - there are many....
a : integer
Spacing of marks. The default is 5.
b : integer
Modulation in case of plotting many nearby lines. The default
is 3.
Examples
--------
>>> plot(x,sin(x),linestyle(7)[0], markevery=linestyle(7)[1])
(c) 2014 FH | 7.409951 | 1.903013 | 3.893799 |
'''
colour pallete from http://tableaufriction.blogspot.ro/
allegedly suitable for colour-blind folk
SJ
'''
rawRGBs = [(162,200,236),
(255,128,14),
(171,171,171),
(95,158,209),
(89,89,89),
(0,107,164),
(255,188,121),
(207,207,207),
(200,82,0),
(137,137,137)]
scaledRGBs = []
for r in rawRGBs:
scaledRGBs.append((old_div(r[0],255.),old_div(r[1],255.),old_div(r[2],255.)))
idx = sc.mod(i,len(scaledRGBs))
return scaledRGBs[idx] | def colourblind(i) | colour pallete from http://tableaufriction.blogspot.ro/
allegedly suitable for colour-blind folk
SJ | 4.2978 | 2.870872 | 1.497036 |
'''
another colour pallete from http://www.sron.nl/~pault/
allegedly suitable for colour-blind folk
SJ
'''
hexcols = ['#332288', '#88CCEE', '#44AA99', '#117733', '#999933', '#DDCC77',
'#CC6677', '#882255', '#AA4499']
idx = sc.mod(i,len(hexcols))
return hexcols[idx] | def colourblind2(i) | another colour pallete from http://www.sron.nl/~pault/
allegedly suitable for colour-blind folk
SJ | 4.754902 | 2.554427 | 1.861436 |
'''
version of linestyle function with colourblind colour scheme
returns linetyle, marker, color (see example)
Parameters
----------
i : integer
Number of linestyle combination - there are many....
a : integer
Spacing of marks. The default is 5.
b : integer
Modulation in case of plotting many nearby lines. The default
is 3.
Examples
--------
>>> plot(x,sin(x),ls=linestyle(7)[0], marker=linestyle(7)[1], \
color=linestyle(7)[2],markevery=linestyle(7)[3])
(c) 2014 FH
'''
lines=['-','--','-.',':']
points=['v','^','<','>','1','2','3','4','s','p','*','h','H','+','x','D','d','o']
colors=['b','g','r','c','m','k']
col=colourblind(i)
style=lines[sc.mod(i,4)]
point=points[sc.mod(i,18)]
mark_i = a+sc.mod(i,b)
return style,point,col,mark_i | def linestylecb(i,a=5,b=3) | version of linestyle function with colourblind colour scheme
returns linetyle, marker, color (see example)
Parameters
----------
i : integer
Number of linestyle combination - there are many....
a : integer
Spacing of marks. The default is 5.
b : integer
Modulation in case of plotting many nearby lines. The default
is 3.
Examples
--------
>>> plot(x,sin(x),ls=linestyle(7)[0], marker=linestyle(7)[1], \
color=linestyle(7)[2],markevery=linestyle(7)[3])
(c) 2014 FH | 6.987515 | 2.218158 | 3.150142 |
'''
provide default symbol lists
Parameters
----------
what_list : string
String name of symbol lists provided; "list1", "list2",
"lines1" or "lines2".
'''
if what_list is "list1":
symbol=['ro','bo','ko','go','mo'\
,'r-','b-','k-','g-','m-','r--','b--','k--'\
,'g--','r1']
#symbol=['r+','ro','r-']
elif what_list is "list2":
symbol=['r-','b--','g-.','k:','md','.','o','v','^','<','>','1','2',\
'3','4','s','p','*','h','H','+']
elif what_list is "lines1":
symbol=['b--','k--','r--','c--','m--','g--','b-','k-','r-','c-','m-','g-','b.','b-.','k-.','r-.','c-.','m-.','g-.','b:','k:','r:','c:','m:','g:']
elif what_list is "lines2":
symbol=['g:','r-.','k-','b--','k-.','b+','r:','b-','c--','m--','g--','r-','c-','m-','g-','k-.','c-.','m-.','g-.','k:','r:','c:','m:','b-.','b:']
return symbol | def symbol_list(what_list) | provide default symbol lists
Parameters
----------
what_list : string
String name of symbol lists provided; "list1", "list2",
"lines1" or "lines2". | 3.722733 | 3.012271 | 1.235856 |
'''
provide the list of symbols to use according for the list of
species/arrays to plot.
Parameters
----------
default_symbol_list : list
Symbols that the user choose to use.
len_list_to_print : integer
len of list of species/arrays to print.
'''
symbol_used = []
for i in range(len_list_to_print):
symbol_used.append(default_symbol_list[sc.mod(i,len(default_symbol_list))])
return symbol_used | def make_list(default_symbol_list, len_list_to_print) | provide the list of symbols to use according for the list of
species/arrays to plot.
Parameters
----------
default_symbol_list : list
Symbols that the user choose to use.
len_list_to_print : integer
len of list of species/arrays to print. | 4.726382 | 2.108541 | 2.241542 |
'''
bb is an index array which may have numerous double or triple
occurrences of indices, such as for example the decay_index_pointer.
This method removes all entries <= -, then all dublicates and
finally returns a sorted list of indices.
'''
cc=bb[np.where(bb>=0)]
cc.sort()
dc=cc[1:]-cc[:-1] # subsequent equal entries have 0 in db
dc=np.insert(dc,0,1) # the first element is always unique (the second occurence is the dublicate)
dc_mask=np.ma.masked_equal(dc,0)
return np.ma.array(cc,mask=dc_mask.mask).compressed() | def strictly_monotonic(bb) | bb is an index array which may have numerous double or triple
occurrences of indices, such as for example the decay_index_pointer.
This method removes all entries <= -, then all dublicates and
finally returns a sorted list of indices. | 10.127843 | 3.419542 | 2.961754 |
'''
read solar abundances from filename_solar.
Parameters
----------
filename_solar : string
The file name.
solar_factor : float
The correction factor to apply, in case filename_solar is not
solar, but some file used to get initial abundances at
metallicity lower than solar. However, notice that this is
really rude, since alpha-enahncements and things like that are
not properly considered. Only H and He4 are not multiplied. So,
for publications PLEASE use proper filename_solar at...solar,
and use solar_factor = 1. Marco
'''
f0=open(filename_solar)
sol=f0.readlines()
f0.close
sol[0].split(" ")
# Now read in the whole file and create a hashed array:
global names_sol
names_sol=[]
global z_sol
z_sol=[]
yps=np.zeros(len(sol))
mass_number=np.zeros(len(sol))
for i in range(len(sol)):
z_sol.append(int(sol[i][1:3]))
names_sol.extend([sol[i].split(" ")[0][4:]])
yps[i]=float(sol[i].split(" ")[1]) * solar_factor
try:
mass_number[i]=int(names_sol[i][2:5])
except ValueError:
print("WARNING:")
print("This initial abundance file uses an element name that does")
print("not contain the mass number in the 3rd to 5th position.")
print("It is assumed that this is the proton and we will change")
print("the name to 'h 1' to be consistent with the notation used in")
print("iniab.dat files")
names_sol[i]='h 1'
mass_number[i]=int(names_sol[i][2:5])
if mass_number[i] == 1 or mass_number[i] == 4:
yps[i] = old_div(yps[i],solar_factor)
# convert 'h 1' in prot, not needed any more??
#names_sol[0] = 'prot '
# now zip them together:
global solar_abundance
solar_abundance={}
for a,b in zip(names_sol,yps):
solar_abundance[a] = b
z_bismuth = 83
global solar_elem_abund
solar_elem_abund = np.zeros(z_bismuth)
for i in range(z_bismuth):
dummy = 0.
for j in range(len(solar_abundance)):
if z_sol[j] == i+1:
dummy = dummy + float(solar_abundance[names_sol[j]])
solar_elem_abund[i] = dummy | def solar(filename_solar, solar_factor) | read solar abundances from filename_solar.
Parameters
----------
filename_solar : string
The file name.
solar_factor : float
The correction factor to apply, in case filename_solar is not
solar, but some file used to get initial abundances at
metallicity lower than solar. However, notice that this is
really rude, since alpha-enahncements and things like that are
not properly considered. Only H and He4 are not multiplied. So,
for publications PLEASE use proper filename_solar at...solar,
and use solar_factor = 1. Marco | 5.514811 | 3.051084 | 1.807492 |
'''Build the global dictionary ``cl`` mapping each species name to its original index as read from a data file.'''
# connect the species number in the list with the species name
global cl
cl={}
# a: species name, b: original index/number of that species (pairs zipped below)
for a,b in zip(names_ppn_world,number_names_ppn_world):
cl[a] = b | def define_zip_index_for_species(names_ppn_world,
number_names_ppn_world) | This just give back cl, that is the original index as it is read from files from a data file. | 13.57332 | 4.457069 | 3.045347 |
'''
Given an array of not-decayed isotopic abundances and a similar
array of decayed isotopic abundances, compute elemental abundances
and elemental production factors (stored in module-level globals
elem_abund, elem_abund_decayed, elem_prod_fac, elem_prod_fac_decayed).

The decayed quantities are only computed when ``i_decay == 2``.
NOTE(review): relies on module globals z_bismuth, spe, znum_int,
index_stable, solar_elem_abund and cl being set up beforehand —
confirm against the callers.
'''
# this way is done in a really simple way. May be done better for sure, in a couple of loops.
# I keep this, since I have only to copy over old script. Falk will probably redo it.
#import numpy as np
#from NuGridPy import utils as u
global elem_abund
elem_abund = np.zeros(z_bismuth)
global elem_abund_decayed
elem_abund_decayed = np.zeros(z_bismuth)
global elem_prod_fac
elem_prod_fac = np.zeros(z_bismuth)
global elem_prod_fac_decayed
elem_prod_fac_decayed = np.zeros(z_bismuth)
# notice that elem_abund include all contribution, both from stables and unstables in
# that moment.
for i in range(z_bismuth):
    # sum mass fractions of all species with charge i+1 that are flagged stable
    dummy = 0.
    for j in range(len(spe)):
        if znum_int[j] == i+1 and stable_isotope_identifier[j] > 0.5:
            dummy = dummy + float(mass_fractions_array_not_decayed[j])
    elem_abund[i] = dummy
for i in range(z_bismuth):
    # production factor = abundance relative to solar (only for elements with stable isotopes)
    if index_stable[i] == 1:
        elem_prod_fac[i] = float(old_div(elem_abund[i],solar_elem_abund[i]))
    elif index_stable[i] == 0:
        elem_prod_fac[i] = 0.
if i_decay == 2:
    # repeat the two loops above for the decayed mass fractions
    for i in range(z_bismuth):
        dummy = 0.
        for j in range(len(mass_fractions_array_decayed)):
            if znum_int[cl[stable_isotope_list[j].capitalize()]] == i+1:
                #print znum_int[cl[stable[j].capitalize()]],cl[stable[j].capitalize()],stable[j]
                dummy = dummy + float(mass_fractions_array_decayed[j])
        elem_abund_decayed[i] = dummy
    for i in range(z_bismuth):
        if index_stable[i] == 1:
            elem_prod_fac_decayed[i] = float(old_div(elem_abund_decayed[i],solar_elem_abund[i]))
        elif index_stable[i] == 0:
elem_prod_fac_decayed[i] = 0. | def element_abund_marco(i_decay, stable_isotope_list,
stable_isotope_identifier,
mass_fractions_array_not_decayed,
mass_fractions_array_decayed) | Given an array of isotopic abundances not decayed and a similar
array of isotopic abundances not decayed, here elements abundances,
and production factors for elements are calculated | 3.571657 | 3.063594 | 1.165839 |
'''
Return the element symbol AS A STRING for a given atomic number.

Parameters
----------
z : string or number
    Atomic (charge) number, e.g. 26 or '26'.

Returns
-------
str
    Element symbol, e.g. 'Fe'.

Notes
-----
Uses a predefined dictionary. For the other way around, see
get_z_from_el.
'''
# normalize the input to the string form used as the dictionary key;
# isinstance is the idiomatic type check (type(z)==float breaks for subclasses)
if isinstance(z, float):
    z = int(z)
if isinstance(z, int):
    z = str(z)
dict_z={'24': 'Cr', '25': 'Mn', '26': 'Fe', '27': 'Co', '20': 'Ca', '21': 'Sc', '22': 'Ti', '23': 'V', '28': 'Ni', '29': 'Cu', '4': 'Be', '8': 'O', '59': 'Pr', '58': 'Ce', '55': 'Cs', '54': 'Xe', '57': 'La', '56': 'Ba', '51': 'Sb', '50': 'Sn', '53': 'I', '52': 'Te', '88': 'Ra', '89': 'Ac', '82': 'Pb', '83': 'Bi', '80': 'Hg', '81': 'Tl', '86': 'Rn', '87': 'Fr', '84': 'Po', '85': 'At', '3': 'Li', '7': 'N', '39': 'Y', '38': 'Sr', '33': 'As', '32': 'Ge', '31': 'Ga', '30': 'Zn', '37': 'Rb', '36': 'Kr', '35': 'Br', '34': 'Se', '60': 'Nd', '61': 'Pm', '62': 'Sm', '63': 'Eu', '64': 'Gd', '65': 'Tb', '66': 'Dy', '67': 'Ho', '68': 'Er', '69': 'Tm', '2': 'He', '6': 'C', '91': 'Pa', '90': 'Th', '92': 'U', '11': 'Na', '10': 'Ne', '13': 'Al', '12': 'Mg', '15': 'P', '14': 'Si', '17': 'Cl', '16': 'S', '19': 'K', '18': 'Ar', '48': 'Cd', '49': 'In', '46': 'Pd', '47': 'Ag', '44': 'Ru', '45': 'Rh', '42': 'Mo', '43': 'Tc', '40': 'Zr', '41': 'Nb', '1': 'H', '5': 'B', '9': 'F', '77': 'Ir', '76': 'Os', '75': 'Re', '74': 'W', '73': 'Ta', '72': 'Hf', '71': 'Lu', '70': 'Yb', '79': 'Au', '78': 'Pt'}
return dict_z[z] | def get_el_from_z(z) | Very simple Vfunction that gives the atomic number AS A STRING when given the element symbol.
Uses a predefined dictionary.
Parameter :
z : string or number
For the other way, see get_z_from_el | 2.718573 | 2.210283 | 1.229966 |
'''
Perform a least-squares fit of self.func to the data.

Parameters
----------
x, y : list
    Matching data arrays that define a numerical function y(x);
    this is the data to be fitted.
dcoef : list or string
    You can provide a different guess for the coefficients, or
    provide the string 'none' to use the initial guess self.coef.
    The default is 'none'.

Returns
-------
ierr
    Values between 1 and 4 signal success.

Notes
-----
self.fcoef contains the fitted coefficients.
'''
self.x = x
self.y = y
# BUG FIX: the original used "dcoef is not 'none'"; 'is' compares object
# identity and only works for interned strings — use inequality instead
if dcoef != 'none':
    coef = dcoef
else:
    coef = self.coef
fcoef = optimize.leastsq(self.residual, coef, args=(y, self.func, x))
self.fcoef = fcoef[0].tolist()
return fcoef[1] | def fit(self, x, y, dcoef='none') | performs the fit
x, y : list
Matching data arrays that define a numerical function y(x),
this is the data to be fitted.
dcoef : list or string
You can provide a different guess for the coefficients, or
provide the string 'none' to use the inital guess. The
default is 'none'.
Returns
-------
ierr
Values between 1 and 4 signal success.
Notes
-----
self.fcoef, contains the fitted coefficients. | 6.628553 | 1.851839 | 3.579444 |
'''
Plot the data and the fitted function.

Parameters
----------
ifig : integer
    Figure window number. The default is 1.
data_label : string
    Legend for data. The default is 'data'.
fit_label : string
    Legend for fit. If fit_label is 'fit', then substitute the fit
    function name self.__name__. The default is 'fit'.
data_shape : character
    Shape for data. The default is 'o'.
fit_shape : character
    Shape for fit. The default is '-'.
'''
# BUG FIX: the original used "is not" / "is" for value comparisons;
# identity checks on ints and string literals are unreliable — use !=/==
if len(self.coef) != len(self.fcoef):
    print("Warning: the fitted coefficient list is not same")
    print(" length as guessed list - still I will try ...")
pl.figure(ifig)
pl.plot(self.x, self.y, data_shape, label=data_label)
if fit_label == 'fit':
    fit_label = self.__name__
pl.plot(self.x, self.func(self.fcoef, self.x), fit_shape, label=fit_label)
pl.legend() | def plot(self, ifig=1, data_label='data', fit_label='fit',
data_shape='o', fit_shape='-') | plot the data and the fitted function.
Parameters
----------
ifig : integer
Figure window number. The default is 1.
data_label : string
Legend for data. The default is 'data'.
fit_label : string
Legend for fit. If fit_lable is 'fit', then substitute fit
function type self.func_name. The default is 'fit'.
data_shape : character
Shape for data. The default is 'o'.
fit_shape : character
Shape for fit. The default is '-'. | 3.361797 | 2.089991 | 1.608523 |
'''
Collect the element names out of stable_el into stable_names.

Note that stable_names is a misnomer, since stable_el also holds
unstable element names (marked with 999 in place of the stable
mass numbers).
'''
# first entry of every stable_el row is the element name
stable_names = [entry[0] for entry in self.stable_el]
self.stable_names=stable_names | def _stable_names(self) | This private method extracts the element names from stable_el.
Note that stable_names is a misnomer as stable_el also contains
unstable element names with a number 999 for the *stable* mass
numbers. (?!??) | 8.069145 | 1.660993 | 4.858026 |
'''
Take one abundance-vector definition (a, z, isomers, yps), sort it
by charge number and mass number, and return the processed input
variables plus an element and isotope vector and a list of isomers.
'''
def cmp_to_key(mycmp):
    'Convert a cmp= function into a key= function'
    # adapter class: wraps each element so Python 3's key-based sort
    # can use the old-style cmp functions compar/comparator below
    class K(object):
        def __init__(self, obj, *args):
            self.obj = obj
        def __lt__(self, other):
            return mycmp(self.obj, other.obj) < 0
        def __gt__(self, other):
            return mycmp(self.obj, other.obj) > 0
        def __eq__(self, other):
            return mycmp(self.obj, other.obj) == 0
        def __le__(self, other):
            return mycmp(self.obj, other.obj) <= 0
        def __ge__(self, other):
            return mycmp(self.obj, other.obj) >= 0
        def __ne__(self, other):
            return mycmp(self.obj, other.obj) != 0
    return K
tmp=[]
isom=[]
for i in range(len(a)):
    # regular species: entry [name 'El-A', abundance, charge, mass]
    if z[i]!=0 and isomers[i]==1: #if its not 'NEUt and not an isomer'
        tmp.append([self.stable_names[int(z[i])]+'-'+str(int(a[i])),yps[i],z[i],a[i]])
    elif isomers[i]!=1: #if it is an isomer
        # zero abundances are floored to 1e-99 for the isomer list
        if yps[i]==0:
            isom.append([self.stable_names[int(z[i])]+'-'+str(int(a[i]))+'-'+str(int(isomers[i]-1)),1e-99])
        else:
            isom.append([self.stable_names[int(z[i])]+'-'+str(int(a[i]))+'-'+str(int(isomers[i]-1)),yps[i]])
# two stable sorts: first by mass number, then by charge number, so the
# final order is charge-major with mass as a secondary key
tmp.sort(key = cmp_to_key(self.compar))
tmp.sort(key = cmp_to_key(self.comparator))
# unpack the sorted rows into parallel output vectors
abunds=[]
isotope_to_plot=[]
z_iso_to_plot=[]
a_iso_to_plot=[]
el_iso_to_plot=[]
for i in range(len(tmp)):
    isotope_to_plot.append(tmp[i][0])
    abunds.append(tmp[i][1])
    z_iso_to_plot.append(int(tmp[i][2]))
    a_iso_to_plot.append(int(tmp[i][3]))
    el_iso_to_plot.append(self.stable_names[int(tmp[i][2])])
return a_iso_to_plot,z_iso_to_plot,abunds,isotope_to_plot,el_iso_to_plot,isom | def _process_abundance_vector(self, a, z, isomers, yps) | This private method takes as input one vector definition and
processes it, including sorting by charge number and
mass number. It returns the processed input variables
plus an element and isotope vector and a list of
isomers. | 2.131726 | 1.772163 | 1.202895 |
'''
simple comparator method
'''
indX=0
indY=0
a= int(x[0].split('-')[1])
b= int(y[0].split('-')[1])
if a>b:
return 1
if a==b:
return 0
if a<b:
return -1 | def compar(self, x, y) | simple comparator method | 4.809027 | 4.00223 | 1.201587 |
'''
simple comparator method
'''
indX=0
indY=0
for i in range(len(self.stable_names)):
if self.stable_names[i] == x[0].split('-')[0]:
indX=i
if self.stable_names[i] == y[0].split('-')[0]:
indY=i
if indX>indY:
return 1
if indX==indY:
return 0
if indX<indY:
return -1 | def comparator(self, x, y) | simple comparator method | 2.801151 | 2.555913 | 1.095949 |
'''
Read the isotopedatabase.txt file in the sldir run directory and
return z, a, elements, the cutoff mass for each species that
delineates beta+ and beta- decay, and the logical in the last
column. Also provides the charge_from_element dictionary according
to isotopedatabase.txt.
'''
name=self.sldir+ffname
z_db, a_db, el_db, stable_a_db,logic_db=\
    np.loadtxt(name,unpack=True,dtype='str')
# loadtxt with dtype='str' returns strings; convert numeric columns to int
z_db=np.array(z_db,dtype='int')
a_db=np.array(a_db,dtype='int')
stable_a_db=np.array(stable_a_db,dtype='int')
# charge number for element name from dictionary in isotopedatabase.txt
charge_from_element_name={}
# NOTE: 'name' is reused here, shadowing the file path above
for name in self.stable_names:
    # the neutron appears under several aliases; the database calls it 'nn'
    if name=='Neutron' or name=='Neut' or name=='NEUT' or name=='N-1':
        name='nn'
    try:
        zz=z_db[np.where(el_db==name)][0]
        charge_from_element_name[name]=zz
    except IndexError:
        # element not present in this run's database
        print(name+" does not exist in this run")
return z_db, a_db, el_db, stable_a_db,logic_db,charge_from_element_name | def _read_isotopedatabase(self, ffname='isotopedatabase.txt') | This private method reads the isotopedatabase.txt file in sldir
run dictory and returns z, a, elements, the cutoff mass for each
species that delineate beta+ and beta- decay and the logical in
the last column. Also provides charge_from_element dictionary
according to isotopedatabase.txt. | 5.737697 | 2.616564 | 2.192837 |
'''
Accept input formatted like 'He-3' and check against the stable_el
list whether it occurs in there. Returns True if it does, False
otherwise.

Notes
-----
This method is designed to work with an se instance from
nugridse.py. In order to make it work with ppn.py some
additional work is required.
FH, April 20, 2013.
'''
element_name_of_iso = species.split('-')[0]
try:
    a_of_iso = int(species.split('-')[1])
except ValueError: # if the species name contains in addition to the
                   # mass number some letters, e.g. for isomere, then
                   # we assume it is unstable. This is not correct but
                   # related to the fact that in nugridse.py we do not
                   # identify species properly by the three numbers A, Z
                   # and isomeric_state. We should do that!!!!!!
    a_of_iso = 999
# look up the element's row in stable_el; entries [1:] are its stable mass numbers
idp_of_element_in_stable_names = self.stable_names.index(element_name_of_iso)
if a_of_iso in self.stable_el[idp_of_element_in_stable_names][1:]:
    return True
else:
return False | def is_stable(self,species) | This routine accepts input formatted like 'He-3' and checks with
stable_el list if occurs in there. If it does, the routine
returns True, otherwise False.
Notes
-----
this method is designed to work with an se instance from
nugridse.py. In order to make it work with ppn.py some
additional work is required.
FH, April 20, 2013. | 9.730852 | 3.867139 | 2.516292 |
'''
Write an initial-abundance file (intended for use with ppn).

Parameters
----------
outfile : string
    Name of the output file. The default is
    'initial_abundance.dat'.
header_string : string
    A string with the header line. The default is
    'initial abundances for a PPN run'.
'''
# header, column names, and column data handed to the ascii-table writer
hd = [header_string]
dcols = ['Z', 'species','mass fraction']
data = [self.z,self.names,self.abu]
att.write(outfile,hd,dcols,data) | def write(self, outfile='initial_abundance.dat',
header_string='initial abundances for a PPN run') | Write initial abundance file (intended for use with ppn)
Parameters
----------
outfile : string
Name of output file. The default is
'initial_abundance.dat'.
header_string : string
A string with header line. The default is
'initial abundances for a PPN run'. | 4.865816 | 3.01358 | 1.61463 |
'''
species_hash is a hash array in which you provide abundances
referenced by species names that you want to set to some
particular value; all other species are then normalised so that
the total sum is 1.
Examples
--------
You can set up the argument array for this method for example
in the following way.
>>> sp={}
>>> sp['he 4']=0.2
>>> sp['h 1']=0.5
'''
sum_before = sum(self.abu)
for i in range(len(species_hash)):
sum_before -= self.abu[self.hindex[list(species_hash.keys())[i]]]
print("sum_before = "+str(sum_before))
normalization_factor=old_div(1.0-sum(species_hash.values()),sum_before)
print("normalizing the rest witih factor "+str(normalization_factor))
self.abu *= normalization_factor
for i in range(len(species_hash)):
self.abu[self.hindex[list(species_hash.keys())[i]]]=list(species_hash.values())[i]
for l in range(len(self.abu)):
if self.abu[l] <= 1e-99: #otherwise we might write e-100 which will be read as e-10 by ppn
self.abu[l] = 1.0e-99
for name in self.habu:
self.habu[name]=self.abu[self.hindex[name]] | def set_and_normalize(self,species_hash) | species_hash is a hash array in which you provide abundances
referenced by species names that you want to set to some
particular value; all other species are then normalised so that
the total sum is 1.
Examples
--------
You can set up the argument array for this method for example
in the following way.
>>> sp={}
>>> sp['he 4']=0.2
>>> sp['h 1']=0.5 | 5.162033 | 2.64848 | 1.949055 |
'''
Return the isotopic ratio of two isotopes specified as iso1 and
iso2. The isotopes are given as, e.g., ['Fe',56,'Fe',58] or
['Fe-56','Fe-58'] (for compatibility) -> list.
'''
# normalize the two-entry 'El-A' form into the flat [el, a, el, a] form
if len(isos) == 2:
    converted = isos[0].split('-')
    converted.append(isos[1].split('-')[0])
    converted.append(isos[1].split('-')[1])
    isos = converted
# habu keys look like 'fe 56': two-char lower element + right-aligned mass
numerator = self.habu[isos[0].ljust(2).lower() + str(int(isos[1])).rjust(3)]
denominator = self.habu[isos[2].ljust(2).lower() + str(int(isos[3])).rjust(3)]
ssratio = old_div(numerator, denominator)
return ssratio | def isoratio_init(self,isos) | This file returns the isotopic ratio of two isotopes specified
as iso1 and iso2. The isotopes are given as, e.g.,
['Fe',56,'Fe',58] or ['Fe-56','Fe-58'] (for compatibility)
-> list. | 4.213073 | 2.294061 | 1.836513 |
'''
Return the abundance of a specific isotope. The isotope is given
as, e.g., 'Si-28', or as a list ['Si-28','Si-29','Si-30'].
'''
if type(isos) == list:
    # one abundance per requested isotope, in input order
    ssratio = []
    for entry in isos:
        parts = entry.split('-')
        ssratio.append(self.habu[parts[0].ljust(2).lower() + str(int(parts[1])).rjust(3)])
else:
    # single isotope: scalar result
    parts = isos.split('-')
    ssratio = self.habu[parts[0].ljust(2).lower() + str(int(parts[1])).rjust(3)]
['Si-28','Si-29','Si-30'] | 3.191873 | 2.221054 | 1.437099 |
# unique rotational speeds (rpm) reported by the drive members,
# skipping members that do not report one
drv_rot_speed_rpm = {
    member.rotational_speed_rpm
    for member in self.get_members()
    if member.rotational_speed_rpm is not None
}
return drv_rot_speed_rpm | def drive_rotational_speed_rpm(self) | Gets the set of rotational speed of the HDD drives | 2.592242 | 2.46178 | 1.052995 |
'''
Convert the name of the given isotope (input), e.g. 'N-14', to
the form used by the grain database, e.g. '14n' (mass number
first, element symbol lower-cased).
'''
pieces = iso.split('-')
output = pieces[1] + pieces[0]
return output.lower() | def iso_name_converter(iso) | Converts the name of the given isotope (input), e.g., 'N-14' to
14N as used later to compare w/ grain database. | 14.584093 | 2.657452 | 5.487998 |
'''
Give back the path of the whole svn tree installation, which is
necessary for the script to run.
'''
module_path = __file__
pieces = module_path.split('/')
if len(pieces) == 1:
    # bare filename: module loaded from the working directory
    svnpath = os.path.abspath('.') + '/../../'
else:
    # drop the last three path components, keeping a trailing slash
    # (empty result when the path has three or fewer components)
    svnpath = ''.join(part + '/' for part in pieces[:-3])
return svnpath | def get_svnpath() | This subroutine gives back the path of the whole svn tree
installation, which is necessary for the script to run. | 4.533377 | 2.582508 | 1.755416 |
'''
Reset the filter and go back to the initialized values. This also
resets the style if you have changed it.
'''
# every public attribute is restored from its underscore-prefixed backup
for attr in ('header_desc', 'header_data', 'header_style',
             'desc', 'data', 'style', 'descdict', 'datadict'):
    setattr(self, attr, getattr(self, '_' + attr))
self.styledict = self._styledict | def reset_filter(self) | Resets the filter and goes back to initialized value. This
routine also resets the style if you have changed it. | 4.345168 | 2.310536 | 1.880589 |
'''
Print information on what kind of grains are currently available
in your filtered version: the type of grains available, groups,
phases, and optionally references. More to be implemented upon
need.

Parameters
----------
graintype, group, reference, phase : boolean
    What do you want printed for information. There can be a
    lot of references, hence the reference default is False.
'''
# how many grains in database
print('There are ' + str(len(self.data)) + ' grains in your database.\n')

def _unique_column_values(colname):
    # first-occurrence-order unique values of one description column;
    # replaces four copies of the same dedup loop in the original
    values = []
    for row in range(len(self.desc)):
        entry = self.desc[row][self.descdict[colname]]
        if entry not in values:
            values.append(entry)
    return values
# graintypes
if graintype:
    gtype_info = _unique_column_values('Type')
    print('Available graintypes are:')
    print('-------------------------')
    print(gtype_info)
# groups
if group:
    group_info = _unique_column_values('Group')
    print('\nAvailable groups of grains (for silicates and oxides) are:')
    print('----------------------------------------------------------')
    print(group_info)
# Phases
if phase:
    phase_info = _unique_column_values('Phase')
    print('\nAvailable Phases of grains are:')
    print('----------------------------------------------------------')
    print(phase_info)
# references
if reference:
    ref_info = _unique_column_values('Reference')
    print('\nReferences for grains:')
    print('----------------------')
print(ref_info) | def info(self, graintype=True, group=True, reference=False,
phase=True) | This routine gives you informations what kind of grains are
currently available in your filtered version. It gives you
the type of grains available. More to be implemented upon need.
Parameters
----------
graintype, group, references, phase : boolean
What do you wanna print for information. There can be a
lot of references, hence references default is False. | 2.323737 | 1.668733 | 1.392516 |
'''
Private function to filter data, goes with filter_desc
'''
# now filter data
if len(indexing) > 0:
desc_tmp = np.zeros((len(indexing),len(self.header_desc)),dtype='|S1024')
data_tmp = np.zeros((len(indexing),len(self.header_data)))
style_tmp= np.zeros((len(indexing),len(self.header_style)),dtype='|S1024')
for i in range(len(indexing)):
for j in range(len(self.header_desc)):
desc_tmp[i][j] = self.desc[indexing[i]][j]
for k in range(len(self.header_data)):
data_tmp[i][k] = self.data[indexing[i]][k]
for l in range(len(self.header_style)):
style_tmp[i][l]= self.style[indexing[i]][l]
self.desc = desc_tmp
self.data = data_tmp
self.style= style_tmp
else:
print('No filter selected or no data found!') | def _filter_desc(self, indexing) | Private function to filter data, goes with filter_desc | 2.34991 | 2.020721 | 1.162907 |
'''
Filter out single grains interactively. This is kind of useless if
you have tons of data still in the list (use filter_desc and
filter_data for that). This filter gives an index to every grain,
prints the most important information, and then asks you (via
input()) which indices to keep. No arguments; all input is given
during the routine.
'''
my_index = 0
# header row, followed below by one row per grain with its key identifiers
my_grains = [['Index','Label','Type','Group','Meteorite','Mineralogy','C12/C13','d(Si29/Si30)','d(Si30/Si29)']]
# add the data to this grain list
for it in range(len(self.data)):
    my_grains.append([my_index,self.desc[it][self.descdict['Grain Label']], self.desc[it][self.descdict['Type']], self.desc[it][self.descdict['Group']], self.desc[it][self.descdict['Meteorite']], self.desc[it][self.descdict['Mineralogy']], self.data[it][self.datadict['12c/13c']], self.data[it][self.datadict['d(29si/28si)']], self.data[it][self.datadict['d(30si/28si)']]])
    my_index += 1
for prt_line in my_grains:
    print(prt_line)
# now write the selector for the index of the grains to select which one should be
# available and which ones should be dumped
usr_input = ''
usr_input = input('Select the grains by index that you want to use. Please separate the indeces by a comma, e.g., 1 or 0,2,3,4\n')
# process user index
if usr_input == '':
    print('No data selected to filter.')
    return None
elif len(usr_input) == 1:
    # a single character is taken as one index directly
    usr_index = [usr_input]
else:
    usr_index = usr_input.split(',')
for it in range(len(usr_index)):
    usr_index[it] = int(usr_index[it])
# filter: build new arrays holding only the selected rows
desc_tmp = np.zeros((len(usr_index),len(self.header_desc)),dtype='|S1024')
data_tmp = np.zeros((len(usr_index),len(self.header_data)))
style_tmp= np.zeros((len(usr_index),len(self.header_style)),dtype='|S1024')
for i in range(len(usr_index)):
    for j in range(len(self.header_desc)):
        desc_tmp[i][j] = self.desc[usr_index[i]][j]
    for k in range(len(self.header_data)):
        data_tmp[i][k] = self.data[usr_index[i]][k]
    for l in range(len(self.header_style)):
        style_tmp[i][l]= self.style[usr_index[i]][l]
self.desc = desc_tmp
self.data = data_tmp
self.style= style_tmp | def filter_single_grain(self) | This subroutine is to filter out single grains. It is kind of
useless if you have tons of data still in the list. To work on
there, you have other filters (filter_desc and filter_data)
available! This filter gives an index to every grain, plots
the most important information, and then asks you to pick a
filter. No input necessary, input is given during the routine | 3.778318 | 2.578305 | 1.465427 |
'''
Filter isotopic values according to the limit you give. You can
filter in ratio or in delta space.

Parameters
----------
isos : list
    Isotopes you want to filter for, e.g., give as
    ['Si-28', 'Si-30'] for the 28/30 ratio.
limit : string
    What you want to filter for, e.g., ratio or delta > 100,
    then give '>100'.
delta : boolean, optional
    Set to True to filter in delta space, False for ratio space.
    The default is True.
'''
# check availability
dat_index, delta_b, ratio_b = self.check_availability(isos)
if dat_index == -1:
    print('Isotopes selected are not available. Check i.datadict (where i is your instance) for availability of isotopes.')
    return None
# select if larger or smaller and define limit
if limit[0:1] == '>':
    comperator = 'gt'
elif limit[0:1] == '<':
    comperator = 'st'
else:
    print('Comperator not specified. Limit must be given as \'>5.\' for example.')
    return None
try:
    limit = float(limit[1:len(limit)])
except ValueError:
    print('Limit must be given as \'>5.\' for example.')
    return None
# now calculate the actual limit to compare with, depending on whether the
# database stores this quantity as delta/ratio and possibly inverted
# (delta_b: stored as delta; ratio_b: stored as the inverse ratio)
if delta == delta_b: # input and available same
    if ratio_b: # one over
        if delta:
            tmp = self.delta_to_ratio(isos,limit,oneover=True)
            comp_lim = self.ratio_to_delta(isos,tmp) # check
        else:
            comp_lim = old_div(1.,limit) # check
    else: # all fine
        comp_lim = limit
else: # input and availability not the same
    if ratio_b: # one over
        if delta: # delta given, ratio one over wanted
            comp_lim = self.delta_to_ratio(isos,limit,oneover=True)
        else: # ratio given, delta one over wanted
            comp_lim = self.ratio_to_delta(isos,limit,oneover=True)
    else: # not one over
        if delta: # delta given, ratio wanted
            comp_lim = self.delta_to_ratio(isos,limit)
        else:
            comp_lim = self.ratio_to_delta(isos,limit)
# indexing vector: rows that pass the comparison
indexing = []
for i in range(len(self.data)):
    dat_val = self.data[i][dat_index]
    if comperator == 'st':
        if dat_val < comp_lim:
            indexing.append(i)
    else:
        if dat_val > comp_lim:
            indexing.append(i)
# now filter data
# NOTE(review): unlike _filter_desc, this does not filter self.style —
# confirm whether that is intentional
if len(indexing) > 0:
    desc_tmp = np.zeros((len(indexing),len(self.header_desc)),dtype='|S1024')
    data_tmp = np.zeros((len(indexing),len(self.header_data)))
    for i in range(len(indexing)):
        for j in range(len(self.header_desc)):
            desc_tmp[i][j] = self.desc[indexing[i]][j]
        for k in range(len(self.header_data)):
            data_tmp[i][k] = self.data[indexing[i]][k]
    self.desc = desc_tmp
    self.data = data_tmp
else:
print('No filter selected!') | def filter_data(self, isos, limit, delta=True) | This subroutine filters isotopic values according to the limit
you give. You can filter in ratio or in delta space.
Parameters
----------
isos : list
isotopes you want to filter for, e.g., give as
['Si-28', 'Si-30'] for the 28/30 ratio.
limit : string
what do you want to filter for, e.g., ratio or delta > 100,
then give '>100'.
delta : boolean, optional
do you wanna filter in delta space, then set to True,
otherwise to False. The default is True. | 3.663375 | 2.694062 | 1.359796 |
'''
Change the plotting style that is set by default. The style is
changed according to the label that you choose. To change
according to reference, use the style_chg_ref() function! You can
change it back to default by resetting the filter with
g.reset_filter() (assuming g is your instance). The format used
here is:

['Symbol', 'Edge color', 'Face color', 'Symbol size', 'Edge width',
 'Label']

You can see the current styles by inspecting g.style.

Attention: you have to give values that are compatible with
matplotlib; they are not validated here.

Parameters
----------
type : string
    Select the label of the grains you want to change.
    (NOTE(review): parameter name shadows the ``type`` builtin.)
symb : string, optional
    Select new symbol. None for no change.
edc : string, optional
    Select new edge color. None for no change.
fac : string, optional
    Select new face color. None for no change.
smbsz : string, optional
    Select new symbol size. None for no change.
edw : string, optional
    Select new edge width. None for no change.
lab : string, optional
    Select new label. None for no change. Watch out: if you
    want to do more specifications later, the type will
    have changed to the new label.
'''
# NOTE(review): '!= None' works but 'is not None' is the idiomatic form
# do stuff for selected type
for i in range(len(self.style)):
    # check if type is correct, otherwise continue directly
    if self.style[i][self.styledict['Label']] == type:
        # change symbol:
        if symb != None:
            self.style[i][self.styledict['Symbol']] = symb
        # change edge color
        if edc != None:
            self.style[i][self.styledict['Edge color']] = edc
        # change face color
        if fac != None:
            self.style[i][self.styledict['Face color']] = fac
        # change symbol size
        if smbsz != None:
            self.style[i][self.styledict['Symbol size']] = smbsz
        # change edge width
        if edw != None:
            self.style[i][self.styledict['Edge width']] = edw
        # change label
        if lab != None:
self.style[i][self.styledict['Label']] = lab | def style_chg_label(self,type,symb=None,edc=None,fac=None,smbsz=None,edw=None,lab=None) | This routine changes the plotting style that is set by default.
The style is changed according the the label that you choose.
Changing according to reference, use style_chg_ref() function!
You can change it back to default by resetting the filter using
g.reset_filter() routine, assuming that g is your instance. The
format that is used here is:
['Symbol', 'Edge color', 'Face color', 'Symbol size', 'Edge width'
,'Label']
You can see the current styles by running
Attention: You have to give values to all variables that are
compatible with the python mathplotlib. If not, it's your fault
if nothing works.
g.style
Parameters
----------
type : string
Select the label of the grains you want to change.
symb : string, optional
Select new symbol. None for no change.
edc : string, optional
Select new edge color. None for no change.
fac : string, optional
Select new face color. None for no change.
smbsz : string, optional
Select new symbol size. None for no change.
edw : string, optional
Select new edge width. None for no change.
lab : string, optional
Select new label. None for no change. Watch out, if you
want to do more specifications later, the type will
have changed to the new label. | 3.85351 | 1.300826 | 2.962356 |
'''
This routine changes the plotting style that is set by default.
The style is changed according the the reference of the paper
as given in the grain database. For change according to type of
grain, use the routine syle_chg_label().
['Symbol', 'Edge color', 'Face color', 'Symbol size', 'Edge width'
,'Label']
You can see the current styles by running
Attention: You have to give values to all variables that are
compatible with the python mathplotlib. If not, it's your fault
if nothing works.
g.style
Parameters
----------
ref : string
Select the reference of the grains you want to change.
symb : string, optional
Select new symbol. None for no change.
edc : string, optional
Select new edge color. None for no change.
fac : string, optional
Select new face color. None for no change.
smbsz : string, optional
Select new symbol size. None for no change.
edw : string, optional
Select new edge width. None for no change.
lab : string, optional
Select new label. None for no change.
'''
# do stuff for selected reference
for i in range(len(self.style)):
# check if reference is correct, otherwise continue directly
if self.desc[i][self.descdict['Reference']] == ref:
# change symbol:
if symb != None:
self.style[i][self.styledict['Symbol']] = symb
# change edge color
if edc != None:
self.style[i][self.styledict['Edge color']] = edc
# change face color
if fac != None:
self.style[i][self.styledict['Face color']] = fac
# change symbol size
if smbsz != None:
self.style[i][self.styledict['Symbol size']] = smbsz
# change edge width
if edw != None:
self.style[i][self.styledict['Edge width']] = edw
# change label
if lab != None:
self.style[i][self.styledict['Label']] = lab | def style_chg_ref(self,ref,symb=None,edc=None,fac=None,smbsz=None,edw=None,lab=None) | This routine changes the plotting style that is set by default.
The style is changed according the the reference of the paper
as given in the grain database. For change according to type of
grain, use the routine syle_chg_label().
['Symbol', 'Edge color', 'Face color', 'Symbol size', 'Edge width'
,'Label']
You can see the current styles by running
Attention: You have to give values to all variables that are
compatible with the python mathplotlib. If not, it's your fault
if nothing works.
g.style
Parameters
----------
ref : string
Select the reference of the grains you want to change.
symb : string, optional
Select new symbol. None for no change.
edc : string, optional
Select new edge color. None for no change.
fac : string, optional
Select new face color. None for no change.
smbsz : string, optional
Select new symbol size. None for no change.
edw : string, optional
Select new edge width. None for no change.
lab : string, optional
Select new label. None for no change. | 3.301814 | 1.373962 | 2.403133 |
'''
This routine checks if the requested set of isotopes is
available in the dataset.
Parameters
----------
isos : list
set of isotopes in format ['Si-28','Si-30'].
Returns
-------
list
[index, delta_b, ratio_b].
index: where is it.
delta_b: is it a delta value or not?
ratio_ib: True if ratio is inverted, false if not
'''
# make names
iso1name = iso_name_converter(isos[0])
iso2name = iso_name_converter(isos[1])
ratio = iso1name + '/' + iso2name
ratio_inv = iso2name + '/' + iso1name
delta = 'd(' + iso1name + '/' + iso2name + ')'
delta_inv = 'd(' + iso2name + '/' + iso1name + ')'
index = -1
# search for data entry
try:
index = self.datadict[ratio]
delta_b = False
ratio_b = False
except KeyError:
try:
index = self.datadict[ratio_inv]
delta_b = False
ratio_b = True
except KeyError:
try:
index = self.datadict[delta]
delta_b = True
ratio_b = False
except KeyError:
try:
index = self.datadict[delta_inv]
delta_b = True
ratio_b = True
except KeyError:
index = -1
delta_b = None
ratio_b = None
return index, delta_b, ratio_b | def check_availability(self, isos) | This routine checks if the requested set of isotopes is
available in the dataset.
Parameters
----------
isos : list
set of isotopes in format ['Si-28','Si-30'].
Returns
-------
list
[index, delta_b, ratio_b].
index: where is it.
delta_b: is it a delta value or not?
ratio_ib: True if ratio is inverted, false if not | 3.02274 | 1.676076 | 1.803462 |
'''
Transforms an isotope ratio into a delta value
Parameters
----------
isos_ss: list or float
list w/ isotopes, e.g., ['N-14','N-15'] OR the solar
system ratio.
ratio : float
ratio of the isotopes to transform.
oneover : boolean
take the inverse of the ratio before transforming (never
inverse of delta value!). The default is False.
Returns
-------
float
delta value
'''
# define if isos_ss is the ratio or the isotopes
if type(isos_ss) == float:
ss_ratio = isos_ss
elif type(isos_ss) == list:
ss_ratio = self.inut.isoratio_init(isos_ss)
else:
print('Check input of isos_ss into ratio_to_delta routine')
return None
# check if one over is necessary or not
if oneover:
ratio = old_div(1,ratio)
# calculate delta value
delta = (old_div(ratio, ss_ratio) - 1.) * 1000.
return delta | def ratio_to_delta(self, isos_ss, ratio, oneover=False) | Transforms an isotope ratio into a delta value
Parameters
----------
isos_ss: list or float
list w/ isotopes, e.g., ['N-14','N-15'] OR the solar
system ratio.
ratio : float
ratio of the isotopes to transform.
oneover : boolean
take the inverse of the ratio before transforming (never
inverse of delta value!). The default is False.
Returns
-------
float
delta value | 4.669556 | 2.326797 | 2.00686 |
'''
Returns a new service which will process requests with the specified
filter. Filtering operations can include logging, automatic retrying,
etc... The filter is a lambda which receives the HTTPRequest and
another lambda. The filter can perform any pre-processing on the
request, pass it off to the next lambda, and then perform any
post-processing on the response.
:param function(request) filter: A filter function.
:return: A new service using the specified filter.
:rtype: a subclass of :class:`StorageClient`
'''
res = copy.deepcopy(self)
old_filter = self._filter
def new_filter(request):
return filter(request, old_filter)
res._filter = new_filter
return res | def with_filter(self, filter) | Returns a new service which will process requests with the specified
filter. Filtering operations can include logging, automatic retrying,
etc... The filter is a lambda which receives the HTTPRequest and
another lambda. The filter can perform any pre-processing on the
request, pass it off to the next lambda, and then perform any
post-processing on the response.
:param function(request) filter: A filter function.
:return: A new service using the specified filter.
:rtype: a subclass of :class:`StorageClient` | 5.770224 | 1.552116 | 3.71765 |
'''
Sends the request and return response. Catches HTTPError and hands it
to error handler
'''
try:
resp = self._filter(request)
if sys.version_info >= (3,) and isinstance(resp, bytes) and \
encoding:
resp = resp.decode(encoding)
# Parse and wrap HTTP errors in AzureHttpError which inherits from AzureException
except HTTPError as ex:
_storage_error_handler(ex)
# Wrap all other exceptions as AzureExceptions to ease exception handling code
except Exception as ex:
if sys.version_info >= (3,):
# Automatic chaining in Python 3 means we keep the trace
raise AzureException
else:
# There isn't a good solution in 2 for keeping the stack trace
# in general, or that will not result in an error in 3
# However, we can keep the previous error type and message
# TODO: In the future we will log the trace
raise AzureException('{}: {}'.format(ex.__class__.__name__, ex.args[0]))
return resp | def _perform_request(self, request, encoding='utf-8') | Sends the request and return response. Catches HTTPError and hands it
to error handler | 8.388593 | 6.73937 | 1.244715 |
cred = self.snmp_credentials
if cred is not None:
if cred.get('snmp_inspection') is True:
if not all([cred.get('auth_user'),
cred.get('auth_prot_pp'),
cred.get('auth_priv_pp')]):
msg = self._('Either few or all mandatory '
'SNMP credentials '
'are missing.')
LOG.error(msg)
raise exception.IloInvalidInputError(msg)
try:
auth_protocol = cred['auth_protocol']
if auth_protocol not in ["SHA", "MD5"]:
msg = self._('Invalid SNMP auth protocol '
'provided. '
'Valid values are SHA or MD5')
LOG.error(msg)
raise exception.IloInvalidInputError(msg)
except KeyError:
msg = self._('Auth protocol not provided by user. '
'The default value of MD5 will '
'be considered.')
LOG.debug(msg)
pass
try:
priv_protocol = cred['priv_protocol']
if priv_protocol not in ["AES", "DES"]:
msg = self._('Invalid SNMP privacy protocol '
'provided. '
'Valid values are AES or DES')
LOG.error(msg)
raise exception.IloInvalidInputError(msg)
except KeyError:
msg = self._('Privacy protocol not provided '
'by user. '
'The default value of DES will '
'be considered.')
LOG.debug(msg)
pass
else:
LOG.debug(self._('snmp_inspection set to False. SNMP'
'inspection will not be performed.'))
else:
LOG.debug(self._('SNMP credentials not provided. SNMP '
'inspection will not be performed.')) | def _validate_snmp(self) | Validates SNMP credentials.
:raises exception.IloInvalidInputError | 2.378495 | 2.296817 | 1.035561 |
if self.use_redfish_only:
if method_name in SUPPORTED_REDFISH_METHODS:
the_operation_object = self.redfish
else:
raise NotImplementedError()
else:
the_operation_object = self.ribcl
if 'Gen10' in self.model:
if method_name in SUPPORTED_REDFISH_METHODS:
the_operation_object = self.redfish
else:
if (self.is_ribcl_enabled is not None
and not self.is_ribcl_enabled):
raise NotImplementedError()
elif ('Gen9' in self.model) and (method_name in
SUPPORTED_RIS_METHODS):
the_operation_object = self.ris
method = getattr(the_operation_object, method_name)
LOG.debug(self._("Using %(class)s for method %(method)s."),
{'class': type(the_operation_object).__name__,
'method': method_name})
return method(*args, **kwargs) | def _call_method(self, method_name, *args, **kwargs) | Call the corresponding method using RIBCL, RIS or REDFISH
Make the decision to invoke the corresponding method using RIBCL,
RIS or REDFISH way. In case of none, throw out ``NotImplementedError`` | 3.185365 | 2.665089 | 1.195219 |
return self._call_method('set_iscsi_info', target_name, lun,
ip_address, port, auth_method, username,
password) | def set_iscsi_info(self, target_name, lun, ip_address,
port='3260', auth_method=None, username=None,
password=None) | Set iscsi details of the system in uefi boot mode.
The initiator system is set with the target details like
IQN, LUN, IP, Port etc.
:param target_name: Target Name for iscsi.
:param lun: logical unit number.
:param ip_address: IP address of the target.
:param port: port of the target.
:param auth_method : either None or CHAP.
:param username: CHAP Username for authentication.
:param password: CHAP secret.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the bios boot mode. | 2.387786 | 3.13638 | 0.761319 |
LOG.warning("'set_iscsi_boot_info' is deprecated. The 'MAC' parameter"
"passed in is ignored. Use 'set_iscsi_info' instead.")
return self._call_method('set_iscsi_info', target_name, lun,
ip_address, port, auth_method, username,
password) | def set_iscsi_boot_info(self, mac, target_name, lun, ip_address,
port='3260', auth_method=None, username=None,
password=None) | Set iscsi details of the system in uefi boot mode.
The initiator system is set with the target details like
IQN, LUN, IP, Port etc.
:param mac: The MAC of the NIC to be set with iSCSI information
:param target_name: Target Name for iscsi.
:param lun: logical unit number.
:param ip_address: IP address of the target.
:param port: port of the target.
:param auth_method : either None or CHAP.
:param username: CHAP Username for authentication.
:param password: CHAP secret.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the bios boot mode. | 4.191678 | 4.33966 | 0.9659 |
return self._call_method('set_vm_status', device, boot_option,
write_protect) | def set_vm_status(self, device='FLOPPY',
boot_option='BOOT_ONCE', write_protect='YES') | Sets the Virtual Media drive status and allows the
boot options for booting from the virtual media. | 3.259615 | 4.931328 | 0.661001 |
data = self._call_method('get_essential_properties')
if (data['properties']['local_gb'] == 0):
cred = self.snmp_credentials
if cred and cred.get('snmp_inspection'):
disksize = snmp.get_local_gb(self.host, cred)
if disksize:
data['properties']['local_gb'] = disksize
else:
msg = self._('SNMP inspection failed to '
'get the disk size. Returning '
'local_gb as 0.')
LOG.debug(msg)
else:
msg = self._("SNMP credentials were not set and "
"RIBCL/Redfish failed to get the disk size. "
"Returning local_gb as 0.")
LOG.debug(msg)
return data | def get_essential_properties(self) | Get the essential scheduling properties
:returns: a dictionary containing memory size, disk size,
number of cpus, cpu arch, port numbers and
mac addresses.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. | 5.086251 | 4.47202 | 1.13735 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.