sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def _readPPN(self, fname, sldir):
'''
Private method that reads in and organizes the .ppn file
Loads the data of the .ppn file into the variable cols.
'''
if sldir.endswith(os.sep):
#Making sure fname will be formatted correctly
fname = str(sldir)+str(fname)
else:
fname = str(sldir)+os.sep+str(fname)
self.sldir+=os.sep
f=open(fname,'r')
lines=f.readlines()
for i in range(len(lines)):
lines[i]=lines[i].strip()
cols = ['ISOTP', 'ABUNDANCE_MF'] #These are constant, .ppn files have no header to read from
for i in range(len(lines)):
if not lines[i].startswith('H'):
index = i-1
break
return cols, index | Private method that reads in and organizes the .ppn file
Loads the data of the .ppn file into the variable cols. | entailment |
def _readFile(self, fname, sldir):
'''
private method that reads in and organizes the .DAT file
Loads the data of the .DAT File into the variables cattrs and cols.
In both these cases they are dictionaries, but in the case of cols,
it is a dictionary of numpy array exect for the element ,
element_name where it is just a list
'''
cattrs=[]
if sldir.endswith(os.sep):
#Making sure fname will be formatted correctly
fname = str(sldir)+str(fname)
else:
fname = str(sldir)+os.sep+str(fname)
self.sldir+=os.sep
f=open(fname,'r')
lines=f.readlines()
for i in range(len(lines)):
lines[i]=lines[i].strip()
cols=lines[0].strip('H')
cols=cols.strip()
cols=cols.split()
for i in range(len(lines)):
if lines[i].startswith('#'):
# if it is a cycle attribute line
lines[i]=lines[i].strip('#')
tmp=lines[i].split()
tmp1=[]
for j in range(len(tmp)):
if tmp[j] != '=' or '':
tmp1.append(tmp[j])
tmp=tmp1
j=0
while j <len(tmp):
cattrs.append(tmp[j])
j+=2
elif not lines[i].startswith('H'):
index = i-1
break
return cattrs,cols, index | private method that reads in and organizes the .DAT file
Loads the data of the .DAT File into the variables cattrs and cols.
In both these cases they are dictionaries, but in the case of cols,
it is a dictionary of numpy array exect for the element ,
element_name where it is just a list | entailment |
def findFile(self, fname, numtype):
"""
Function that finds the associated file for fname when Fname is
time or NDump.
Parameters
----------
fname : string
The name of the file we are looking for.
numType : string
Designates how this function acts and how it interprets
fname. If numType is 'file', this function will get the
desired attribute from that file. If numType is 'cycNum',
this function will get the desired attribute from that file
with fname's model number.
"""
numType=numtype.upper()
if numType == 'FILE':
#do nothing
return fname
elif numType == 'CYCNUM':
try:
fname = int(fname)
except ValueError:
print('Improper choice:'+ str(fname))
print('Reselecting as 0')
fname = 0
print('Using '+self.files[fname])
try:
return self.files[self.indexp_cyc2filels[fname]]
except IndexError:
mods = array(self.get('mod'), dtype=int)
if fname not in mods:
print('You seem to try to plot a cycle that is not present: '+str(fname))
fname = mods[-1]
print('I will assume you want to plot the last cycle in the run: '+str(fname))
print('[I am not 100% sure this escape is debugged. You better do this again with')
print('the correct input.]')
return self.files[fname] | Function that finds the associated file for fname when Fname is
time or NDump.
Parameters
----------
fname : string
The name of the file we are looking for.
numType : string
Designates how this function acts and how it interprets
fname. If numType is 'file', this function will get the
desired attribute from that file. If numType is 'cycNum',
this function will get the desired attribute from that file
with fname's model number. | entailment |
def _retry(self, context, backoff):
'''
A function which determines whether and how to retry.
:param ~azure.storage.models.RetryContext context:
The retry context. This contains the request, response, and other data
which can be used to determine whether or not to retry.
:param function() backoff:
A function which returns the backoff time if a retry is to be performed.
:return:
An integer indicating how long to wait before retrying the request,
or None to indicate no retry should be performed.
:rtype: int or None
'''
# If the context does not contain a count parameter, this request has not
# been retried yet. Add the count parameter to track the number of retries.
if not hasattr(context, 'count'):
context.count = 0
# Determine whether to retry, and if so increment the count, modify the
# request as desired, and return the backoff.
if self._should_retry(context):
context.count += 1
# If retry to secondary is enabled, attempt to change the host if the
# request allows it
if self.retry_to_secondary:
self._set_next_host_location(context)
return backoff(context)
return None | A function which determines whether and how to retry.
:param ~azure.storage.models.RetryContext context:
The retry context. This contains the request, response, and other data
which can be used to determine whether or not to retry.
:param function() backoff:
A function which returns the backoff time if a retry is to be performed.
:return:
An integer indicating how long to wait before retrying the request,
or None to indicate no retry should be performed.
:rtype: int or None | entailment |
def _convert_xml_to_service_stats(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<StorageServiceStats>
<GeoReplication>
<Status>live|bootstrap|unavailable</Status>
<LastSyncTime>sync-time|<empty></LastSyncTime>
</GeoReplication>
</StorageServiceStats>
'''
if response is None or response.body is None:
return None
service_stats_element = ETree.fromstring(response.body)
geo_replication_element = service_stats_element.find('GeoReplication')
geo_replication = GeoReplication()
geo_replication.status = geo_replication_element.find('Status').text
geo_replication.last_sync_time = parser.parse(geo_replication_element.find('LastSyncTime').text)
service_stats = ServiceStats()
service_stats.geo_replication = geo_replication
return service_stats | <?xml version="1.0" encoding="utf-8"?>
<StorageServiceStats>
<GeoReplication>
<Status>live|bootstrap|unavailable</Status>
<LastSyncTime>sync-time|<empty></LastSyncTime>
</GeoReplication>
</StorageServiceStats> | entailment |
def _get_firmware_update_element(self):
"""Get the url for firmware update
:returns: firmware update url
:raises: Missing resource error on missing url
"""
fw_update_action = self._actions.update_firmware
if not fw_update_action:
raise (sushy.exceptions.
MissingActionError(action='#UpdateService.SimpleUpdate',
resource=self._path))
return fw_update_action | Get the url for firmware update
:returns: firmware update url
:raises: Missing resource error on missing url | entailment |
def flash_firmware(self, redfish_inst, file_url):
"""Perform firmware flashing on a redfish system
:param file_url: url to firmware bits.
:param redfish_inst: redfish instance
:raises: IloError, on an error from iLO.
"""
action_data = {
'ImageURI': file_url,
}
target_uri = self._get_firmware_update_element().target_uri
try:
self._conn.post(target_uri, data=action_data)
except sushy.exceptions.SushyError as e:
msg = (('The Redfish controller failed to update firmware '
'with file %(file)s Error %(error)s') %
{'file': file_url, 'error': str(e)})
LOG.debug(msg) # noqa
raise exception.IloError(msg)
self.wait_for_redfish_firmware_update_to_complete(redfish_inst)
try:
state, percent = self.get_firmware_update_progress()
except sushy.exceptions.SushyError as e:
msg = ('Failed to get firmware progress update '
'Error %(error)s' % {'error': str(e)})
LOG.debug(msg)
raise exception.IloError(msg)
if state == "Error":
msg = 'Unable to update firmware'
LOG.debug(msg) # noqa
raise exception.IloError(msg)
elif state == "Unknown":
msg = 'Status of firmware update not known'
LOG.debug(msg) # noqa
else: # "Complete" | "Idle"
LOG.info('Flashing firmware file: %s ... done', file_url) | Perform firmware flashing on a redfish system
:param file_url: url to firmware bits.
:param redfish_inst: redfish instance
:raises: IloError, on an error from iLO. | entailment |
def wait_for_redfish_firmware_update_to_complete(self, redfish_object):
"""Continuously polls for iLO firmware update to complete.
:param redfish_object: redfish instance
"""
p_state = ['Idle']
c_state = ['Idle']
def has_firmware_flash_completed():
"""Checks for completion status of firmware update operation
The below table shows the conditions for which the firmware update
will be considered as DONE (be it success or error)::
+-----------------------------------+-----------------------------+
| Previous state | Current state |
+===================================+=============================+
| Idle | Error, Complete |
+-----------------------------------+-----------------------------+
| Updating, Verifying, | Complete, Error, |
| Uploading, Writing | Unknown, Idle |
+-----------------------------------+-----------------------------+
:returns: True upon firmware update completion otherwise False
"""
curr_state, curr_percent = self.get_firmware_update_progress()
p_state[0] = c_state[0]
c_state[0] = curr_state
if (((p_state[0] in ['Updating', 'Verifying',
'Uploading', 'Writing'])
and (c_state[0] in ['Complete', 'Error',
'Unknown', 'Idle']))
or (p_state[0] == 'Idle' and (c_state[0] in
['Complete', 'Error']))):
return True
return False
common.wait_for_operation_to_complete(
has_firmware_flash_completed,
delay_bw_retries=30,
failover_msg='iLO firmware update has failed.'
)
common.wait_for_ilo_after_reset(redfish_object) | Continuously polls for iLO firmware update to complete.
:param redfish_object: redfish instance | entailment |
def get_firmware_update_progress(self):
"""Get the progress of the firmware update.
:returns: firmware update state, one of the following values:
"Idle","Uploading","Verifying","Writing",
"Updating","Complete","Error".
If the update resource is not found, then "Unknown".
:returns: firmware update progress percent
"""
# perform refresh
try:
self.refresh()
except sushy.exceptions.SushyError as e:
msg = (('Progress of firmware update not known. '
'Error %(error)s') %
{'error': str(e)})
LOG.debug(msg)
return "Unknown", "Unknown"
# NOTE: Percentage is returned None after firmware flash is completed.
return (self.firmware_state, self.firmware_percentage) | Get the progress of the firmware update.
:returns: firmware update state, one of the following values:
"Idle","Uploading","Verifying","Writing",
"Updating","Complete","Error".
If the update resource is not found, then "Unknown".
:returns: firmware update progress percent | entailment |
def pending_settings(self):
"""Property to provide reference to bios_pending_settings instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset.
"""
return BIOSPendingSettings(
self._conn, utils.get_subresource_path_by(
self, ["@Redfish.Settings", "SettingsObject"]),
redfish_version=self.redfish_version) | Property to provide reference to bios_pending_settings instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset. | entailment |
def boot_settings(self):
"""Property to provide reference to bios boot instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset.
"""
return BIOSBootSettings(
self._conn, utils.get_subresource_path_by(
self, ["Oem", "Hpe", "Links", "Boot"]),
redfish_version=self.redfish_version) | Property to provide reference to bios boot instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset. | entailment |
def iscsi_resource(self):
"""Property to provide reference to bios iscsi resource instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset.
"""
return iscsi.ISCSIResource(
self._conn, utils.get_subresource_path_by(
self, ["Oem", "Hpe", "Links", "iScsi"]),
redfish_version=self.redfish_version) | Property to provide reference to bios iscsi resource instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset. | entailment |
def bios_mappings(self):
"""Property to provide reference to bios mappings instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset.
"""
return BIOSMappings(
self._conn, utils.get_subresource_path_by(
self, ["Oem", "Hpe", "Links", "Mappings"]),
redfish_version=self.redfish_version) | Property to provide reference to bios mappings instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset. | entailment |
def _get_base_configs(self):
"""Method that returns object of bios base configs."""
return BIOSBaseConfigs(
self._conn, utils.get_subresource_path_by(
self, ["Oem", "Hpe", "Links", "BaseConfigs"]),
redfish_version=self.redfish_version) | Method that returns object of bios base configs. | entailment |
def set_pending_boot_mode(self, boot_mode):
"""Sets the boot mode of the system for next boot.
:param boot_mode: either sys_cons.BIOS_BOOT_MODE_LEGACY_BIOS,
sys_cons.BIOS_BOOT_MODE_UEFI.
"""
bios_properties = {
'BootMode': mappings.GET_BIOS_BOOT_MODE_MAP_REV.get(boot_mode)
}
if boot_mode == sys_cons.BIOS_BOOT_MODE_UEFI:
bios_properties['UefiOptimizedBoot'] = 'Enabled'
self.update_bios_data_by_patch(bios_properties) | Sets the boot mode of the system for next boot.
:param boot_mode: either sys_cons.BIOS_BOOT_MODE_LEGACY_BIOS,
sys_cons.BIOS_BOOT_MODE_UEFI. | entailment |
def update_bios_data_by_post(self, data):
"""Update bios data by post
:param data: default bios config data
"""
bios_settings_data = {
'Attributes': data
}
self._conn.post(self.path, data=bios_settings_data) | Update bios data by post
:param data: default bios config data | entailment |
def update_bios_data_by_patch(self, data):
"""Update bios data by patch
:param data: default bios config data
"""
bios_settings_data = {
'Attributes': data
}
self._conn.patch(self.path, data=bios_settings_data) | Update bios data by patch
:param data: default bios config data | entailment |
def get_persistent_boot_device(self):
"""Get current persistent boot device set for the host
:returns: persistent boot device for the system
:raises: IloError, on an error from iLO.
"""
boot_string = None
if not self.persistent_boot_config_order or not self.boot_sources:
msg = ('Boot sources or persistent boot config order not found')
LOG.debug(msg)
raise exception.IloError(msg)
preferred_boot_device = self.persistent_boot_config_order[0]
for boot_source in self.boot_sources:
if ((boot_source.get("StructuredBootString") is not None) and (
preferred_boot_device ==
boot_source.get("StructuredBootString"))):
boot_string = boot_source["BootString"]
break
else:
msg = (('Persistent boot device failed, as no matched boot '
'sources found for device: %(persistent_boot_device)s')
% {'persistent_boot_device': preferred_boot_device})
LOG.debug(msg)
raise exception.IloError(msg)
for key, value in BOOT_SOURCE_TARGET_TO_PARTIAL_STRING_MAP.items():
for val in value:
if val in boot_string:
return key
return sushy.BOOT_SOURCE_TARGET_NONE | Get current persistent boot device set for the host
:returns: persistent boot device for the system
:raises: IloError, on an error from iLO. | entailment |
def get_uefi_boot_string(self, mac):
"""Get uefi iscsi boot string for the host
:returns: iscsi boot string for the system
:raises: IloError, on an error from iLO.
"""
boot_sources = self.boot_sources
if not boot_sources:
msg = ('Boot sources are not found')
LOG.debug(msg)
raise exception.IloError(msg)
for boot_source in boot_sources:
if (mac.upper() in boot_source['UEFIDevicePath'] and
'iSCSI' in boot_source['UEFIDevicePath']):
return boot_source['StructuredBootString']
else:
msg = ('MAC provided "%s" is Invalid' % mac)
raise exception.IloInvalidInputError(msg) | Get uefi iscsi boot string for the host
:returns: iscsi boot string for the system
:raises: IloError, on an error from iLO. | entailment |
def visc_mol_sol(T,rho,X):
'''
Molecular plasma viscosity (Spitzer 1962)
Parameters
----------
X : float
H mass fraction
T : float
temperature in K
rho : float
density in cgs
Returns
-------
nu : float
molecular diffusivity in [cm**2/s]
Notes
-----
According to Eq 22 in Schatzman (1977). Assume log Lambda = 15.
(see Table 5.1), a H/He mix (for different mix use Eq. 5.54 in
Spitzer text book)
Examples
--------
see astronomy.visc_rad_kap_sc
'''
visc_mol = 1.84e-17*(1.+7.*X)*(old_div(T**2.5,rho))
return visc_mol | Molecular plasma viscosity (Spitzer 1962)
Parameters
----------
X : float
H mass fraction
T : float
temperature in K
rho : float
density in cgs
Returns
-------
nu : float
molecular diffusivity in [cm**2/s]
Notes
-----
According to Eq 22 in Schatzman (1977). Assume log Lambda = 15.
(see Table 5.1), a H/He mix (for different mix use Eq. 5.54 in
Spitzer text book)
Examples
--------
see astronomy.visc_rad_kap_sc | entailment |
def visc_rad_kap_sc(T,rho,X):
'''
Radiative viscosity (Thomas, 1930) for e- scattering opacity
Parameters
----------
X : float
H mass fraction
T : float
temperature in K
rho : float
density in cgs
Returns
-------
nu : float
radiative diffusivity in [cm**2/s]
Examples
--------
>>> In [1]: import astronomy as ast
>>> In [2]: l = 100*1.e5 # 100km
>>> In [3]: v = 1.e5 # typical velocity
>>> In [4]: T = 90.e6 # temperature
>>> In [5]: X = 0.001 # H mass fraction
>>> In [6]: rho = 100. # density
>>> In [7]: nu = ast.visc_rad_kap_sc(T,rho,X)
>>> In [8]: Re=v*l/nu
>>> In [9]: print "Re_rad = "+str('%g'%Re)
>>> Re_rad = 4.43512e+08
Notes
-----
Eqn. 14' in Schatzman, 1977, assume electron scattering opacity
kappa_sc = 0.2*(1+X), Kippenhahn (2nd edn, Eqn 17.2)
'''
kappa = 0.2*(1.+X)
nu_rad = 6.88e-26*(old_div(T**4,(kappa*rho**2)))
return nu_rad | Radiative viscosity (Thomas, 1930) for e- scattering opacity
Parameters
----------
X : float
H mass fraction
T : float
temperature in K
rho : float
density in cgs
Returns
-------
nu : float
radiative diffusivity in [cm**2/s]
Examples
--------
>>> In [1]: import astronomy as ast
>>> In [2]: l = 100*1.e5 # 100km
>>> In [3]: v = 1.e5 # typical velocity
>>> In [4]: T = 90.e6 # temperature
>>> In [5]: X = 0.001 # H mass fraction
>>> In [6]: rho = 100. # density
>>> In [7]: nu = ast.visc_rad_kap_sc(T,rho,X)
>>> In [8]: Re=v*l/nu
>>> In [9]: print "Re_rad = "+str('%g'%Re)
>>> Re_rad = 4.43512e+08
Notes
-----
Eqn. 14' in Schatzman, 1977, assume electron scattering opacity
kappa_sc = 0.2*(1+X), Kippenhahn (2nd edn, Eqn 17.2) | entailment |
def Gamma1_gasrad(beta):
'''
Gamma1 for a mix of ideal gas and radiation
Hansen & Kawaler, page 177, Eqn. 3.110
Parameters
----------
beta : float
Gas pressure fraction Pgas/(Pgas+Prad)
'''
Gamma3minus1 = (old_div(2.,3.))*(4.-3.*beta)/(8.-7.*beta)
Gamma1 = beta + (4.-3.*beta) * Gamma3minus1
return Gamma1 | Gamma1 for a mix of ideal gas and radiation
Hansen & Kawaler, page 177, Eqn. 3.110
Parameters
----------
beta : float
Gas pressure fraction Pgas/(Pgas+Prad) | entailment |
def Pgas(rho,T,mu):
'''
P = R/mu * rho * T
Parameters
----------
mu : float
Mean molecular weight
rho : float
Density [cgs]
T : float
Temperature [K]
'''
R = old_div(boltzmann_constant, atomic_mass_unit)
return (old_div(R,mu)) * rho * T | P = R/mu * rho * T
Parameters
----------
mu : float
Mean molecular weight
rho : float
Density [cgs]
T : float
Temperature [K] | entailment |
def mimf_ferrario(mi):
''' Curvature MiMf from Ferrario etal. 2005MNRAS.361.1131.'''
mf=-0.00012336*mi**6+0.003160*mi**5-0.02960*mi**4+\
0.12350*mi**3-0.21550*mi**2+0.19022*mi+0.46575
return mf | Curvature MiMf from Ferrario etal. 2005MNRAS.361.1131. | entailment |
def imf(m):
'''
Returns
-------
N(M)dM
for given mass according to Kroupa IMF, vectorization
available via vimf()
'''
m1 = 0.08; m2 = 0.50
a1 = 0.30; a2 = 1.30; a3 = 2.3
const2 = m1**-a1 -m1**-a2
const3 = m2**-a2 -m2**-a3
if m < 0.08:
alpha = 0.3
const = -const2 -const3
elif m < 0.50:
alpha = 1.3
const = -const3
else:
alpha = 2.3
const = 0.0
# print m,alpha, const, m**-alpha + const
return m**-alpha + const | Returns
-------
N(M)dM
for given mass according to Kroupa IMF, vectorization
available via vimf() | entailment |
def int_imf_dm(m1,m2,m,imf,bywhat='bymass',integral='normal'):
'''
Integrate IMF between m1 and m2.
Parameters
----------
m1 : float
Min mass
m2 : float
Max mass
m : float
Mass array
imf : float
IMF array
bywhat : string, optional
'bymass' integrates the mass that goes into stars of
that mass interval; or 'bynumber' which integrates the number
of stars in that mass interval. The default is 'bymass'.
integrate : string, optional
'normal' uses sc.integrate.trapz; 'cum' returns cumulative
trapezoidal integral. The default is 'normal'.
'''
ind_m = (m >= min(m1,m2)) & (m <= max(m1,m2))
if integral is 'normal':
int_func = sc.integrate.trapz
elif integral is 'cum':
int_func = sc.integrate.cumtrapz
else:
print("Error in int_imf_dm: don't know how to integrate")
return 0
if bywhat is 'bymass':
return int_func(m[ind_m]*imf[ind_m],m[ind_m])
elif bywhat is 'bynumber':
return int_func(imf[ind_m],m[ind_m])
else:
print("Error in int_imf_dm: don't know by what to integrate")
return 0 | Integrate IMF between m1 and m2.
Parameters
----------
m1 : float
Min mass
m2 : float
Max mass
m : float
Mass array
imf : float
IMF array
bywhat : string, optional
'bymass' integrates the mass that goes into stars of
that mass interval; or 'bynumber' which integrates the number
of stars in that mass interval. The default is 'bymass'.
integrate : string, optional
'normal' uses sc.integrate.trapz; 'cum' returns cumulative
trapezoidal integral. The default is 'normal'. | entailment |
def am_orb(m1,m2,a,e):
'''
orbital angular momentum.
e.g Ge etal2010
Parameters
----------
m1, m2 : float
Masses of both stars in Msun.
A : float
Separation in Rsun.
e : float
Eccentricity
'''
a_cm = a * rsun_cm
m1_g = m1 * msun_g
m2_g = m2 * msun_g
J_orb=np.sqrt(grav_const*a_cm*(old_div((m1_g**2*m2_g**2),(m1_g+m2_g))))*(1-e**2)
return J_orb | orbital angular momentum.
e.g Ge etal2010
Parameters
----------
m1, m2 : float
Masses of both stars in Msun.
A : float
Separation in Rsun.
e : float
Eccentricity | entailment |
def mass_loss_loon05(L,Teff):
'''
mass loss rate van Loon etal (2005).
Parameters
----------
L : float
L in L_sun.
Teff : float
Teff in K.
Returns
-------
Mdot
Mdot in Msun/yr
Notes
-----
ref: van Loon etal 2005, A&A 438, 273
'''
Mdot = -5.65 + np.log10(old_div(L,10.**4)) -6.3*np.log10(old_div(Teff,3500.))
return Mdot | mass loss rate van Loon etal (2005).
Parameters
----------
L : float
L in L_sun.
Teff : float
Teff in K.
Returns
-------
Mdot
Mdot in Msun/yr
Notes
-----
ref: van Loon etal 2005, A&A 438, 273 | entailment |
def energ_orb(m1,m2,r):
'''
Parameters
----------
m1, m2 : float
M in Msun.
r : float
Distance in Rsun.
Returns
-------
Epot
Epot in erg.
'''
epo = -grav_const * m1 * m2 * msun_g**2 / (r * rsun_cm)
return epo | Parameters
----------
m1, m2 : float
M in Msun.
r : float
Distance in Rsun.
Returns
-------
Epot
Epot in erg. | entailment |
def period(A,M1,M2):
"""
calculate binary period from separation.
Parameters
----------
A : float
separation A Rsun.
M1, M2 : float
M in Msun.
Returns
-------
p
period in days.
"""
A *= rsun_cm
print(A)
velocity = np.sqrt(grav_const*msun_g*(M1+M2)/A)
print(old_div(velocity,1.e5))
p = 2.*np.pi * A / velocity
p /= (60*60*24.)
return p | calculate binary period from separation.
Parameters
----------
A : float
separation A Rsun.
M1, M2 : float
M in Msun.
Returns
-------
p
period in days. | entailment |
def escape_velocity(M,R):
"""
escape velocity.
Parameters
----------
M : float
Mass in solar masses.
R : float
Radius in solar radiu.
Returns
-------
v_escape
in km/s.
"""
ve = np.sqrt(2.*grav_const*M*msun_g/(R*rsun_cm))
ve = ve*1.e-5
return ve | escape velocity.
Parameters
----------
M : float
Mass in solar masses.
R : float
Radius in solar radiu.
Returns
-------
v_escape
in km/s. | entailment |
def Nasv(macs,T):
'''
Returns
-------
Na*<sigma v>
for MACS [mb] at T [K].
'''
Na = avogadro_constant
k = boltzmann_constant
vtherm=(2.*k*T/mass_H_atom)**0.5
s = macs*1.e-27
Nasv = s*vtherm*Na
return Nasv | Returns
-------
Na*<sigma v>
for MACS [mb] at T [K]. | entailment |
def macs(nasv,T):
'''
Returns
-------
MACS
[mb] at T [K] from Na*<sigma v>.
'''
Na = avogadro_constant
k = boltzmann_constant
vtherm=(2.*k*T/mass_H_atom)**0.5
s = old_div(nasv,(vtherm*Na))
macs = s*1.e27
return macs | Returns
-------
MACS
[mb] at T [K] from Na*<sigma v>. | entailment |
def mu_e(X):
'''
mean molecular weight per free electron, assuming full ionisation, and
approximating mu_i/Z_i ~ 2 for all elements heavier then Helium.
(Kippenhahn & Weigert, Ch 13.1, Eq. 13.8)
Parameters
----------
X : float
Mass fraction of H.
'''
try:
mu_e = old_div(2.,(1.+X))
except TypeError:
X=np.array([X])
mu_e = old_div(2.,(1.+X))
return mu_e | mean molecular weight per free electron, assuming full ionisation, and
approximating mu_i/Z_i ~ 2 for all elements heavier then Helium.
(Kippenhahn & Weigert, Ch 13.1, Eq. 13.8)
Parameters
----------
X : float
Mass fraction of H. | entailment |
def mu(X,Z,A):
'''
mean molecular weight assuming full ionisation.
(Kippenhahn & Weigert, Ch 13.1, Eq. 13.6)
Parameters
----------
X : float
Mass fraction vector.
Z : float
Charge number vector.
A : float
Mass number vector.
'''
if not isinstance(Z,np.ndarray):
Z = np.array(Z)
if not isinstance(A,np.ndarray):
A = np.array(A)
if not isinstance(X,np.ndarray):
X = np.array(X)
try:
mu = old_div(1.,sum(X*(1.+Z)/A))
except TypeError:
X=np.array([X])
A=np.array([A])
Z=np.array([Z])
mu = old_div(1.,sum(X*(1.+Z)/A))
return mu | mean molecular weight assuming full ionisation.
(Kippenhahn & Weigert, Ch 13.1, Eq. 13.6)
Parameters
----------
X : float
Mass fraction vector.
Z : float
Charge number vector.
A : float
Mass number vector. | entailment |
def Trho_iddeg(rho,mu,mu_e):
'''
T(rho) that separates ideal gas and degenerate pressure dominated regions.
Kippenhahn & Weigert, Eq. 16.6
Parameters
----------
rho : float
Density array [cgs].
mu : float
Mean molecular weight.
mu_e : float
Mean molecular weight per free electron.
'''
T = 1.207E5 * rho**(old_div(2.,3.)) * mu / mu_e**(old_div(5.,3.))
return T | T(rho) that separates ideal gas and degenerate pressure dominated regions.
Kippenhahn & Weigert, Eq. 16.6
Parameters
----------
rho : float
Density array [cgs].
mu : float
Mean molecular weight.
mu_e : float
Mean molecular weight per free electron. | entailment |
def _get_criteria_matching_disks(logical_disk, physical_drives):
"""Finds the physical drives matching the criteria of logical disk.
This method finds the physical drives matching the criteria
of the logical disk passed.
:param logical_disk: The logical disk dictionary from raid config
:param physical_drives: The physical drives to consider.
:returns: A list of physical drives which match the criteria
"""
matching_physical_drives = []
criteria_to_consider = [x for x in FILTER_CRITERIA
if x in logical_disk]
for physical_drive_object in physical_drives:
for criteria in criteria_to_consider:
logical_drive_value = logical_disk.get(criteria)
physical_drive_value = getattr(physical_drive_object, criteria)
if logical_drive_value != physical_drive_value:
break
else:
matching_physical_drives.append(physical_drive_object)
return matching_physical_drives | Finds the physical drives matching the criteria of logical disk.
This method finds the physical drives matching the criteria
of the logical disk passed.
:param logical_disk: The logical disk dictionary from raid config
:param physical_drives: The physical drives to consider.
:returns: A list of physical drives which match the criteria | entailment |
def allocate_disks(logical_disk, server, raid_config):
"""Allocate physical disks to a logical disk.
This method allocated physical disks to a logical
disk based on the current state of the server and
criteria mentioned in the logical disk.
:param logical_disk: a dictionary of a logical disk
from the RAID configuration input to the module.
:param server: An objects.Server object
:param raid_config: The target RAID configuration requested.
:raises: PhysicalDisksNotFoundError, if cannot find
physical disks for the request.
"""
size_gb = logical_disk['size_gb']
raid_level = logical_disk['raid_level']
number_of_physical_disks = logical_disk.get(
'number_of_physical_disks', constants.RAID_LEVEL_MIN_DISKS[raid_level])
share_physical_disks = logical_disk.get('share_physical_disks', False)
# Try to create a new independent array for this request.
for controller in server.controllers:
physical_drives = controller.unassigned_physical_drives
physical_drives = _get_criteria_matching_disks(logical_disk,
physical_drives)
if size_gb != "MAX":
# If we want to allocate for a logical disk for which size_gb is
# mentioned, we take the smallest physical drives which is required
# to match the criteria.
reverse_sort = False
physical_drives = [x for x in physical_drives
if x.size_gb >= size_gb]
else:
# If we want to allocate for a logical disk for which size_gb is
# MAX, we take the largest physical drives available.
reverse_sort = True
if len(physical_drives) >= number_of_physical_disks:
selected_drives = sorted(physical_drives, key=lambda x: x.size_gb,
reverse=reverse_sort)
selected_drive_ids = [x.id for x in selected_drives]
logical_disk['controller'] = controller.id
physical_disks = selected_drive_ids[:number_of_physical_disks]
logical_disk['physical_disks'] = physical_disks
return
# We didn't find physical disks to create an independent array.
# Check if we can get some shared arrays.
if share_physical_disks:
sharable_disk_wwns = []
for sharable_logical_disk in raid_config['logical_disks']:
if (sharable_logical_disk.get('share_physical_disks', False) and
'root_device_hint' in sharable_logical_disk):
wwn = sharable_logical_disk['root_device_hint']['wwn']
sharable_disk_wwns.append(wwn)
for controller in server.controllers:
sharable_arrays = [x for x in controller.raid_arrays if
x.logical_drives[0].wwn in sharable_disk_wwns]
for array in sharable_arrays:
# Check if criterias for the logical disk match the ones with
# physical disks in the raid array.
criteria_matched_disks = _get_criteria_matching_disks(
logical_disk, array.physical_drives)
# Check if all disks in the array don't match the criteria
if len(criteria_matched_disks) != len(array.physical_drives):
continue
# Check if raid array can accomodate the logical disk.
if array.can_accomodate(logical_disk):
logical_disk['controller'] = controller.id
logical_disk['array'] = array.id
return
# We check both options and couldn't get any physical disks.
raise exception.PhysicalDisksNotFoundError(size_gb=size_gb,
raid_level=raid_level) | Allocate physical disks to a logical disk.
This method allocated physical disks to a logical
disk based on the current state of the server and
criteria mentioned in the logical disk.
:param logical_disk: a dictionary of a logical disk
from the RAID configuration input to the module.
:param server: An objects.Server object
:param raid_config: The target RAID configuration requested.
:raises: PhysicalDisksNotFoundError, if cannot find
physical disks for the request. | entailment |
def trajectory_SgConst(Sg=0.1, delta_logt_dex=-0.01):
'''
setup trajectories for constant radiation entropy.
S_gamma/R where the radiation constant R = N_A*k
(Dave Arnett, Supernova book, p. 212)
This relates rho and T but the time scale for this
is independent.
Parameters
----------
Sg : float
S_gamma/R, values between 0.1 and 10. reflect conditions in
massive stars. The default is 0.1.
delta_logt_dex : float
Sets interval between time steps in dex of logtimerev. The
default is -0.01.
'''
# reverse logarithmic time
logtimerev=np.arange(5.,-6.,delta_logt_dex)
logrho=np.linspace(0,8.5,len(logtimerev))
logT = (old_div(1.,3.))*(logrho + 21.9161 + np.log10(Sg))
#rho_6=10**logrho/(0.1213*1.e6)
#T9=rho_6**(1./3.)
#logT_T3=np.log10(T9*1.e9)
pl.close(3);pl.figure(3);pl.plot(logrho,logT,label='$S/\mathrm{N_Ak}='+str(Sg)+'$')
pl.legend(loc=2);pl.xlabel('$\log \\rho$'); pl.ylabel('$\log T$')
pl.close(5);pl.figure(5);pl.plot(logtimerev, logrho)
pl.xlabel('$\log (t_\mathrm{final}-t)$'); pl.ylabel('$\log \\rho$')
pl.xlim(8,-6)
pl.close(6);pl.figure(6);pl.plot(logtimerev)
pl.ylabel('$\log (t_\mathrm{final}-t)$'); pl.xlabel('cycle')
# [t] logtimerev yrs
# [rho] cgs
# [T] K
T9=old_div(10**logT,1.e9)
data=[logtimerev,T9,logrho]
att.writeTraj(filename='trajectory.input', data=data, ageunit=2, tunit=1, rhounit=1, idNum=1) | setup trajectories for constant radiation entropy.
S_gamma/R where the radiation constant R = N_A*k
(Dave Arnett, Supernova book, p. 212)
This relates rho and T but the time scale for this
is independent.
Parameters
----------
Sg : float
S_gamma/R, values between 0.1 and 10. reflect conditions in
massive stars. The default is 0.1.
delta_logt_dex : float
Sets interval between time steps in dex of logtimerev. The
default is -0.01. | entailment |
def species_list(what_list):
'''
provide default lists of elements to plot.
what_list : string
String name of species lists provided.
If what_list is "CNONe", then C, N, O and some other light
elements.
If what_list is "s-process", then s-process indicators.
'''
if what_list is "CNONe":
list_to_print = ['H-1','He-4','C-12','N-14','O-16','Ne-20']
elif what_list is "sprocess":
list_to_print = ['Fe-56','Ge-70','Zn-70','Se-76','Kr-80','Kr-82','Kr-86','Sr-88','Ba-138','Pb-208']
elif what_list is "burn_stages":
list_to_print = ['H-1','He-4','C-12','O-16','Ne-20','Si-28']
elif what_list is "list_marco_1":
list_to_print = ['C-12','O-16','Ne-20','Ne-22','Na-23','Fe-54','Fe-56','Zn-70','Ge-70','Se-76','Kr-80','Kr-82','Sr-88','Y-89','Zr-96','Te-124','Xe-130','Xe-134','Ba-138']
return list_to_print | provide default lists of elements to plot.
what_list : string
String name of species lists provided.
If what_list is "CNONe", then C, N, O and some other light
elements.
If what_list is "s-process", then s-process indicators. | entailment |
def linestyle(i,a=5,b=3):
'''
provide one out of 25 unique combinations of style, color and mark
use in combination with markevery=a+mod(i,b) to add spaced points,
here a would be the base spacing that would depend on the data
density, modulated with the number of lines to be plotted (b)
Parameters
----------
i : integer
Number of linestyle combination - there are many....
a : integer
Spacing of marks. The default is 5.
b : integer
Modulation in case of plotting many nearby lines. The default
is 3.
Examples
--------
>>> plot(x,sin(x),linestyle(7)[0], markevery=linestyle(7)[1])
(c) 2014 FH
'''
lines=['-','--','-.',':']
points=['v','^','<','>','1','2','3','4','s','p','*','h','H','+','x','D','d','o']
colors=['b','g','r','c','m','k']
ls_string = colors[sc.mod(i,6)]+lines[sc.mod(i,4)]+points[sc.mod(i,18)]
mark_i = a+sc.mod(i,b)
return ls_string,int(mark_i) | provide one out of 25 unique combinations of style, color and mark
use in combination with markevery=a+mod(i,b) to add spaced points,
here a would be the base spacing that would depend on the data
density, modulated with the number of lines to be plotted (b)
Parameters
----------
i : integer
Number of linestyle combination - there are many....
a : integer
Spacing of marks. The default is 5.
b : integer
Modulation in case of plotting many nearby lines. The default
is 3.
Examples
--------
>>> plot(x,sin(x),linestyle(7)[0], markevery=linestyle(7)[1])
(c) 2014 FH | entailment |
def colourblind(i):
'''
colour pallete from http://tableaufriction.blogspot.ro/
allegedly suitable for colour-blind folk
SJ
'''
rawRGBs = [(162,200,236),
(255,128,14),
(171,171,171),
(95,158,209),
(89,89,89),
(0,107,164),
(255,188,121),
(207,207,207),
(200,82,0),
(137,137,137)]
scaledRGBs = []
for r in rawRGBs:
scaledRGBs.append((old_div(r[0],255.),old_div(r[1],255.),old_div(r[2],255.)))
idx = sc.mod(i,len(scaledRGBs))
return scaledRGBs[idx] | colour pallete from http://tableaufriction.blogspot.ro/
allegedly suitable for colour-blind folk
SJ | entailment |
def colourblind2(i):
'''
another colour pallete from http://www.sron.nl/~pault/
allegedly suitable for colour-blind folk
SJ
'''
hexcols = ['#332288', '#88CCEE', '#44AA99', '#117733', '#999933', '#DDCC77',
'#CC6677', '#882255', '#AA4499']
idx = sc.mod(i,len(hexcols))
return hexcols[idx] | another colour pallete from http://www.sron.nl/~pault/
allegedly suitable for colour-blind folk
SJ | entailment |
def linestylecb(i,a=5,b=3):
'''
version of linestyle function with colourblind colour scheme
returns linetyle, marker, color (see example)
Parameters
----------
i : integer
Number of linestyle combination - there are many....
a : integer
Spacing of marks. The default is 5.
b : integer
Modulation in case of plotting many nearby lines. The default
is 3.
Examples
--------
>>> plot(x,sin(x),ls=linestyle(7)[0], marker=linestyle(7)[1], \
color=linestyle(7)[2],markevery=linestyle(7)[3])
(c) 2014 FH
'''
lines=['-','--','-.',':']
points=['v','^','<','>','1','2','3','4','s','p','*','h','H','+','x','D','d','o']
colors=['b','g','r','c','m','k']
col=colourblind(i)
style=lines[sc.mod(i,4)]
point=points[sc.mod(i,18)]
mark_i = a+sc.mod(i,b)
return style,point,col,mark_i | version of linestyle function with colourblind colour scheme
returns linetyle, marker, color (see example)
Parameters
----------
i : integer
Number of linestyle combination - there are many....
a : integer
Spacing of marks. The default is 5.
b : integer
Modulation in case of plotting many nearby lines. The default
is 3.
Examples
--------
>>> plot(x,sin(x),ls=linestyle(7)[0], marker=linestyle(7)[1], \
color=linestyle(7)[2],markevery=linestyle(7)[3])
(c) 2014 FH | entailment |
def symbol_list(what_list):
'''
provide default symbol lists
Parameters
----------
what_list : string
String name of symbol lists provided; "list1", "list2",
"lines1" or "lines2".
'''
if what_list is "list1":
symbol=['ro','bo','ko','go','mo'\
,'r-','b-','k-','g-','m-','r--','b--','k--'\
,'g--','r1']
#symbol=['r+','ro','r-']
elif what_list is "list2":
symbol=['r-','b--','g-.','k:','md','.','o','v','^','<','>','1','2',\
'3','4','s','p','*','h','H','+']
elif what_list is "lines1":
symbol=['b--','k--','r--','c--','m--','g--','b-','k-','r-','c-','m-','g-','b.','b-.','k-.','r-.','c-.','m-.','g-.','b:','k:','r:','c:','m:','g:']
elif what_list is "lines2":
symbol=['g:','r-.','k-','b--','k-.','b+','r:','b-','c--','m--','g--','r-','c-','m-','g-','k-.','c-.','m-.','g-.','k:','r:','c:','m:','b-.','b:']
return symbol | provide default symbol lists
Parameters
----------
what_list : string
String name of symbol lists provided; "list1", "list2",
"lines1" or "lines2". | entailment |
def make_list(default_symbol_list, len_list_to_print):
'''
provide the list of symbols to use according for the list of
species/arrays to plot.
Parameters
----------
default_symbol_list : list
Symbols that the user choose to use.
len_list_to_print : integer
len of list of species/arrays to print.
'''
symbol_used = []
for i in range(len_list_to_print):
symbol_used.append(default_symbol_list[sc.mod(i,len(default_symbol_list))])
return symbol_used | provide the list of symbols to use according for the list of
species/arrays to plot.
Parameters
----------
default_symbol_list : list
Symbols that the user choose to use.
len_list_to_print : integer
len of list of species/arrays to print. | entailment |
def strictly_monotonic(bb):
'''
bb is an index array which may have numerous double or triple
occurrences of indices, such as for example the decay_index_pointer.
This method removes all entries <= -, then all dublicates and
finally returns a sorted list of indices.
'''
cc=bb[np.where(bb>=0)]
cc.sort()
dc=cc[1:]-cc[:-1] # subsequent equal entries have 0 in db
dc=np.insert(dc,0,1) # the first element is always unique (the second occurence is the dublicate)
dc_mask=np.ma.masked_equal(dc,0)
return np.ma.array(cc,mask=dc_mask.mask).compressed() | bb is an index array which may have numerous double or triple
occurrences of indices, such as for example the decay_index_pointer.
This method removes all entries <= -, then all dublicates and
finally returns a sorted list of indices. | entailment |
def solar(filename_solar, solar_factor):
'''
read solar abundances from filename_solar.
Parameters
----------
filename_solar : string
The file name.
solar_factor : float
The correction factor to apply, in case filename_solar is not
solar, but some file used to get initial abundances at
metallicity lower than solar. However, notice that this is
really rude, since alpha-enahncements and things like that are
not properly considered. Only H and He4 are not multiplied. So,
for publications PLEASE use proper filename_solar at...solar,
and use solar_factor = 1. Marco
'''
f0=open(filename_solar)
sol=f0.readlines()
f0.close
sol[0].split(" ")
# Now read in the whole file and create a hashed array:
global names_sol
names_sol=[]
global z_sol
z_sol=[]
yps=np.zeros(len(sol))
mass_number=np.zeros(len(sol))
for i in range(len(sol)):
z_sol.append(int(sol[i][1:3]))
names_sol.extend([sol[i].split(" ")[0][4:]])
yps[i]=float(sol[i].split(" ")[1]) * solar_factor
try:
mass_number[i]=int(names_sol[i][2:5])
except ValueError:
print("WARNING:")
print("This initial abundance file uses an element name that does")
print("not contain the mass number in the 3rd to 5th position.")
print("It is assumed that this is the proton and we will change")
print("the name to 'h 1' to be consistent with the notation used in")
print("iniab.dat files")
names_sol[i]='h 1'
mass_number[i]=int(names_sol[i][2:5])
if mass_number[i] == 1 or mass_number[i] == 4:
yps[i] = old_div(yps[i],solar_factor)
# convert 'h 1' in prot, not needed any more??
#names_sol[0] = 'prot '
# now zip them together:
global solar_abundance
solar_abundance={}
for a,b in zip(names_sol,yps):
solar_abundance[a] = b
z_bismuth = 83
global solar_elem_abund
solar_elem_abund = np.zeros(z_bismuth)
for i in range(z_bismuth):
dummy = 0.
for j in range(len(solar_abundance)):
if z_sol[j] == i+1:
dummy = dummy + float(solar_abundance[names_sol[j]])
solar_elem_abund[i] = dummy | read solar abundances from filename_solar.
Parameters
----------
filename_solar : string
The file name.
solar_factor : float
The correction factor to apply, in case filename_solar is not
solar, but some file used to get initial abundances at
metallicity lower than solar. However, notice that this is
really rude, since alpha-enahncements and things like that are
not properly considered. Only H and He4 are not multiplied. So,
for publications PLEASE use proper filename_solar at...solar,
and use solar_factor = 1. Marco | entailment |
def convert_specie_naming_from_h5_to_ppn(isotope_names):
'''
read isotopes names from h5 files, and convert them
according to standard scheme used inside ppn and mppnp. Also
Z and A are recalculated, for these species. Isomers are
excluded for now, since there were recent changes in isomers
name. As soon as the isomers names are settled, than Z and A
provided here will be obsolete, and can be changed by usual Z
and A.
'''
spe_rude1 = []
spe_rude2 = []
spe_rude3 = []
for i in range(len(isotope_names)):
spe_rude1.append(isotope_names[i].split('-')[0])
spe_rude2.append(isotope_names[i].split('-')[1])
# spe_rude1 is elem name and spe_rude2 is mass number.
#print spe_rude1,spe_rude2
k = 0
for i in range(len(spe_rude1)):
try:
if int(spe_rude2[i]) < 10:
spe_rude3.append(str(spe_rude1[i][0:2])+str(' ')+str(spe_rude2[i][0:3]))
elif int(spe_rude2[i]) >= 10 and int(spe_rude2[i]) < 100 :
spe_rude3.append(str(spe_rude1[i][0:2])+str(' ')+str(spe_rude2[i][0:3]))
elif int(spe_rude2[i]) >= 100 :
spe_rude3.append(str(spe_rude1[i][0:2])+str(spe_rude2[i][0:3]))
except ValueError:
k = k+1
None
global spe
spe = []
global n_array
n_array = []
for i in range(len(spe_rude3)):
if len(str(spe_rude1[i])) == 1:
spe.append(str(spe_rude3[i][0:1])+str(' ')+str(spe_rude3[i][1:4]))
else:
spe.append(spe_rude3[i])
n_array.append(i)
if spe[0]=='Ne 1':
spe[0] = 'N 1'
# spe_rude is the isotope name, in agreement with what we use in ppn, etc.
# need to do this to can use other functions without changing them drastically.
# here I skip isomers...
global amass_int
amass_int=np.zeros(len(spe_rude2))
for i in range(len(spe_rude2)-k):
amass_int[i]=int(spe_rude2[i])
#print amass_int
# here I have to create an array for the atomic numbers.
# I need to this when I calculate and plot element abundances
global znum_int
znum_int=np.zeros(len(spe))
for i in range(len(spe)):
znum_int[i] = Utils.elements_names.index(str(spe[i][0:2]).strip())
# changed by alex
# if str(spe[i][0:2]) == 'H ':
# znum_int[i] = 1
# elif str(spe[i][0:2]) == 'He':
# znum_int[i] = 2
# elif str(spe[i][0:2]) == 'Li':
# znum_int[i] = 3
# elif str(spe[i][0:2]) == 'Be':
# znum_int[i] = 4
# elif str(spe[i][0:2]) == 'B ':
# znum_int[i] = 5
# elif str(spe[i][0:2]) == 'C ':
# znum_int[i] = 6
# elif str(spe[i][0:2]) == 'N ':
# znum_int[i] = 7
# elif str(spe[i][0:2]) == 'O ':
# znum_int[i] = 8
# elif str(spe[i][0:2]) == 'F ':
# znum_int[i] = 9
# elif str(spe[i][0:2]) == 'Ne':
# znum_int[i] = 10
# elif str(spe[i][0:2]) == 'Na':
# znum_int[i] = 11
# elif str(spe[i][0:2]) == 'Mg':
# znum_int[i] = 12
# elif str(spe[i][0:2]) == 'Al':
# znum_int[i] = 13
# elif str(spe[i][0:2]) == 'Si':
# znum_int[i] = 14
# elif str(spe[i][0:2]) == 'P ':
# znum_int[i] = 15
# elif str(spe[i][0:2]) == 'S ':
# znum_int[i] = 16
# elif str(spe[i][0:2]) == 'Cl':
# znum_int[i] = 17
# elif str(spe[i][0:2]) == 'Ar':
# znum_int[i] = 18
# elif str(spe[i][0:2]) == 'K ':
# znum_int[i] = 19
# elif str(spe[i][0:2]) == 'Ca':
# znum_int[i] = 20
# elif str(spe[i][0:2]) == 'Sc':
# znum_int[i] = 21
# elif str(spe[i][0:2]) == 'Ti':
# znum_int[i] = 22
# elif str(spe[i][0:2]) == 'V ':
# znum_int[i] = 23
# elif str(spe[i][0:2]) == 'Cr':
# znum_int[i] = 24
# elif str(spe[i][0:2]) == 'Mn':
# znum_int[i] = 25
# elif str(spe[i][0:2]) == 'Fe':
# znum_int[i] = 26
# elif str(spe[i][0:2]) == 'Co':
# znum_int[i] = 27
# elif str(spe[i][0:2]) == 'Ni':
# znum_int[i] = 28
# elif str(spe[i][0:2]) == 'Cu':
# znum_int[i] = 29
# elif str(spe[i][0:2]) == 'Zn':
# znum_int[i] = 30
# elif str(spe[i][0:2]) == 'Ga':
# znum_int[i] = 31
# elif str(spe[i][0:2]) == 'Ge':
# znum_int[i] = 32
# elif str(spe[i][0:2]) == 'As':
# znum_int[i] = 33
# elif str(spe[i][0:2]) == 'Se':
# znum_int[i] = 34
# elif str(spe[i][0:2]) == 'Br':
# znum_int[i] = 35
# elif str(spe[i][0:2]) == 'Kr':
# znum_int[i] = 36
# elif str(spe[i][0:2]) == 'Rb':
# znum_int[i] = 37
# elif str(spe[i][0:2]) == 'Sr':
# znum_int[i] = 38
# elif str(spe[i][0:2]) == 'Y ':
# znum_int[i] = 39
# elif str(spe[i][0:2]) == 'Zr':
# znum_int[i] = 40
# elif str(spe[i][0:2]) == 'Nb':
# znum_int[i] = 41
# elif str(spe[i][0:2]) == 'Mo':
# znum_int[i] = 42
# elif str(spe[i][0:2]) == 'Tc':
# znum_int[i] = 43
# elif str(spe[i][0:2]) == 'Ru':
# znum_int[i] = 44
# elif str(spe[i][0:2]) == 'Rh':
# znum_int[i] = 45
# elif str(spe[i][0:2]) == 'Pd':
# znum_int[i] = 46
# elif str(spe[i][0:2]) == 'Ag':
# znum_int[i] = 47
# elif str(spe[i][0:2]) == 'Cd':
# znum_int[i] = 48
# elif str(spe[i][0:2]) == 'In':
# znum_int[i] = 49
# elif str(spe[i][0:2]) == 'Sn':
# znum_int[i] = 50
# elif str(spe[i][0:2]) == 'Sb':
# znum_int[i] = 51
# elif str(spe[i][0:2]) == 'Te':
# znum_int[i] = 52
# elif str(spe[i][0:2]) == 'I ':
# znum_int[i] = 53
# elif str(spe[i][0:2]) == 'Xe':
# znum_int[i] = 54
# elif str(spe[i][0:2]) == 'Cs':
# znum_int[i] = 55
# elif str(spe[i][0:2]) == 'Ba':
# znum_int[i] = 56
# elif str(spe[i][0:2]) == 'La':
# znum_int[i] = 57
# elif str(spe[i][0:2]) == 'Ce':
# znum_int[i] = 58
# elif str(spe[i][0:2]) == 'Pr':
# znum_int[i] = 59
# elif str(spe[i][0:2]) == 'Nd':
# znum_int[i] = 60
# elif str(spe[i][0:2]) == 'Pm':
# znum_int[i] = 61
# elif str(spe[i][0:2]) == 'Sm':
# znum_int[i] = 62
# elif str(spe[i][0:2]) == 'Eu':
# znum_int[i] = 63
# elif str(spe[i][0:2]) == 'Gd':
# znum_int[i] = 64
# elif str(spe[i][0:2]) == 'Tb':
# znum_int[i] = 65
# elif str(spe[i][0:2]) == 'Dy':
# znum_int[i] = 66
# elif str(spe[i][0:2]) == 'Ho':
# znum_int[i] = 67
# elif str(spe[i][0:2]) == 'Er':
# znum_int[i] = 68
# elif str(spe[i][0:2]) == 'Tm':
# znum_int[i] = 69
# elif str(spe[i][0:2]) == 'Yb':
# znum_int[i] = 70
# elif str(spe[i][0:2]) == 'Lu':
# znum_int[i] = 71
# elif str(spe[i][0:2]) == 'Hf':
# znum_int[i] = 72
# elif str(spe[i][0:2]) == 'Ta':
# znum_int[i] = 73
# elif str(spe[i][0:2]) == 'W ':
# znum_int[i] = 74
# elif str(spe[i][0:2]) == 'Re':
# znum_int[i] = 75
# elif str(spe[i][0:2]) == 'Os':
# znum_int[i] = 76
# elif str(spe[i][0:2]) == 'Ir':
# znum_int[i] = 77
# elif str(spe[i][0:2]) == 'Pt':
# znum_int[i] = 78
# elif str(spe[i][0:2]) == 'Au':
# znum_int[i] = 79
# elif str(spe[i][0:2]) == 'Hg':
# znum_int[i] = 80
# elif str(spe[i][0:2]) == 'Tl':
# znum_int[i] = 81
# elif str(spe[i][0:2]) == 'Pb':
# znum_int[i] = 82
# elif str(spe[i][0:2]) == 'Bi':
# znum_int[i] = 83
# elif str(spe[i][0:2]) == 'Po':
# znum_int[i] = 84
# elif str(spe[i][0:2]) == 'At':
# znum_int[i] = 85
# elif str(spe[i][0:2]) == 'Rn':
# znum_int[i] = 86
# elif str(spe[i][0:2]) == 'Fr':
# znum_int[i] = 87
# elif str(spe[i][0:2]) == 'Ra':
# znum_int[i] = 88
# elif str(spe[i][0:2]) == 'Ac':
# znum_int[i] = 89
# elif str(spe[i][0:2]) == 'Th':
# znum_int[i] = 90
# elif str(spe[i][0:2]) == 'Pa':
# znum_int[i] = 91
# elif str(spe[i][0:2]) == 'U ':
# znum_int[i] = 92
# elif str(spe[i][0:2]) == 'Np':
# znum_int[i] = 93
# elif str(spe[i][0:2]) == 'Pu':
# znum_int[i] = 94
# elif str(spe[i][0:2]) == 'Am':
# znum_int[i] = 95
# elif str(spe[i][0:2]) == 'Cm':
# znum_int[i] = 96
# elif str(spe[i][0:2]) == 'Bk':
# znum_int[i] = 97
# elif str(spe[i][0:2]) == 'Cf':
# znum_int[i] = 98
if spe[0] == 'N 1':
znum_int[0] = 0
# here the index to connect name and atomic numbers.
global index_atomic_number
index_atomic_number = {}
for a,b in zip(spe,znum_int):
index_atomic_number[a]=b | read isotopes names from h5 files, and convert them
according to standard scheme used inside ppn and mppnp. Also
Z and A are recalculated, for these species. Isomers are
excluded for now, since there were recent changes in isomers
name. As soon as the isomers names are settled, than Z and A
provided here will be obsolete, and can be changed by usual Z
and A. | entailment |
def define_zip_index_for_species(names_ppn_world,
number_names_ppn_world):
''' This just give back cl, that is the original index as it is read from files from a data file.'''
#connect the specie number in the list, with the specie name
global cl
cl={}
for a,b in zip(names_ppn_world,number_names_ppn_world):
cl[a] = b | This just give back cl, that is the original index as it is read from files from a data file. | entailment |
def element_abund_marco(i_decay, stable_isotope_list,
stable_isotope_identifier,
mass_fractions_array_not_decayed,
mass_fractions_array_decayed):
'''
Given an array of isotopic abundances not decayed and a similar
array of isotopic abundances not decayed, here elements abundances,
and production factors for elements are calculated
'''
# this way is done in a really simple way. May be done better for sure, in a couple of loops.
# I keep this, since I have only to copy over old script. Falk will probably redo it.
#import numpy as np
#from NuGridPy import utils as u
global elem_abund
elem_abund = np.zeros(z_bismuth)
global elem_abund_decayed
elem_abund_decayed = np.zeros(z_bismuth)
global elem_prod_fac
elem_prod_fac = np.zeros(z_bismuth)
global elem_prod_fac_decayed
elem_prod_fac_decayed = np.zeros(z_bismuth)
# notice that elem_abund include all contribution, both from stables and unstables in
# that moment.
for i in range(z_bismuth):
dummy = 0.
for j in range(len(spe)):
if znum_int[j] == i+1 and stable_isotope_identifier[j] > 0.5:
dummy = dummy + float(mass_fractions_array_not_decayed[j])
elem_abund[i] = dummy
for i in range(z_bismuth):
if index_stable[i] == 1:
elem_prod_fac[i] = float(old_div(elem_abund[i],solar_elem_abund[i]))
elif index_stable[i] == 0:
elem_prod_fac[i] = 0.
if i_decay == 2:
for i in range(z_bismuth):
dummy = 0.
for j in range(len(mass_fractions_array_decayed)):
if znum_int[cl[stable_isotope_list[j].capitalize()]] == i+1:
#print znum_int[cl[stable[j].capitalize()]],cl[stable[j].capitalize()],stable[j]
dummy = dummy + float(mass_fractions_array_decayed[j])
elem_abund_decayed[i] = dummy
for i in range(z_bismuth):
if index_stable[i] == 1:
elem_prod_fac_decayed[i] = float(old_div(elem_abund_decayed[i],solar_elem_abund[i]))
elif index_stable[i] == 0:
elem_prod_fac_decayed[i] = 0. | Given an array of isotopic abundances not decayed and a similar
array of isotopic abundances not decayed, here elements abundances,
and production factors for elements are calculated | entailment |
def stable_specie():
''' provide the list of stable species, and decay path feeding stables '''
#import numpy as np
stable_raw=[]
stable_raw = ['H 1', 'H 2',\
'HE 3', 'HE 4',\
'LI 6', 'LI 7',\
'BE 9',\
'B 10', 'B 11',\
'C 12', 'C 13',\
'N 14', 'N 15',\
'O 16', 'O 17', 'O 18',\
'F 19',\
'NE 20', 'NE 21', 'NE 22',\
'NA 23',\
'MG 24', 'MG 25', 'MG 26',\
'AL 27',\
'SI 28', 'SI 29', 'SI 30',\
'P 31',\
'S 32', 'S 33', 'S 34', 'S 36',\
'CL 35', 'CL 37',\
'AR 36', 'AR 38', 'AR 40',\
'K 39', 'K 40', 'K 41',\
'CA 40', 'CA 42', 'CA 43', 'CA 44', 'CA 46', 'CA 48',\
'SC 45',\
'TI 46', 'TI 47', 'TI 48', 'TI 49', 'TI 50',\
'V 50', 'V 51',\
'CR 50', 'CR 52', 'CR 53', 'CR 54',\
'MN 55',\
'FE 54', 'FE 56', 'FE 57', 'FE 58',\
'CO 59',\
'NI 58', 'NI 60', 'NI 61', 'NI 62', 'NI 64',\
'CU 63', 'CU 65',\
'ZN 64', 'ZN 66', 'ZN 67', 'ZN 68', 'ZN 70',\
'GA 69', 'GA 71',\
'GE 70', 'GE 72', 'GE 73', 'GE 74', 'GE 76',\
'AS 75',\
'SE 74', 'SE 76', 'SE 77', 'SE 78', 'SE 80', 'SE 82',\
'BR 79', 'BR 81',\
'KR 78', 'KR 80', 'KR 82', 'KR 83', 'KR 84', 'KR 86',\
'RB 85', 'RB 87',\
'SR 84', 'SR 86', 'SR 87', 'SR 88',\
'Y 89',\
'ZR 90', 'ZR 91', 'ZR 92', 'ZR 94', 'ZR 96',\
'NB 93',\
'MO 92', 'MO 94', 'MO 95', 'MO 96', 'MO 97', 'MO 98', 'MO100',\
'RU 96', 'RU 98', 'RU 99', 'RU100', 'RU101', 'RU102', 'RU104',\
'RH103',\
'PD102', 'PD104', 'PD105', 'PD106', 'PD108', 'PD110',\
'AG107', 'AG109',\
'CD106', 'CD108', 'CD110', 'CD111', 'CD112', 'CD113', 'CD114', 'CD116',\
'IN113', 'IN115',\
'SN112', 'SN114', 'SN115', 'SN116', 'SN117', 'SN118', 'SN119', 'SN120', 'SN122', 'SN124',\
'SB121', 'SB123',\
'TE120', 'TE122', 'TE123', 'TE124', 'TE125', 'TE126', 'TE128', 'TE130',\
'I 127',\
'XE124', 'XE126', 'XE128', 'XE129', 'XE130', 'XE131', 'XE132', 'XE134', 'XE136',\
'CS133',\
'BA130', 'BA132', 'BA134', 'BA135', 'BA136', 'BA137', 'BA138',\
'LA138', 'LA139',\
'CE136', 'CE138', 'CE140', 'CE142',\
'PR141',\
'ND142', 'ND143', 'ND144', 'ND145', 'ND146', 'ND148', 'ND150',\
'SM144', 'SM147', 'SM148', 'SM149', 'SM150', 'SM152', 'SM154',\
'EU151', 'EU153',\
'GD152', 'GD154', 'GD155', 'GD156', 'GD157', 'GD158', 'GD160',\
'TB159',\
'DY156', 'DY158', 'DY160', 'DY161', 'DY162', 'DY163', 'DY164',\
'HO165',\
'ER162', 'ER164', 'ER166', 'ER167', 'ER168', 'ER170',\
'TM169',\
'YB168', 'YB170', 'YB171', 'YB172', 'YB173', 'YB174', 'YB176',\
'LU175', 'LU176',\
'HF174', 'HF176', 'HF177', 'HF178', 'HF179', 'HF180',\
'TA180', 'TA181',\
'W 180', 'W 182', 'W 183', 'W 184', 'W 186',\
'RE185', 'RE187',\
'OS184', 'OS186', 'OS187', 'OS188', 'OS189', 'OS190', 'OS192',\
'IR191', 'IR193',\
'PT190', 'PT192', 'PT194', 'PT195', 'PT196', 'PT198',\
'AU197',\
'HG196', 'HG198', 'HG199', 'HG200', 'HG201', 'HG202', 'HG204',\
'TL203', 'TL205',\
'PB204', 'PB206', 'PB207', 'PB208',\
'BI209',\
'TH232',\
'U 235','U 238']
jj=-1
global count_size_stable
count_size_stable=[]
global stable
stable=[]
global jdum
jdum=np.zeros(len(stable_raw))
global jjdum
jjdum=np.zeros(len(spe))
for i in range(len(stable_raw)):
dum_str = stable_raw[i]
for j in range(len(spe)):
if stable_raw[i].capitalize() == spe[j]:
stable.append(stable_raw[i])
jdum[i]=1
jjdum[j]=1
jj=jj+1
count_size_stable.append(int(jj))
#print stable
# back_ind is an index to go back, to use the order of stable
# useful for example for decayed yields.
global back_ind
back_ind={}
for a,b in zip(stable,count_size_stable):
back_ind[a]=b
#print 'in stable:',back_ind['SE 74']
# definition of decay paths
global decay_raw
decay_raw=[]
decay_raw=[['H 1'],\
['H 2'],\
['HE 3'],\
['HE 4','B 8'],\
['LI 6'],\
['LI 7','BE 7'],\
['BE 9'],\
['B 10','BE 10'],\
['B 11','C 11','BE 11'],\
['C 12'],\
['C 13','N 13','O 13'],\
['N 14','C 14','O 14'],\
['N 15','C 15','O 15','F 15'],\
['O 16'],\
['O 17','F 17'],\
['O 18','F 18','NE 18'],\
['F 19','O 19','NE 19'],\
['NE 20','F 20','NA 20'],\
['NE 21','F 21','NA 21'],\
['NE 22','NA 22','MG 22','F 22'],\
['NA 23','MG 23','NE 23'],\
['MG 24','AL 24','NA 24','NE 24',],\
['MG 25','NA 25','AL 25'],\
['MG 26','SI 26','AL 26','NA 26','AL*26'],\
['AL 27','SI 27','MG 27'],\
['SI 28','AL 28','MG 28'],\
['SI 29','P 29','AL 29','MG 29'],\
['SI 30','S 30','P 30','AL 30','MG 30'],\
['P 31','S 31','SI 31'],\
['S 32','P 32','SI 32'],\
['S 33','CL 33','P 33','SI 33'],\
['S 34','CL 34','P 34'],\
['S 36','P 36'],\
['CL 35','S 35','AR 35'],\
['CL 37','S 37','P 37','AR 37','K 37'],\
['AR 36','CL 36'],\
['AR 38','CL 38','S 38','P 38','K 38'],\
['AR 40','CL 40','S 40'],\
['K 39','AR 39','CL 39','S 39','CA 39'],\
['K 40'],\
['K 41','AR 41','CL 41','S 41','CA 41','SC 41'],\
['CA 40','SC 40'],\
['CA 42','K 42','AR 42','CL 42','S 42','SC 42','TI 42'],\
['CA 43','K 43','AR 43','CL 43','SC 43','TI 43','V 43'],\
['CA 44','K 44','AR 44','CL 44','SC 44','TI 44'],\
['CA 46','K 46','AR 46'],\
['CA 48','K 48','AR 48'],\
['SC 45','CA 45','K 45','AR 45','CL 45','TI 45','V 45'],\
['TI 46','SC 46','V 46','CR 46'],\
['TI 47','SC 47','CA 47','K 47','AR 47','V 47','CR 47'],\
['TI 48','SC 48','V 48','CR 48'],\
['TI 49','SC 49','CA 49','K 49','V 49','CR 49','MN 49'],\
['TI 50','SC 50','CA 50','K 50'],\
['V 50'],\
['V 51','CR 51','TI 51','SC 51','CA 51','MN 51'],\
['CR 50','MN 50'],\
['CR 52','MN 52','FE 52','V 52','TI 52','SC 52','CA 52'],\
['CR 53','MN 53','FE 53','V 53','TI 53','SC 53'],\
['CR 54','MN 54','V 54','TI 54','SC 54'],\
['MN 55','FE 55','CR 55','V 55','TI 55','CO 55'],\
['FE 54','CO 54'],\
['FE 56','NI 56','CO 56','MN 56','CR 56'],\
['FE 57','NI 57','CO 57','MN 57','CR 57'],\
['FE 58','CO 58','MN 58','CR 58'],\
['CO 59','FE 59','MN 59','CR 59','NI 59','CU 59'],\
['NI 58','CU 58'],\
['NI 60','CO 60','FE 60','MN 60','CR 60','CU 60','ZN 60'],\
['NI 61','CO 61','FE 61','MN 61','CU 61','ZN 61'],\
['NI 62','CO 62','FE 62','CU 62','ZN 62'],\
['NI 64','CO 64','FE 64','CU 64'],\
['CU 63','NI 63','CO 63','FE 63','MN 63','ZN 63','GA 63'],\
['CU 65','NI 65','CO 65','FE 65','ZN 65','GA 65','GE 65'],\
['ZN 64','CU 64','GA 64','GE 64'],\
['ZN 66','CU 66','NI 66','CO 66','FE 66','GA 66','GE 66'],\
['ZN 67','CU 67','NI 67','CO 67','FE 67','GA 67','GE 67','AS 77'],\
['ZN 68','NI 68','CO 68','GA 68','GE 68','CU 68','AS 68','SE 68'],\
['ZN 70','CU 70','NI 70','CO 70'],\
['GA 69','ZN 69','CU 69','NI 69','GE 69','AS 69','SE 69'],\
['GA 71','ZN 71','CU 71','NI 71','GE 71','AS 71','SE 71','BR 71'],\
['GE 70','GA 70','AS 70','SE 70','BR 70'],\
['GE 72','GA 72','ZN 72','CU 72','NI 72','AS 72','SE 72','BR 72','KR 72'],\
['GE 73','GA 73','ZN 73','CU 73','NI 73','AS 73','SE 73','BR 73','KR 73'],\
['GE 74','GA 74','ZN 74','CU 74','NI 74','AS 74'],\
['GE 76','GA 76','ZN 76','CU 76'],\
['AS 75','GE 75','GA 75','ZN 75','CU 75','SE 75','BR 75','KR 75','RB 75'],\
['SE 74','AS 74','BR 74','KR 74'],\
['SE 76','AS 76','BR 76','KR 76','RB 76','SR 76'],\
['SE 77','AS 77','GE 77','BR 77','GA 77','ZN 77','KR 77','RB 77','SR 77'],\
['SE 78','AS 78','GE 78','GA 78','ZN 78','BR 78'],\
['SE 80','AS 80','GE 80','GA 80','ZN 80'],\
['SE 82','AS 82','GE 82','GA 82'],\
['BR 79','SE 79','AS 79','GE 79','GA 79','ZN 79','KR 79','RB 79','SR 79','Y 79'],\
['BR 81','SE 81','KR 81','AS 81','GE 81','GA 81','RB 81','SR 81','Y 81','ZR 81'],\
['KR 78','RB 78','SR 78','Y 78'],\
['KR 80','BR 80','RB 80','SR 80','ZR 80'],\
['KR 82','BR 82','RB 82','SR 82','Y 82','ZR 82'],\
['KR 83','BR 83','SE 83','AS 83','GE 83','RB 83','SR 83','Y 83','ZR 83','NB 83'],\
['KR 84','BR 84','SE 84','AS 84','GE 84','RB 84'],\
['KR 86','BR 86','SE 86','AS 86'],\
['RB 85','KR 85','SR 85','KR*85','BR 85','SE 85','AS 85','Y 85','ZR 85','NB 85','MO 85'],\
['RB 87','KR 87','BR 87','SE 87','AS 87'],\
['SR 84','Y 84','ZR 84','NB 84','MO 84'],\
['SR 86','RB 86','Y 86','ZR 86','NB 86','MO 86'],\
['SR 87','Y 87','ZR 87','NB 87','MO 87','TC 87'],\
['SR 88','RB 88','KR 88','BR 88','SE 88','Y 88','ZR 88','NB 88','MO 88','TC 88'],\
['Y 89','SR 89','RB 89','KR 89','BR 89','ZR 89','NB 89','MO 89','TC 89','RU 89'],\
['ZR 90','Y 90','SR 90','RB 90','KR 90','BR 90','NB 90','MO 90','TC 90','RU 90'],\
['ZR 91','Y 91','SR 91','RB 91','KR 91','BR 91','SE 91','NB 91','MO 91','TC 91','RU 91','RH 91'],\
['ZR 92','Y 92','SR 92','RB 92','KR 92','BR 92','NB 92'],\
['ZR 94','Y 94','SR 94','RB 94','KR 94'],\
['ZR 96','Y 96','SR 96'],\
['NB 93','ZR 93','Y 93','SR 93','RB 93','KR 93','MO 93','TC 93','RU 93','RH 93','PD 93'],\
['MO 92','TC 92','RU 92','RH 92','PD 92'],\
['MO 94','NB 94','TC 94','RU 94','RH 94','PD 94'],\
['MO 95','NB 95','ZR 95','Y 95','SR 95','TC 95','RU 95','RH 95','PD 95','AG 95'],\
['MO 96','NB 96','TC 96'],\
['MO 97','NB 97','ZR 97','Y 97','SR 97','TC 97','RU 97','RH 97','PD 97','AG 97','CD 97'],\
['MO 98','NB 98','ZR 98','Y 98','SR 98'],\
['MO100','NB100','ZR100','Y 100'],\
['RU 96','RH 96','PD 96','AG 96'],\
['RU 98','TC 98','RH 98','PD 98','AG 98','CD 98'],\
['RU 99','TC 99','MO 99','NB 99','ZR 99','Y 99','RH 99','PD 99','AG 99','CD 99','IN 99'],\
['RU100','TC100','RH100','PD100','AG100','CD100','IN100','SN100'],\
['RU101','TC101','MO101','NB101','ZR101','Y 101','RH101','PD101','AG101','CD101','IN101','SN101'],\
['RU102','MO102','TC102','NB102','ZR102','Y 102','RH102'],\
['RU104','TC104','MO104','NB104'],\
['RH103','RU103','TC103','MO103','NB103','ZR103','Y 103','PD103','AG103','CD103','IN103','SN103'],\
['PD102','AG102','CD102','IN102','SN102'],\
['PD104','RH104','AG104','CD104','IN104','SN104','SB104'],\
['PD105','RH105','RU105','TC105','MO105','NB105','ZR105','AG105','CD105','IN105','SN105','SB105'],\
['PD106','RH106','RU106','TC106','MO106','NB106','AG106'],\
['PD108','RH108','RU108','TC108','MO108','NB108'],\
['PD110','RH110','RU110','TC110','MO110','NB110'],\
['AG107','PD107','RH107','RU107','TC107','MO107','CD107','IN107','SN107','SB107'],\
['AG109','PD109','RH109','RU109','TC109','MO109','NB109','CD109','IN109','SN109','SB109','TE109'],\
['CD106','IN106','SN106','SB106'],\
['CD108','AG108','IN108','SN108','SB108'],\
['CD110','AG110','IN110','SN110','SB110','TE110'],\
['CD111','AG111','PD111','RH111','RU111','TC111','IN111','SN111','SB111','TE111','I 111'],\
['CD112','AG112','PD112','RH112','RU112','TC112'],\
['CD113','AG113','PD113','RH113','RU113'],\
['CD114','AG114','PD114','RH114','RU114'],\
['CD116','AG116','PD116','RH116'],\
['IN113','SN113','SB113','TE113','I 113','XE113'],\
['IN115','CD115','AG115','PD115','RH115','RU115'],\
['SN112','IN112','SB112','TE112','I 112','XE112'],\
['SN114','IN114','SB114','TE114','I 114','XE114','CS114'],\
['SN115','SB115','TE115','I 115','XE115','CS115','BA115'],\
['SN116','IN116','SB116','TE116','I 116','XE116','CS116','BA116'],\
['SN117','IN117','AG117','PD117','RH117','SB117','CD117','TE117','I 117','XE117','CS117','BA117'],\
['SN118','IN118','CD118','AG118','PD118','SB118','TE118','XE118','CS118','BA118'],\
['SN119','IN119','CD119','AG119','PD119','SB119','TE119','XE119','CS119','BA119'],\
['SN120','SB120','IN120','CD120','AG120','PD120'],\
['SN122','IN122','CD122','AG122'],\
['SN124','IN124','CD124'],\
['SB121','SN121','IN121','CD121','AG121','TE121','I 121','XE121','CS121','XE121','CS121','BA121','LA121','CE121'],
['SB123','SN123','IN123','CD123','AG123'],\
['TE120','I 120','XE120','CS120','BA120','LA120'],\
['TE122','SB122','I 122','XE122','CS122','BA122','LA122'],\
['TE123','I 123','XE123','CS123','BA123','LA123','CE123'],\
['TE124','SB124','I 124'],\
['TE125','SB125','SN125','IN125','CD125','I 125','XE125','CS125','BA125','LA125','CE125','PR125'],\
['TE126','SB126','SN126','IN126','CD126'],\
['TE128','SB128','SN128','IN128','CD128'],\
['TE130','SB130','SN130','IN130'],\
['I 127','TE127','SB127','SN127','IN127','CD127','XE127','CS127','BA127','LA127','CE127','PR127','ND127'],\
['XE124','CS124','BA124','LA124','CE124','PR124'],\
['XE126','I 126','CS126','BA126','LA126','CE126','PR126'],\
['XE128','I 128','CS128','BA128','LA128','CE128','PR128'],\
['XE129','I 129','TE129','SB129','SN129','IN129','CD129','CS129','BA129','LA129','CE129','PR129','ND129'],\
['XE130','I 130','CS130'],\
['XE131','I 131','TE131','SB131','SN131','IN131','CS131','BA131','LA131','CE131','PR131','ND131','PM131','SM131'],\
['XE132','I 132','TE132','SB132','SN132','IN132','CS132'],
['XE134','I 134','TE134','SB134','SN134'],\
['XE136','I 136','TE136','SB136'],\
['CS133','XE133','I 133','TE133','SB133','SN133','BA133','LA133','CE133','PR133','ND133','PM133','SM133'],\
['BA130','LA130','CE130','PR130','ND130','PM130'],\
['BA132','LA132','CE132','PR132','ND132','PM132','SM132'],\
['BA134','CS134','LA134','CE134','PR134','ND134','PM134','SM134','EU134'],\
['BA135','CS135','XE135','I 135','TE135','SB135','SN135','LA135','CE135','PR135','ND135','PM135','SM135','EU135','GD135'],\
['BA136','CS136','LA136'],\
['BA137','CS137','XE137','I 137','TE137','SB137','LA137','CE137','PR137','ND137','PM137','SM137','EU137','GD137'],\
['BA138','CS138','XE138','I 138','TE138'],\
['LA138'],\
['LA139','BA139','CS139','XE139','I 139','CE139','PR139','ND139','PM139','SM139','EU139','GD139','TB139','DY139'],\
['CE136','PR136','ND136','PM136','SM136','EU136'],\
['CE138','PR138','ND138','PM138','SM138','EU138','GD138'],\
['CE140','LA140','BA140','CS140','XE140','I 140','PR140','ND140','PM140','SM140','EU140','GD140','TB140'],\
['CE142','LA142','BA142','CS142','XE142','I 142'],\
['PR141','CE141','LA141','BA141','CS141','XE141','I 141','ND141','PM141','SM141','EU141','GD141','TB141','DY141'],\
['ND142','PR142','PM142','SM142','EU142','GD142','TB142','DY142','HO142','SM146','EU146','GD146','TB146','DY146','HO146','ER146',\
'GD150','TB150','DY150','HO150','ER150','DY154','HO154','ER154'],\
['ND143','PR143','CE143','LA143','BA143','CS143','XE143','PM143','SM143','EU143','GD143','TB143','DY143'],\
['ND144','PR144','CE144','LA144','BA144','CS144','XE144','PM144'],\
['ND145','PR145','CE145','LA145','BA145','CS145','PM145','SM145','EU145','GD145','TB145','DY145','HO145','ER145'],\
['ND146','PR146','CE146','LA146','BA146','CS146','PM146'],\
['ND148','PR148','CE148','LA148','BA148'],\
['ND150','PR150','CE150','LA150','BA150'],\
['SM144','EU144','GD144','TB144','DY144','HO144','GD148','TB148','DY148','HO148','ER148','TM148'],\
['SM147','PM147','ND147','PR147','CE147','LA147','BA147','EU147','GD147','TB147','DY147','HO147','ER147'],\
['SM148','PM148','EU148'],\
['SM149','PM149','ND149','PR149','CE149','LA149','EU149','GD149','TB149','DY149','HO149','ER149','TM149','YB149'],\
['SM150','PM150','EU150'],\
['SM152','PM152','ND152','PR152','CE152','EU152'],\
['SM154','PM154','ND154','PR154'],\
['EU151','SM151','PM151','ND151','PR151','CE151','GD151','TB151'],\
['EU153','SM153','PM153','GD153','PR153','GD153','TB153','DY153','HO153'],\
['GD152','TB152','DY152','HO152'],\
['GD154','EU154','TB154'],\
['GD155','EU155','SM155','PM155','ND155','TB155','DY155','HO155','ER155','TM155'],\
['GD156','EU156','SM156','PM156','ND156','TB156'],\
['GD157','EU157','SM157','PM157','TB157','DY157','HO157','ER157','TM157','YB157'],\
['GD158','EU158','SM158','PM158','TB158'],\
['GD160','EU160','SM160'],\
['TB159','GD159','EU159','SM159','PM159','DY159','HO159','ER159','TM159','YB159','LU159'],\
['DY156','HO156','ER156','TM156','YB156','HO156','ER156','TM156','YB156'],\
['DY158','HO158','ER158','TM158','YB158','LU158'],\
['DY160','TB160','HO160','ER160','TM160','YB160','LU160','HF160'],\
['DY161','TB161','GD161','EU161','SM161','PM161','ND161','HO161','ER161','TM161','YB161','LU161','HF161','TA161'],\
['DY162','TB162','GD162','EU162','SM162','PM162','HO162'],\
['DY163','TB163','HO163','GD163','EU163','SM163','PM163','ND163','ER163','TM163','YB163','LU163','HF163','TA163'],\
['DY164','TB164','HO164','GD164','EU164','SM164','PM164','ND164','HO164'],\
['HO165','DY165','ER165','TB165','GD165','EU165','SM165','PM165','HO165','TM165','YB165','LU165','HF165','TA165','W 165'],\
['ER162','TM162','YB162','LU162','HF162','TA162'],\
['ER164','TM164','YB164','LU164','HF164','TA164','W 164'],\
['ER166','HO166','DY166','TB166','GD166','EU166','SM166','PM166','ND166','TM166','YB166','LU166','HF166','TA166','W 166','RE166'],\
['ER167','HO167','DY167','TB167','GD167','EU167','SM167','PM167','ND167','TM167','YB167','LU167','HF167','TA167','W 167','RE167'],\
['ER168','HO168','DY168','TB168','GD168','EU168','SM168','PM168','ND168','TM168'],\
['ER170','HO170','DY170','TB170','GD170','EU170','SM170','PM170','ND170'],\
['TM169','ER169','HO169','DY169','TB169','GD169','EU169','SM169','PM169','ND169','YB169','LU169','HF169','TA169','W 169','RE169'],\
['YB168','LU168','HF168','TA168','W 168','RE168'],\
['YB170','TM170','LU170','HF170','TA170','W 170','RE170','OS170'],\
['YB171','TM171','ER171','HO171','DY171','TB171','GD171','EU171','SM171','PM171','ND171','LU171','HF171','TA171','W 171','RE171','OS171'],\
['YB172','TM172','ER172','HO172','DY172','TB172','GD172','EU172','SM172','PM172','ND172','LU172','HF172','TA172','W 172','RE172','OS172','IR172'],\
['YB173','TM173','ER173','HO173','DY173','TB173','GD173','EU173','SM173','PM173','ND173','LU173','HF173','TA173','W 173','RE173','OS173','IR173'],\
['YB174','TM174','ER174','HO174','DY174','TB174','GD174','EU174','SM174','PM174','ND174','LU174'],\
['YB176','TM176','ER176','HO176','DY176','TB176','GD176','EU176','SM176','PM176','ND176'],\
['LU175','YB175','TM175','ER175','HO175','DY175','TB175','GD175','EU175','SM175','PM175','ND175','HF175','TA175','W 175','RE175','OS175','IR175'],\
['LU176','HF176','TA176','W 176','RE176','OS176','IR176'],\
['HF174','TA174','W 174','RE174','OS174','IR174'],\
['HF176','TA176','W 176','RE176','OS176','IR176'],\
['HF177','LU177','YB177','LU177','YB177','TM177','ER177','HO177','DY177','TB177','GD177','TA177','W 177','RE177','OS177','IR177'],\
['HF178','LU178','YB178','LU178','YB178','TM178','ER178','HO178','DY178','TB178','GD178','TA178','W 178','RE178','OS178','IR178'],\
['HF179','LU179','YB179','LU179','YB179','TM179','ER179','HO179','DY179','TB179','GD179','TA179','W 179','RE179','OS179','IR179','PT179'],\
['HF180','LU180','YB180','LU180','YB180','TM180','ER180','HO180','DY180','TB180','GD180','TA180','W 180','RE180','OS180','IR180','PT180','AU180'],\
['TA180'],\
['TA181','HF181','LU181','YB181','LU181','YB181','TM181','ER181','HO181','DY181','TB181','GD181','W 181','RE181','OS181','IR181','PT181','AU181'],\
['W 180','RE180','OS180','IR180','PT180','AU180'],\
['W 182','TA182','HF182','LU182','YB182','LU182','YB182','TM182','ER182','HO182','DY182','TB182','GD182','RE182','OS182','IR182','PT182','AU182'],\
['W 183','TA183','HF183','LU183','YB183','LU183','YB183','TM183','ER183','HO183','DY183','TB183','GD183','RE183','OS183','IR183','PT183','AU183'],\
['W 184','TA184','HF184','LU184','YB184','LU184','YB184','TM184','ER184','HO184','DY184','TB184','GD184','RE184'],\
['W 186','TA186','HF186','LU186','YB186','LU186','YB186','TM186','ER186','HO186','DY186','TB186','GD186'],\
['RE185','W 185','TA185','HF185','LU185','YB185','LU185','YB185','TM185','ER185','HO185','DY185','TB185','GD185','OS185','IR185','PT185','AU185','HG185','TL185'],\
['RE187','W 187','TA187','HF187','LU187','YB187','LU187','YB187','TM187','ER187','HO187','DY187','TB187','GD187'],\
['OS184','IR184','PT184','AU184','HG184','TL184'],\
['OS186','RE186','IR186','PT186','AU186','HG186','TL186'],\
['OS187','IR187','PT187','AU187','HG187','TL187','PB187'],\
['OS188','RE188','W 188','TA188','HF188','LU188','YB188','LU188','YB188','TM188','ER188','HO188','DY188','TB188','GD188','IR188','PT188','AU188','HG188','TL188','PB188'],\
['OS189','RE189','W 189','TA189','HF189','LU189','YB189','LU189','YB189','TM189','ER189','HO189','DY189','TB189','GD189','IR189','PT189','AU189','HG189','TL189','PB189'],\
['OS190','RE190','W 190','TA190','HF190','LU190','YB190','LU190','YB190','TM190','ER190','HO190','DY190','TB190','GD190','IR190'],\
['OS192','RE190','W 192','TA192','HF192','LU192','YB192','LU192','YB192','TM192','ER192','HO192','DY192','TB192','GD192'],\
['IR191','OS191','RE191','W 191','TA191','HF191','LU191','YB191','LU191','YB191','TM191','ER191','HO191','DY191','TB191','GD191','PT191','AU191','HG191','TL191','PB191'],\
['IR193','OS193','RE193','W 193','TA193','HF193','LU193','YB193','LU193','YB193','TM193','ER193','HO193','DY193','TB193','GD193','PT193','AU193','HG193','TL193','PB193'],\
['PT190','AU190','HG190','TL190','PB190'],\
['PT192','IR192','AU192','HG192','TL192','PB192'],\
['PT194','IR194','OS194','RE194','W 194','TA194','HF194','AU194','HG194','TL194','PB194','BI194'],
['PT195','IR195','OS195','RE195','W 195','TA195','HF195','AU195','HG195','TL195','PB195','BI195'],\
['PT196','IR196','OS196','RE196','W 196','TA196','HF196','AU196'],\
['PT198','IR198','OS198','RE198','W 198','TA198','HF198'],\
['AU197','PT197','IR197','OS197','RE197','W 197','TA197','HF197','HG197','TL197','PB197','BI197'],\
['HG196','TL196','PB196','BI196'],\
['HG198','AU198','TL198','PB198','BI198'],\
['HG199','AU199','PT199','IR199','OS199','RE199','W 199','TA199','HF199','TL199','PB199','BI199'],\
['HG200','AU200','PT200','IR200','OS200','RE200','W 200','TA200','HF200','TL200','PB200','BI200'],\
['HG201','AU201','PT201','IR201','OS201','RE201','W 201','TA201','HF201','TL201','PB201','BI201','PO201'],\
['HG202','AU202','PT202','IR202','OS202','RE202','W 202','TA202','HF202','TL202','PB202','BI202','PO202'],\
['HG204','AU204','PT204','IR204','OS204','RE204','W 204','TA204','HF204'],\
['TL203','HG203','AU203','PT203','IR203','OS203','RE203','W 203','TA203','HF203','PB203','BI203','PO203'],\
['TL205','HG205','AU205','PT205','IR205','OS205','RE205','W 205','TA205','HF205','PB205','BI205','PO205'],\
['PB204','TL204','BI204','PO204'],\
['PB206','TL206','HG206','AU206','PT206','IR206','OS206','RE206','W 206','TA206','HF206','BI206','PO210'],\
['PB207','TL207','HG207','AU207','PT207','IR207','OS207','RE207','W 207','TA207','HF207','BI207','PO211','BI211'],\
['PB208','TL208','HG208','AU208','PT208','IR208','OS208','RE208','W 208','TA208','HF208','BI208','PO212','BI212'],\
['BI209','PB209','TL209','HG209','AU209','PT209','IR209','OS209','RE209','W 209','TA209','HF209'],\
['TH232','AC232','RA232','FR232','RN232'],\
['U 235','PA235','TH235','AC235','RA235','FR235','RN235'],\
['U 238','PA238','TH238','AC238','RA238','FR238','RN238']] | provide the list of stable species, and decay path feeding stables | entailment |
def give_zip_element_z_and_names(element_name):
''' create 2 indexes that, given the name of the element/specie, give the atomic number.'''
#import numpy as np
global z_bismuth
z_bismuth = 83
global z_for_elem
z_for_elem = []
global index_stable
index_stable = []
i_for_stable = 1
i_for_unstable = 0
for i in range(z_bismuth):
z_for_elem.append(int(i+1))
# the only elements below bismuth with no stable isotopes are Tc and Pm
if i+1 == 43 or i+1 == 61:
index_stable.append(i_for_unstable)
else:
index_stable.append(i_for_stable)
dummy_index = np.zeros(len(element_name))
for i in range(len(element_name)):
if element_name[i] == 'Neutron':
dummy_index[i] = 0
elif element_name[i] == 'H':
dummy_index[i] = 1
elif element_name[i] == 'He':
dummy_index[i] = 2
elif element_name[i] == 'Li':
dummy_index[i] = 3
elif element_name[i] == 'Be':
dummy_index[i] = 4
elif element_name[i] == 'B':
dummy_index[i] = 5
elif element_name[i] == 'C':
dummy_index[i] = 6
elif element_name[i] == 'N':
dummy_index[i] = 7
elif element_name[i] == 'O':
dummy_index[i] = 8
elif element_name[i] == 'F':
dummy_index[i] = 9
elif element_name[i] == 'Ne':
dummy_index[i] = 10
elif element_name[i] == 'Na':
dummy_index[i] = 11
elif element_name[i] == 'Mg':
dummy_index[i] = 12
elif element_name[i] == 'Al':
dummy_index[i] = 13
elif element_name[i] == 'Si':
dummy_index[i] = 14
elif element_name[i] == 'P':
dummy_index[i] = 15
elif element_name[i] == 'S':
dummy_index[i] = 16
elif element_name[i] == 'Cl':
dummy_index[i] = 17
elif element_name[i] == 'Ar':
dummy_index[i] = 18
elif element_name[i] == 'K':
dummy_index[i] = 19
elif element_name[i] == 'Ca':
dummy_index[i] = 20
elif element_name[i] == 'Sc':
dummy_index[i] = 21
elif element_name[i] == 'Ti':
dummy_index[i] = 22
elif element_name[i] == 'V':
dummy_index[i] = 23
elif element_name[i] == 'Cr':
dummy_index[i] = 24
elif element_name[i] == 'Mn':
dummy_index[i] = 25
elif element_name[i] == 'Fe':
dummy_index[i] = 26
elif element_name[i] == 'Co':
dummy_index[i] = 27
elif element_name[i] == 'Ni':
dummy_index[i] = 28
elif element_name[i] == 'Cu':
dummy_index[i] = 29
elif element_name[i] == 'Zn':
dummy_index[i] = 30
elif element_name[i] == 'Ga':
dummy_index[i] = 31
elif element_name[i] == 'Ge':
dummy_index[i] = 32
elif element_name[i] == 'As':
dummy_index[i] = 33
elif element_name[i] == 'Se':
dummy_index[i] = 34
elif element_name[i] == 'Br':
dummy_index[i] = 35
elif element_name[i] == 'Kr':
dummy_index[i] = 36
elif element_name[i] == 'Rb':
dummy_index[i] = 37
elif element_name[i] == 'Sr':
dummy_index[i] = 38
elif element_name[i] == 'Y':
dummy_index[i] = 39
elif element_name[i] == 'Zr':
dummy_index[i] = 40
elif element_name[i] == 'Nb':
dummy_index[i] = 41
elif element_name[i] == 'Mo':
dummy_index[i] = 42
elif element_name[i] == 'Tc':
dummy_index[i] = 43
elif element_name[i] == 'Ru':
dummy_index[i] = 44
elif element_name[i] == 'Rh':
dummy_index[i] = 45
elif element_name[i] == 'Pd':
dummy_index[i] = 46
elif element_name[i] == 'Ag':
dummy_index[i] = 47
elif element_name[i] == 'Cd':
dummy_index[i] = 48
elif element_name[i] == 'In':
dummy_index[i] = 49
elif element_name[i] == 'Sn':
dummy_index[i] = 50
elif element_name[i] == 'Sb':
dummy_index[i] = 51
elif element_name[i] == 'Te':
dummy_index[i] = 52
elif element_name[i] == 'I':
dummy_index[i] = 53
elif element_name[i] == 'Xe':
dummy_index[i] = 54
elif element_name[i] == 'Cs':
dummy_index[i] = 55
elif element_name[i] == 'Ba':
dummy_index[i] = 56
elif element_name[i] == 'La':
dummy_index[i] = 57
elif element_name[i] == 'Ce':
dummy_index[i] = 58
elif element_name[i] == 'Pr':
dummy_index[i] = 59
elif element_name[i] == 'Nd':
dummy_index[i] = 60
elif element_name[i] == 'Pm':
dummy_index[i] = 61
elif element_name[i] == 'Sm':
dummy_index[i] = 62
elif element_name[i] == 'Eu':
dummy_index[i] = 63
elif element_name[i] == 'Gd':
dummy_index[i] = 64
elif element_name[i] == 'Tb':
dummy_index[i] = 65
elif element_name[i] == 'Dy':
dummy_index[i] = 66
elif element_name[i] == 'Ho':
dummy_index[i] = 67
elif element_name[i] == 'Er':
dummy_index[i] = 68
elif element_name[i] == 'Tm':
dummy_index[i] = 69
elif element_name[i] == 'Yb':
dummy_index[i] = 70
elif element_name[i] == 'Lu':
dummy_index[i] = 71
elif element_name[i] == 'Hf':
dummy_index[i] = 72
elif element_name[i] == 'Ta':
dummy_index[i] = 73
elif element_name[i] == 'W':
dummy_index[i] = 74
elif element_name[i] == 'Re':
dummy_index[i] = 75
elif element_name[i] == 'Os':
dummy_index[i] = 76
elif element_name[i] == 'Ir':
dummy_index[i] = 77
elif element_name[i] == 'Pt':
dummy_index[i] = 78
elif element_name[i] == 'Au':
dummy_index[i] = 79
elif element_name[i] == 'Hg':
dummy_index[i] = 80
elif element_name[i] == 'Tl':
dummy_index[i] = 81
elif element_name[i] == 'Pb':
dummy_index[i] = 82
elif element_name[i] == 'Bi':
dummy_index[i] = 83
elif element_name[i] == 'Po':
dummy_index[i] = 84
elif element_name[i] == 'At':
dummy_index[i] = 85
#if spe[0] == 'N 1':
# znum_int[0] = 0
# here the index to connect name and atomic numbers.
global index_z_for_elements
index_z_for_elements = {}
for a,b in zip(element_name,dummy_index):
index_z_for_elements[a]=b | create 2 indexes that, given the name of the element/specie, give the atomic number. | entailment |
def get_el_from_z(z):
'''
Very simple Vfunction that gives the atomic number AS A STRING when given the element symbol.
Uses predefined a dictionnary.
Parameter :
z : string or number
For the other way, see get_z_from_el
'''
if(type(z)==float):
z=int(z)
if(type(z)==int):
z=str(z)
dict_z={'24': 'Cr', '25': 'Mn', '26': 'Fe', '27': 'Co', '20': 'Ca', '21': 'Sc', '22': 'Ti', '23': 'V', '28': 'Ni', '29': 'Cu', '4': 'Be', '8': 'O', '59': 'Pr', '58': 'Ce', '55': 'Cs', '54': 'Xe', '57': 'La', '56': 'Ba', '51': 'Sb', '50': 'Sn', '53': 'I', '52': 'Te', '88': 'Ra', '89': 'Ac', '82': 'Pb', '83': 'Bi', '80': 'Hg', '81': 'Tl', '86': 'Rn', '87': 'Fr', '84': 'Po', '85': 'At', '3': 'Li', '7': 'N', '39': 'Y', '38': 'Sr', '33': 'As', '32': 'Ge', '31': 'Ga', '30': 'Zn', '37': 'Rb', '36': 'Kr', '35': 'Br', '34': 'Se', '60': 'Nd', '61': 'Pm', '62': 'Sm', '63': 'Eu', '64': 'Gd', '65': 'Tb', '66': 'Dy', '67': 'Ho', '68': 'Er', '69': 'Tm', '2': 'He', '6': 'C', '91': 'Pa', '90': 'Th', '92': 'U', '11': 'Na', '10': 'Ne', '13': 'Al', '12': 'Mg', '15': 'P', '14': 'Si', '17': 'Cl', '16': 'S', '19': 'K', '18': 'Ar', '48': 'Cd', '49': 'In', '46': 'Pd', '47': 'Ag', '44': 'Ru', '45': 'Rh', '42': 'Mo', '43': 'Tc', '40': 'Zr', '41': 'Nb', '1': 'H', '5': 'B', '9': 'F', '77': 'Ir', '76': 'Os', '75': 'Re', '74': 'W', '73': 'Ta', '72': 'Hf', '71': 'Lu', '70': 'Yb', '79': 'Au', '78': 'Pt'}
return dict_z[z] | Very simple Vfunction that gives the atomic number AS A STRING when given the element symbol.
Uses predefined a dictionnary.
Parameter :
z : string or number
For the other way, see get_z_from_el | entailment |
def fit(self, x, y, dcoef='none'):
'''
performs the fit
x, y : list
Matching data arrays that define a numerical function y(x),
this is the data to be fitted.
dcoef : list or string
You can provide a different guess for the coefficients, or
provide the string 'none' to use the inital guess. The
default is 'none'.
Returns
-------
ierr
Values between 1 and 4 signal success.
Notes
-----
self.fcoef, contains the fitted coefficients.
'''
self.x = x
self.y = y
if dcoef is not 'none':
coef = dcoef
else:
coef = self.coef
fcoef=optimize.leastsq(self.residual,coef,args=(y,self.func,x))
self.fcoef = fcoef[0].tolist()
return fcoef[1] | performs the fit
x, y : list
Matching data arrays that define a numerical function y(x),
this is the data to be fitted.
dcoef : list or string
You can provide a different guess for the coefficients, or
provide the string 'none' to use the inital guess. The
default is 'none'.
Returns
-------
ierr
Values between 1 and 4 signal success.
Notes
-----
self.fcoef, contains the fitted coefficients. | entailment |
def plot(self, ifig=1, data_label='data', fit_label='fit',
data_shape='o', fit_shape='-'):
'''
plot the data and the fitted function.
Parameters
----------
ifig : integer
Figure window number. The default is 1.
data_label : string
Legend for data. The default is 'data'.
fit_label : string
Legend for fit. If fit_lable is 'fit', then substitute fit
function type self.func_name. The default is 'fit'.
data_shape : character
Shape for data. The default is 'o'.
fit_shape : character
Shape for fit. The default is '-'.
'''
if len(self.coef) is not len(self.fcoef):
print("Warning: the fitted coefficient list is not same")
print(" length as guessed list - still I will try ...")
pl.figure(ifig)
pl.plot(self.x,self.y,data_shape,label=data_label)
if fit_label is 'fit':
fit_label=self.__name__
pl.plot(self.x,self.func(self.fcoef,self.x),fit_shape,label=fit_label)
pl.legend() | plot the data and the fitted function.
Parameters
----------
ifig : integer
Figure window number. The default is 1.
data_label : string
Legend for data. The default is 'data'.
fit_label : string
Legend for fit. If fit_lable is 'fit', then substitute fit
function type self.func_name. The default is 'fit'.
data_shape : character
Shape for data. The default is 'o'.
fit_shape : character
Shape for fit. The default is '-'. | entailment |
def _stable_names(self):
'''
This private method extracts the element names from stable_el.
Note that stable_names is a misnomer as stable_el also contains
unstable element names with a number 999 for the *stable* mass
numbers. (?!??)
'''
stable_names=[]
for i in range(len(self.stable_el)):
stable_names.append(self.stable_el[i][0])
self.stable_names=stable_names | This private method extracts the element names from stable_el.
Note that stable_names is a misnomer as stable_el also contains
unstable element names with a number 999 for the *stable* mass
numbers. (?!??) | entailment |
def _process_abundance_vector(self, a, z, isomers, yps):
'''
This private method takes as input one vector definition and
processes it, including sorting by charge number and
mass number. It returns the processed input variables
plus an element and isotope vector and a list of
isomers.
'''
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
tmp=[]
isom=[]
for i in range(len(a)):
if z[i]!=0 and isomers[i]==1: #if its not 'NEUt and not an isomer'
tmp.append([self.stable_names[int(z[i])]+'-'+str(int(a[i])),yps[i],z[i],a[i]])
elif isomers[i]!=1: #if it is an isomer
if yps[i]==0:
isom.append([self.stable_names[int(z[i])]+'-'+str(int(a[i]))+'-'+str(int(isomers[i]-1)),1e-99])
else:
isom.append([self.stable_names[int(z[i])]+'-'+str(int(a[i]))+'-'+str(int(isomers[i]-1)),yps[i]])
tmp.sort(key = cmp_to_key(self.compar))
tmp.sort(key = cmp_to_key(self.comparator))
abunds=[]
isotope_to_plot=[]
z_iso_to_plot=[]
a_iso_to_plot=[]
el_iso_to_plot=[]
for i in range(len(tmp)):
isotope_to_plot.append(tmp[i][0])
abunds.append(tmp[i][1])
z_iso_to_plot.append(int(tmp[i][2]))
a_iso_to_plot.append(int(tmp[i][3]))
el_iso_to_plot.append(self.stable_names[int(tmp[i][2])])
return a_iso_to_plot,z_iso_to_plot,abunds,isotope_to_plot,el_iso_to_plot,isom | This private method takes as input one vector definition and
processes it, including sorting by charge number and
mass number. It returns the processed input variables
plus an element and isotope vector and a list of
isomers. | entailment |
def compar(self, x, y):
'''
simple comparator method
'''
indX=0
indY=0
a= int(x[0].split('-')[1])
b= int(y[0].split('-')[1])
if a>b:
return 1
if a==b:
return 0
if a<b:
return -1 | simple comparator method | entailment |
def comparator(self, x, y):
'''
simple comparator method
'''
indX=0
indY=0
for i in range(len(self.stable_names)):
if self.stable_names[i] == x[0].split('-')[0]:
indX=i
if self.stable_names[i] == y[0].split('-')[0]:
indY=i
if indX>indY:
return 1
if indX==indY:
return 0
if indX<indY:
return -1 | simple comparator method | entailment |
def _read_isotopedatabase(self, ffname='isotopedatabase.txt'):
'''
This private method reads the isotopedatabase.txt file in sldir
run dictory and returns z, a, elements, the cutoff mass for each
species that delineate beta+ and beta- decay and the logical in
the last column. Also provides charge_from_element dictionary
according to isotopedatabase.txt.
'''
name=self.sldir+ffname
z_db, a_db, el_db, stable_a_db,logic_db=\
np.loadtxt(name,unpack=True,dtype='str')
z_db=np.array(z_db,dtype='int')
a_db=np.array(a_db,dtype='int')
stable_a_db=np.array(stable_a_db,dtype='int')
# charge number for element name from dictionary in isotopedatabase.txt
charge_from_element_name={}
for name in self.stable_names:
if name=='Neutron' or name=='Neut' or name=='NEUT' or name=='N-1':
name='nn'
try:
zz=z_db[np.where(el_db==name)][0]
charge_from_element_name[name]=zz
except IndexError:
print(name+" does not exist in this run")
return z_db, a_db, el_db, stable_a_db,logic_db,charge_from_element_name | This private method reads the isotopedatabase.txt file in sldir
run dictory and returns z, a, elements, the cutoff mass for each
species that delineate beta+ and beta- decay and the logical in
the last column. Also provides charge_from_element dictionary
according to isotopedatabase.txt. | entailment |
def decay_indexpointer(self):
'''
This private method provides decay indexpointers which allow to
instantaneously decay an abundance vector. These are attributes
are.
Parameters
=================
decay_idp : list
points in the iso_to_plot (i.e. the undecayed abundance
vector index space) to the decay target.
idp_to_stables_in_isostoplot : list
points to the stable isotopes in the undecayed abundance
vector index space.
Notes
-----
For an application example see ppn.py-abu_vector-_getcycle.
'''
a_iso_to_plot =self.a_iso_to_plot
isotope_to_plot =self.isotope_to_plot
z_iso_to_plot =self.z_iso_to_plot
el_iso_to_plot =self.el_iso_to_plot
abunds =self.abunds
isom =self.isom
z_db, a_db, el_db, stable_a_db,logic_db,charge_from_element_name=\
self._read_isotopedatabase()
# find out which species beta+ and which beta- decay:
beta=np.sign(stable_a_db-a_db) # if a species is unstable and if beta < 0 => beta- decay
# else beta > 0 => beta+ decay
# now we need an index array on the scale of the abundance
# distribution to be plotted that points to itself for stable species,
# and to the stable element to which it decays in case of an unstable
# species
decay_index_pointer=np.zeros(len(isotope_to_plot), dtype='int')-1
idp_to_stables_in_isostoplot=[]
for i in range(len(isotope_to_plot)):
element_name=isotope_to_plot[i].split('-')[0]
try:
stable_a=stable_a_db[np.where(el_db==element_name)][0] # 4th column for that element in isotopedatabase.txt
except IndexError:
print("Can't find element "+element_name+" in isotopedatabase.txt")
if a_iso_to_plot[i] <= 209 and stable_a <=209: # Bi209 is last stable element
stable_mass_numbers=self.stable_el[self.stable_names.index(element_name)][1:]
iso_db_index_range_el=np.where(el_db==element_name)
beta_for_this_species=\
int(beta[iso_db_index_range_el][np.where(a_db[iso_db_index_range_el]==a_iso_to_plot[i])])
if beta_for_this_species == 0: # if there are no stable species for an element (Tc,Pm) the cutoff specifies
beta_for_this_species = -1 # the lowest mass beta- isotope
if a_iso_to_plot[i] in stable_mass_numbers:
# print isotope_to_plot[i]+" is stable"
decay_index_pointer[i]=i
idp_to_stables_in_isostoplot.append(i)
elif a_iso_to_plot[i]==8: # Be8 -> He4
decay_index_pointer[i]=isotope_to_plot.index('He-4')
else: # beta decay
found_decay_target=False
i_search=-1*beta_for_this_species
while not found_decay_target:
try:
try_target_el=self.stable_names[charge_from_element_name[element_name]+i_search]
except TypeError:
print("Maybe information about species "+isotope_to_plot[i]+" is not available in isotopedatabase.txt")
decay_index_pointer[i]=-1
break
# print try_target_el
try:
stable_mass_numbers=self.stable_el[self.stable_names.index(try_target_el)][1:]
except ValueError:
print("Can not find decay target for "+isotope_to_plot[i])
if a_iso_to_plot[i] in stable_mass_numbers:
ind_range=np.where(np.array(el_iso_to_plot)==try_target_el)[0]
if a_iso_to_plot[i] in np.array(a_iso_to_plot)[ind_range]:
this_ind=\
ind_range[np.where(np.array(a_iso_to_plot)[ind_range]==a_iso_to_plot[i])[0]]
# print isotope_to_plot[i]+" is unstable and decays to "+isotope_to_plot[this_ind]
decay_index_pointer[i]=this_ind
else:
print("It seems unstable species "+isotope_to_plot[i]+" wants to decay to " \
+try_target_el+"-"+str(a_iso_to_plot[i])+", however this species is not in this run." \
+" This points to an inconsistency in the network build. Here we will ignore the abundance of " \
+isotope_to_plot[i]+'.')
decay_index_pointer[i]=-1
found_decay_target=True
else:
i_search += -1*beta_for_this_species
if self.debug:
print("Decay rules:")
for i in range(len(isotope_to_plot)):
if decay_index_pointer[i]>= 0:
print(isotope_to_plot[i]+" -> "+isotope_to_plot[decay_index_pointer[i]])
ind_tmp=idp_to_stables_in_isostoplot
#ind_tmp=utils.strictly_monotonic(decay_index_pointer) # this would do the same, but the method above is more straight forward
self.decay_idp=decay_index_pointer
self.idp_to_stables_in_isostoplot=ind_tmp | This private method provides decay indexpointers which allow to
instantaneously decay an abundance vector. These are attributes
are.
Parameters
=================
decay_idp : list
points in the iso_to_plot (i.e. the undecayed abundance
vector index space) to the decay target.
idp_to_stables_in_isostoplot : list
points to the stable isotopes in the undecayed abundance
vector index space.
Notes
-----
For an application example see ppn.py-abu_vector-_getcycle. | entailment |
def is_stable(self,species):
'''
This routine accepts input formatted like 'He-3' and checks with
stable_el list if occurs in there. If it does, the routine
returns True, otherwise False.
Notes
-----
this method is designed to work with an se instance from
nugridse.py. In order to make it work with ppn.py some
additional work is required.
FH, April 20, 2013.
'''
element_name_of_iso = species.split('-')[0]
try:
a_of_iso = int(species.split('-')[1])
except ValueError: # if the species name contains in addition to the
# mass number some letters, e.g. for isomere, then
# we assume it is unstable. This is not correct but
# related to the fact that in nugridse.py we do not
# identify species properly by the three numbers A, Z
# and isomeric_state. We should do that!!!!!!
a_of_iso = 999
idp_of_element_in_stable_names = self.stable_names.index(element_name_of_iso)
if a_of_iso in self.stable_el[idp_of_element_in_stable_names][1:]:
return True
else:
return False | This routine accepts input formatted like 'He-3' and checks with
stable_el list if occurs in there. If it does, the routine
returns True, otherwise False.
Notes
-----
this method is designed to work with an se instance from
nugridse.py. In order to make it work with ppn.py some
additional work is required.
FH, April 20, 2013. | entailment |
def write(self, outfile='initial_abundance.dat',
header_string='initial abundances for a PPN run'):
'''
Write initial abundance file (intended for use with ppn)
Parameters
----------
outfile : string
Name of output file. The default is
'initial_abundance.dat'.
header_string : string
A string with header line. The default is
'initial abundances for a PPN run'.
'''
dcols=['Z', 'species','mass fraction']
data=[self.z,self.names,self.abu]
hd=[header_string]
att.write(outfile,hd,dcols,data) | Write initial abundance file (intended for use with ppn)
Parameters
----------
outfile : string
Name of output file. The default is
'initial_abundance.dat'.
header_string : string
A string with header line. The default is
'initial abundances for a PPN run'. | entailment |
def write_mesa(self, mesa_isos_file='isos.txt',
add_excess_iso='fe56', outfile='xa_iniabu.dat',
header_string='initial abundances for a MESA run',
header_char='!'):
'''
Write initial abundance file, returns written abundances and
mesa names.
Parameters
----------
mesa_isos_file : string, optional
List with isos copied from mesa network definition file in
mesa/data/net_data/nets. The default is 'isos.txt'.
add_excess_iso : string, optional
Add 1.-sum(isos in mesa net) to this isotope. The defualt
is 'fe56'.
outfile : string, optional
name of output file. The default file is 'xa_iniabu.dat'.
header_string : string, optional
Srting with header line. The default is
'initial abundances for a MESA run'.
header_char : character, optional
The default is '!'.
Examples
--------
>>> from NuGridPy import utils
>>> !ls ~/PPN/forum.astro.keele.ac.uk/frames/mppnp/USEEPP/ # find ppn initial abundance file
>>> !cat ~/mesa/data/net_data/nets/agb.net # find isos needed in mesa net
>>> !cat > isos.txt # paste needed isos into file
>>> help(utils.iniabu) # check documentation of method
>>> x=utils.iniabu('path_to_here/forum.astro.keele.ac.uk/frames/mppnp/USEEPP/iniab2.0E-02GN93.ppn')
>>> x.write_mesa?
>>> mnames,mabus = x.write_mesa(add_excess_iso='ne22',
... header_string='mppnp/USEEPP/iniab2.0E-02GN93.ppn for mesa/agb.net',
... outfile='xa_2.0E-02GN93.mesa')
'''
f=open('isos.txt')
a=f.readlines()
isos=[]
for i in range(len(a)):
isos.append(a[i].strip().rstrip(','))
mesa_names=[]
abus=[]
for i in range(len(self.z)):
b=self.names[i].split()
a=''
a=a.join(b)
if a in isos:
mesa_names.append(a)
abus.append(self.abu[i])
# mesa_names.append(elements_names[int(x.z[i])].lower()+str(int(x.a[i])))
for i in range(len(isos)):
if isos[i] not in mesa_names:
mesa_names.append(isos[i])
abus.append(0.0)
excess=1.-np.sum(np.array(abus))
abus=np.array(abus)
abus[mesa_names.index(add_excess_iso)] += excess
dcols=['','']
data=[mesa_names,abus]
hd=[header_string]
att.write(outfile,hd,dcols,data,header_char=header_char)
return mesa_names,abus | Write initial abundance file, returns written abundances and
mesa names.
Parameters
----------
mesa_isos_file : string, optional
List with isos copied from mesa network definition file in
mesa/data/net_data/nets. The default is 'isos.txt'.
add_excess_iso : string, optional
Add 1.-sum(isos in mesa net) to this isotope. The defualt
is 'fe56'.
outfile : string, optional
name of output file. The default file is 'xa_iniabu.dat'.
header_string : string, optional
Srting with header line. The default is
'initial abundances for a MESA run'.
header_char : character, optional
The default is '!'.
Examples
--------
>>> from NuGridPy import utils
>>> !ls ~/PPN/forum.astro.keele.ac.uk/frames/mppnp/USEEPP/ # find ppn initial abundance file
>>> !cat ~/mesa/data/net_data/nets/agb.net # find isos needed in mesa net
>>> !cat > isos.txt # paste needed isos into file
>>> help(utils.iniabu) # check documentation of method
>>> x=utils.iniabu('path_to_here/forum.astro.keele.ac.uk/frames/mppnp/USEEPP/iniab2.0E-02GN93.ppn')
>>> x.write_mesa?
>>> mnames,mabus = x.write_mesa(add_excess_iso='ne22',
... header_string='mppnp/USEEPP/iniab2.0E-02GN93.ppn for mesa/agb.net',
... outfile='xa_2.0E-02GN93.mesa') | entailment |
def set_and_normalize(self,species_hash):
'''
species_hash is a hash array in which you provide abundances
referenced by species names that you want to set to some
particular value; all other species are then normalised so that
the total sum is 1.
Examples
--------
You can set up the argument array for this method for example
in the following way.
>>> sp={}
>>> sp['he 4']=0.2
>>> sp['h 1']=0.5
'''
sum_before = sum(self.abu)
for i in range(len(species_hash)):
sum_before -= self.abu[self.hindex[list(species_hash.keys())[i]]]
print("sum_before = "+str(sum_before))
normalization_factor=old_div(1.0-sum(species_hash.values()),sum_before)
print("normalizing the rest witih factor "+str(normalization_factor))
self.abu *= normalization_factor
for i in range(len(species_hash)):
self.abu[self.hindex[list(species_hash.keys())[i]]]=list(species_hash.values())[i]
for l in range(len(self.abu)):
if self.abu[l] <= 1e-99: #otherwise we might write e-100 which will be read as e-10 by ppn
self.abu[l] = 1.0e-99
for name in self.habu:
self.habu[name]=self.abu[self.hindex[name]] | species_hash is a hash array in which you provide abundances
referenced by species names that you want to set to some
particular value; all other species are then normalised so that
the total sum is 1.
Examples
--------
You can set up the argument array for this method for example
in the following way.
>>> sp={}
>>> sp['he 4']=0.2
>>> sp['h 1']=0.5 | entailment |
def isoratio_init(self,isos):
'''
This file returns the isotopic ratio of two isotopes specified
as iso1 and iso2. The isotopes are given as, e.g.,
['Fe',56,'Fe',58] or ['Fe-56','Fe-58'] (for compatibility)
-> list.
'''
if len(isos) == 2:
dumb = []
dumb = isos[0].split('-')
dumb.append(isos[1].split('-')[0])
dumb.append(isos[1].split('-')[1])
isos = dumb
ssratio = old_div(self.habu[isos[0].ljust(2).lower() + str(int(isos[1])).rjust(3)], self.habu[isos[2].ljust(2).lower() + str(int(isos[3])).rjust(3)])
return ssratio | This file returns the isotopic ratio of two isotopes specified
as iso1 and iso2. The isotopes are given as, e.g.,
['Fe',56,'Fe',58] or ['Fe-56','Fe-58'] (for compatibility)
-> list. | entailment |
def iso_abundance(self,isos):
'''
This routine returns the abundance of a specific isotope.
Isotope given as, e.g., 'Si-28' or as list
['Si-28','Si-29','Si-30']
'''
if type(isos) == list:
dumb = []
for it in range(len(isos)):
dumb.append(isos[it].split('-'))
ssratio = []
isos = dumb
for it in range(len(isos)):
ssratio.append(self.habu[isos[it][0].ljust(2).lower() + str(int(isos[it][1])).rjust(3)])
else:
isos = isos.split('-')
ssratio = self.habu[isos[0].ljust(2).lower() + str(int(isos[1])).rjust(3)]
return ssratio | This routine returns the abundance of a specific isotope.
Isotope given as, e.g., 'Si-28' or as list
['Si-28','Si-29','Si-30'] | entailment |
def drive_rotational_speed_rpm(self):
"""Gets the set of rotational speed of the HDD drives"""
drv_rot_speed_rpm = set()
for member in self.get_members():
if member.rotational_speed_rpm is not None:
drv_rot_speed_rpm.add(member.rotational_speed_rpm)
return drv_rot_speed_rpm | Gets the set of rotational speed of the HDD drives | entailment |
def iso_name_converter(iso):
'''
Converts the name of the given isotope (input), e.g., 'N-14' to
14N as used later to compare w/ grain database.
'''
sp = iso.split('-')
output = sp[1] + sp[0]
return output.lower() | Converts the name of the given isotope (input), e.g., 'N-14' to
14N as used later to compare w/ grain database. | entailment |
def get_svnpath():
'''
This subroutine gives back the path of the whole svn tree
installation, which is necessary for the script to run.
'''
svnpathtmp = __file__
splitsvnpath = svnpathtmp.split('/')
if len(splitsvnpath) == 1:
svnpath = os.path.abspath('.') + '/../../'
else:
svnpath = ''
for i in range(len(splitsvnpath)-3):
svnpath += splitsvnpath[i] + '/'
return svnpath | This subroutine gives back the path of the whole svn tree
installation, which is necessary for the script to run. | entailment |
def reset_filter(self):
'''
Resets the filter and goes back to initialized value. This
routine also resets the style if you have changed it.
'''
self.header_desc = self._header_desc
self.header_data = self._header_data
self.header_style = self._header_style
self.desc = self._desc
self.data = self._data
self.style = self._style
self.descdict = self._descdict
self.datadict = self._datadict
self.styledict = self._styledict | Resets the filter and goes back to initialized value. This
routine also resets the style if you have changed it. | entailment |
def info(self, graintype=True, group=True, reference=False,
phase=True):
'''
This routine gives you informations what kind of grains are
currently available in your filtered version. It gives you
the type of grains available. More to be implemented upon need.
Parameters
----------
graintype, group, references, phase : boolean
What do you wanna print for information. There can be a
lot of references, hence references default is False.
'''
# create a list with all graintypes
gtype_info = []
group_info = []
ref_info = []
phase_info = []
# how many grains in database
print('There are ' + str(len(self.data)) + ' grains in your database.\n')
# graintypes
if graintype:
for i in range(len(self.desc)):
gtype_tmp = self.desc[i][self.descdict['Type']]
wrtchk = True
for j in range(len(gtype_info)):
if gtype_info[j] == gtype_tmp:
wrtchk = False
break
if wrtchk:
gtype_info.append(gtype_tmp)
print('Available graintypes are:')
print('-------------------------')
print(gtype_info)
# groups
if group:
for i in range(len(self.desc)):
group_tmp = self.desc[i][self.descdict['Group']]
wrtchk = True
for j in range(len(group_info)):
if group_info[j] == group_tmp:
wrtchk = False
break
if wrtchk:
group_info.append(group_tmp)
print('\nAvailable groups of grains (for silicates and oxides) are:')
print('----------------------------------------------------------')
print(group_info)
# Phases
if phase:
for i in range(len(self.desc)):
phase_tmp = self.desc[i][self.descdict['Phase']]
wrtchk = True
for j in range(len(phase_info)):
if phase_info[j] == phase_tmp:
wrtchk = False
break
if wrtchk:
phase_info.append(phase_tmp)
print('\nAvailable Phases of grains are:')
print('----------------------------------------------------------')
print(phase_info)
# references
if reference:
for i in range(len(self.desc)):
ref_tmp = self.desc[i][self.descdict['Reference']]
wrtchk = True
for j in range(len(ref_info)):
if ref_info[j] == ref_tmp:
wrtchk = False
break
if wrtchk:
ref_info.append(ref_tmp)
print('\nReferences for grains:')
print('----------------------')
print(ref_info) | This routine gives you informations what kind of grains are
currently available in your filtered version. It gives you
the type of grains available. More to be implemented upon need.
Parameters
----------
graintype, group, references, phase : boolean
What do you wanna print for information. There can be a
lot of references, hence references default is False. | entailment |
def filter_desc(self, graintype=None, group=None, reference=None,
size=None, phase=None):
'''
This routine is to filter for description elements. You can
check what is available in the description by running,
>>> i.header_desc()
where i is the instance you loaded.
You can run the filter multiple times! You can filter for the
following types:
Parameters
----------
graintype : string or list
Give graintypes as either 'M' for only mainstream or more
than one ['M','Z'].
group : integer or list
Group of graintypes, important for oxides and silicates,
since they are split into groups and not into types.
Example 1, or give a list [1,3].
reference : string or list
Give the reference you want to filter for, try an i.info()
to pick the right name! You can select a single
referennce as string or multiple references in as a list.
size : string
Filter for grain sizes, give '<5.0' or '>5.0' as a string
for larger or smaller than a given grainsize in um. Only
data with known grainsizes are chosen. Often grain sizes
are given in a times b, where a and b are the minumum and
maximum measurements from an image. If you give a >5.0
then grains with the smaller dimension >5um are taken into
account. If you want <5.0 then grains with the upper
dimension <5um are taken into account.
'''
# filter for graintype
if graintype != None:
indexing = []
# index file on which lines to pick
if type(graintype) == str:
graintype = [graintype]
# filter
for typ in graintype:
for i in range(len(self.desc)):
if self.desc[i][self.descdict['Type']] == typ:
indexing.append(i)
# filter:
self._filter_desc(indexing)
# filter for graintype
if phase != None:
indexing = []
# index file on which lines to pick
if type(phase) == str:
phase = [phase]
# filter
for typ in phase:
for i in range(len(self.desc)):
if self.desc[i][self.descdict['Phase']] == typ:
indexing.append(i)
# filter:
self._filter_desc(indexing)
# filter for group (oxides and silicates)
if group != None:
indexing = []
# index file on which lines to pick
if type(group) != list:
group = [group]
# filter
for grp in group:
for i in range(len(self.desc)):
if self.desc[i][self.descdict['Group']] == str(int(grp)):
indexing.append(i)
# filter:
self._filter_desc(indexing)
# filter for reference
if reference != None:
indexing = []
# index file on which lines to pick
if type(reference) != list:
reference = [reference]
# filter
for ri in range(len(reference)):
for i in range(len(self.desc)):
if self.desc[i][self.descdict['Reference']] == reference[ri]:
indexing.append(i)
# filter:
self._filter_desc(indexing)
# filter for grainzise
if size != None:
indexing = []
# index file on which lines to pick
# filter
operator = size[0:1]
size = float(size[1:len(size)])
for i in range(len(self.desc)):
if self.desc[i][self.descdict['Size (microns)']] != '':
try:
# print self.desc[i][self.descdict['Size (microns)']]
comperator1 = self.desc[i][self.descdict['Size (microns)']].split('x')[0]
comperator2 = self.desc[i][self.descdict['Size (microns)']].split('x')[1]
comperator = [float(comperator1),float(comperator2)]
if operator == '<':
comperator = np.min(comperator)
else:
comperator = np.max(comperator)
except IndexError or AttributeError:
try:
comperator = float(self.desc[i][self.descdict['Size (microns)']])
except ValueError:
continue
if operator == '>':
if comperator > size:
indexing.append(i)
elif operator == '<':
if comperator < size:
indexing.append(i)
else:
continue
# filter:
self._filter_desc(indexing) | This routine is to filter for description elements. You can
check what is available in the description by running,
>>> i.header_desc()
where i is the instance you loaded.
You can run the filter multiple times! You can filter for the
following types:
Parameters
----------
graintype : string or list
Give graintypes as either 'M' for only mainstream or more
than one ['M','Z'].
group : integer or list
Group of graintypes, important for oxides and silicates,
since they are split into groups and not into types.
Example 1, or give a list [1,3].
reference : string or list
Give the reference you want to filter for, try an i.info()
to pick the right name! You can select a single
referennce as string or multiple references in as a list.
size : string
Filter for grain sizes, give '<5.0' or '>5.0' as a string
for larger or smaller than a given grainsize in um. Only
data with known grainsizes are chosen. Often grain sizes
are given in a times b, where a and b are the minumum and
maximum measurements from an image. If you give a >5.0
then grains with the smaller dimension >5um are taken into
account. If you want <5.0 then grains with the upper
dimension <5um are taken into account. | entailment |
def _filter_desc(self, indexing):
'''
Private function to filter data, goes with filter_desc
'''
# now filter data
if len(indexing) > 0:
desc_tmp = np.zeros((len(indexing),len(self.header_desc)),dtype='|S1024')
data_tmp = np.zeros((len(indexing),len(self.header_data)))
style_tmp= np.zeros((len(indexing),len(self.header_style)),dtype='|S1024')
for i in range(len(indexing)):
for j in range(len(self.header_desc)):
desc_tmp[i][j] = self.desc[indexing[i]][j]
for k in range(len(self.header_data)):
data_tmp[i][k] = self.data[indexing[i]][k]
for l in range(len(self.header_style)):
style_tmp[i][l]= self.style[indexing[i]][l]
self.desc = desc_tmp
self.data = data_tmp
self.style= style_tmp
else:
print('No filter selected or no data found!') | Private function to filter data, goes with filter_desc | entailment |
def filter_single_grain(self):
'''
This subroutine is to filter out single grains. It is kind of
useless if you have tons of data still in the list. To work on
there, you have other filters (filter_desc and filter_data)
available! This filter gives an index to every grain, plots
the most important information, and then asks you to pick a
filter. No input necessary, input is given during the routine
'''
my_index = 0
my_grains = [['Index','Label','Type','Group','Meteorite','Mineralogy','C12/C13','d(Si29/Si30)','d(Si30/Si29)']]
# add the data to this grain list
for it in range(len(self.data)):
my_grains.append([my_index,self.desc[it][self.descdict['Grain Label']], self.desc[it][self.descdict['Type']], self.desc[it][self.descdict['Group']], self.desc[it][self.descdict['Meteorite']], self.desc[it][self.descdict['Mineralogy']], self.data[it][self.datadict['12c/13c']], self.data[it][self.datadict['d(29si/28si)']], self.data[it][self.datadict['d(30si/28si)']]])
my_index += 1
for prt_line in my_grains:
print(prt_line)
# now write the selector for the index of the grains to select which one should be
# available and which ones should be dumped
usr_input = ''
usr_input = input('Select the grains by index that you want to use. Please separate the indeces by a comma, e.g., 1 or 0,2,3,4\n')
# process user index
if usr_input == '':
print('No data selected to filter.')
return None
elif len(usr_input) == 1:
usr_index = [usr_input]
else:
usr_index = usr_input.split(',')
for it in range(len(usr_index)):
usr_index[it] = int(usr_index[it])
# filter
desc_tmp = np.zeros((len(usr_index),len(self.header_desc)),dtype='|S1024')
data_tmp = np.zeros((len(usr_index),len(self.header_data)))
style_tmp= np.zeros((len(usr_index),len(self.header_style)),dtype='|S1024')
for i in range(len(usr_index)):
for j in range(len(self.header_desc)):
desc_tmp[i][j] = self.desc[usr_index[i]][j]
for k in range(len(self.header_data)):
data_tmp[i][k] = self.data[usr_index[i]][k]
for l in range(len(self.header_style)):
style_tmp[i][l]= self.style[usr_index[i]][l]
self.desc = desc_tmp
self.data = data_tmp
self.style= style_tmp | This subroutine is to filter out single grains. It is kind of
useless if you have tons of data still in the list. To work on
there, you have other filters (filter_desc and filter_data)
available! This filter gives an index to every grain, plots
the most important information, and then asks you to pick a
filter. No input necessary, input is given during the routine | entailment |
def filter_data(self, isos, limit, delta=True):
'''
This subroutine filters isotopic values according to the limit
you give. You can filter in ratio or in delta space.
Parameters
----------
isos : list
isotopes you want to filter for, e.g., give as
['Si-28', 'Si-30'] for the 28/30 ratio.
limit : string
what do you want to filter for, e.g., ratio or delta > 100,
then give '>100'.
delta : boolean, optional
do you wanna filter in delta space, then set to True,
otherwise to False. The default is True.
'''
# check availability
dat_index, delta_b, ratio_b = self.check_availability(isos)
if dat_index == -1:
print('Isotopes selected are not available. Check i.datadict (where i is your instance) for availability of isotopes.')
return None
# select if larger or smaller and define limit
if limit[0:1] == '>':
comperator = 'gt'
elif limit[0:1] == '<':
comperator = 'st'
else:
print('Comperator not specified. Limit must be given as \'>5.\' for example.')
return None
try:
limit = float(limit[1:len(limit)])
except ValueError:
print('Limit must be given as \'>5.\' for example.')
return None
# now calculate the actual limit to compare with, depending on if it delta or not or whatsoever
if delta == delta_b: # input and available same
if ratio_b: # one over
if delta:
tmp = self.delta_to_ratio(isos,limit,oneover=True)
comp_lim = self.ratio_to_delta(isos,tmp) # check
else:
comp_lim = old_div(1.,limit) # check
else: # all fine
comp_lim = limit
else: # input and availability not the same
if ratio_b: # one over
if delta: # delta given, ratio one over wanted
comp_lim = self.delta_to_ratio(isos,limit,oneover=True)
else: # ratio given, delta one over wanted
comp_lim = self.ratio_to_delta(isos,limit,oneover=True)
else: # not one over
if delta: # delta given, ratio wanted
comp_lim = self.delta_to_ratio(isos,limit)
else:
comp_lim = self.ratio_to_delta(isos,limit)
# indexing vector
indexing = []
for i in range(len(self.data)):
dat_val = self.data[i][dat_index]
if comperator == 'st':
if dat_val < comp_lim:
indexing.append(i)
else:
if dat_val > comp_lim:
indexing.append(i)
# now filter data
if len(indexing) > 0:
desc_tmp = np.zeros((len(indexing),len(self.header_desc)),dtype='|S1024')
data_tmp = np.zeros((len(indexing),len(self.header_data)))
for i in range(len(indexing)):
for j in range(len(self.header_desc)):
desc_tmp[i][j] = self.desc[indexing[i]][j]
for k in range(len(self.header_data)):
data_tmp[i][k] = self.data[indexing[i]][k]
self.desc = desc_tmp
self.data = data_tmp
else:
print('No filter selected!') | This subroutine filters isotopic values according to the limit
you give. You can filter in ratio or in delta space.
Parameters
----------
isos : list
isotopes you want to filter for, e.g., give as
['Si-28', 'Si-30'] for the 28/30 ratio.
limit : string
what do you want to filter for, e.g., ratio or delta > 100,
then give '>100'.
delta : boolean, optional
do you wanna filter in delta space, then set to True,
otherwise to False. The default is True. | entailment |
def style_chg_label(self,type,symb=None,edc=None,fac=None,smbsz=None,edw=None,lab=None):
'''
This routine changes the plotting style that is set by default.
The style is changed according the the label that you choose.
Changing according to reference, use style_chg_ref() function!
You can change it back to default by resetting the filter using
g.reset_filter() routine, assuming that g is your instance. The
format that is used here is:
['Symbol', 'Edge color', 'Face color', 'Symbol size', 'Edge width'
,'Label']
You can see the current styles by running
Attention: You have to give values to all variables that are
compatible with the python mathplotlib. If not, it's your fault
if nothing works.
g.style
Parameters
----------
type : string
Select the label of the grains you want to change.
symb : string, optional
Select new symbol. None for no change.
edc : string, optional
Select new edge color. None for no change.
fac : string, optional
Select new face color. None for no change.
smbsz : string, optional
Select new symbol size. None for no change.
edw : string, optional
Select new edge width. None for no change.
lab : string, optional
Select new label. None for no change. Watch out, if you
want to do more specifications later, the type will
have changed to the new label.
'''
# do stuff for selected type
for i in range(len(self.style)):
# check if type is correct, otherwise continue directly
if self.style[i][self.styledict['Label']] == type:
# change symbol:
if symb != None:
self.style[i][self.styledict['Symbol']] = symb
# change edge color
if edc != None:
self.style[i][self.styledict['Edge color']] = edc
# change face color
if fac != None:
self.style[i][self.styledict['Face color']] = fac
# change symbol size
if smbsz != None:
self.style[i][self.styledict['Symbol size']] = smbsz
# change edge width
if edw != None:
self.style[i][self.styledict['Edge width']] = edw
# change label
if lab != None:
self.style[i][self.styledict['Label']] = lab | This routine changes the plotting style that is set by default.
The style is changed according the the label that you choose.
Changing according to reference, use style_chg_ref() function!
You can change it back to default by resetting the filter using
g.reset_filter() routine, assuming that g is your instance. The
format that is used here is:
['Symbol', 'Edge color', 'Face color', 'Symbol size', 'Edge width'
,'Label']
You can see the current styles by running
Attention: You have to give values to all variables that are
compatible with the python mathplotlib. If not, it's your fault
if nothing works.
g.style
Parameters
----------
type : string
Select the label of the grains you want to change.
symb : string, optional
Select new symbol. None for no change.
edc : string, optional
Select new edge color. None for no change.
fac : string, optional
Select new face color. None for no change.
smbsz : string, optional
Select new symbol size. None for no change.
edw : string, optional
Select new edge width. None for no change.
lab : string, optional
Select new label. None for no change. Watch out, if you
want to do more specifications later, the type will
have changed to the new label. | entailment |
def style_chg_ref(self,ref,symb=None,edc=None,fac=None,smbsz=None,edw=None,lab=None):
'''
This routine changes the plotting style that is set by default.
The style is changed according the the reference of the paper
as given in the grain database. For change according to type of
grain, use the routine syle_chg_label().
['Symbol', 'Edge color', 'Face color', 'Symbol size', 'Edge width'
,'Label']
You can see the current styles by running
Attention: You have to give values to all variables that are
compatible with the python mathplotlib. If not, it's your fault
if nothing works.
g.style
Parameters
----------
ref : string
Select the reference of the grains you want to change.
symb : string, optional
Select new symbol. None for no change.
edc : string, optional
Select new edge color. None for no change.
fac : string, optional
Select new face color. None for no change.
smbsz : string, optional
Select new symbol size. None for no change.
edw : string, optional
Select new edge width. None for no change.
lab : string, optional
Select new label. None for no change.
'''
# do stuff for selected reference
for i in range(len(self.style)):
# check if reference is correct, otherwise continue directly
if self.desc[i][self.descdict['Reference']] == ref:
# change symbol:
if symb != None:
self.style[i][self.styledict['Symbol']] = symb
# change edge color
if edc != None:
self.style[i][self.styledict['Edge color']] = edc
# change face color
if fac != None:
self.style[i][self.styledict['Face color']] = fac
# change symbol size
if smbsz != None:
self.style[i][self.styledict['Symbol size']] = smbsz
# change edge width
if edw != None:
self.style[i][self.styledict['Edge width']] = edw
# change label
if lab != None:
self.style[i][self.styledict['Label']] = lab | This routine changes the plotting style that is set by default.
The style is changed according the the reference of the paper
as given in the grain database. For change according to type of
grain, use the routine syle_chg_label().
['Symbol', 'Edge color', 'Face color', 'Symbol size', 'Edge width'
,'Label']
You can see the current styles by running
Attention: You have to give values to all variables that are
compatible with the python mathplotlib. If not, it's your fault
if nothing works.
g.style
Parameters
----------
ref : string
Select the reference of the grains you want to change.
symb : string, optional
Select new symbol. None for no change.
edc : string, optional
Select new edge color. None for no change.
fac : string, optional
Select new face color. None for no change.
smbsz : string, optional
Select new symbol size. None for no change.
edw : string, optional
Select new edge width. None for no change.
lab : string, optional
Select new label. None for no change. | entailment |
def plot_ratio_return(self, isox, isoy, deltax=True, deltay=True):
'''
This routine returns data isotopic data to plot from the
filtered list of data.
Parameters
----------
isox : list
Isotopes for x axis in standard format ['Si-28','Si-30'].
isoy : list
Same as isox but for y axis.
deltax : boolean, optional
If true then x-axis values are in delta format. The default
is True.
deltay : boolean, optional
Same as for x-axis but for y-axis. The default is True.
Returns
-------
grpl_xdata
grain plot x-axis data.
grpl_xerr
x-axis error bars.
grpl_ydata
grain plot y-axis data.
grpl_yerr
y-axis error bars.
grpl_style
style data for the different symbols.
'''
# check availability
index_x, delta_b_x, ratio_b_x = self.check_availability(isox)
index_y, delta_b_y, ratio_b_y = self.check_availability(isoy)
if index_x == -1 or index_y == -1:
print('Following input data are not available in the database. Revise your input.')
if index_x == -1:
print('x axis data not available')
if index_y == -1:
print('y axis data not available')
return None
# create x and y data as 1d vectors, also error bars
xdata_vec = np.zeros((len(self.data)))
ydata_vec = np.zeros((len(self.data)))
xdata_err = np.zeros((len(self.data)))
ydata_err = np.zeros((len(self.data)))
for it in range(len(self.data)):
xdata_vec[it] = self.data[it][index_x]
ydata_vec[it] = self.data[it][index_y]
xdata_err[it] = self.data[it][index_x+1]
ydata_err[it] = self.data[it][index_y+1]
# index data that are nan
index_nan = []
for it in range(len(xdata_vec)):
if np.isnan(xdata_vec[it]) or np.isnan(ydata_vec[it]):
index_nan.append(it)
# make range of all incides
index_filtered = list(range(len(xdata_vec)))
for it in range(len(index_nan)):
index_filtered.remove(index_nan[it])
xdata_tmp = np.zeros((len(index_filtered)))
ydata_tmp = np.zeros((len(index_filtered)))
xerr_tmp = np.zeros((len(index_filtered)))
yerr_tmp = np.zeros((len(index_filtered)))
style_plt = np.zeros((len(index_filtered),len(self.header_style)),dtype='|S1024')
for i in range(len(index_filtered)):
xdata_tmp[i] = xdata_vec[index_filtered[i]]
ydata_tmp[i] = ydata_vec[index_filtered[i]]
xerr_tmp[i] = xdata_err[index_filtered[i]]
yerr_tmp[i] = ydata_err[index_filtered[i]]
for j in range(len(style_plt[i])):
style_plt[i][j] = self.style[index_filtered[i]][j]
xdata_vec = xdata_tmp
ydata_vec = ydata_tmp
xdata_err = xerr_tmp
ydata_err = yerr_tmp
# loop through error and set nans to 0
for i in range(len(xdata_err)):
if np.isnan(xdata_err[i]):
xdata_err[i] = 0.
if np.isnan(ydata_err[i]):
ydata_err[i] = 0.
# make start stop index for groups
start_stop = []
start = 0
for it in range(len(xdata_vec)-1):
if (style_plt[it] == style_plt[it+1]).all():
continue
else:
stop = it + 1
start_stop.append([start,stop])
start = stop
# last entry
if start_stop == []:
start_stop.append([0,len(xdata_vec)])
else:
start_stop.append([start_stop[len(start_stop)-1][1],len(xdata_vec)+1])
# now append things to return variables
grain_plt_xdata = []
grain_plt_ydata = []
grain_plt_xerr = []
grain_plt_yerr = []
grain_plt_style = []
for i in range(len(start_stop)):
grain_plt_xdata.append(xdata_vec[start_stop[i][0]:start_stop[i][1]])
grain_plt_ydata.append(ydata_vec[start_stop[i][0]:start_stop[i][1]])
grain_plt_xerr.append(xdata_err[start_stop[i][0]:start_stop[i][1]])
grain_plt_yerr.append(ydata_err[start_stop[i][0]:start_stop[i][1]])
grain_plt_style.append(style_plt[start_stop[i][0]])
return [grain_plt_xdata,grain_plt_xerr,grain_plt_ydata,grain_plt_yerr,grain_plt_style] | This routine returns data isotopic data to plot from the
filtered list of data.
Parameters
----------
isox : list
Isotopes for x axis in standard format ['Si-28','Si-30'].
isoy : list
Same as isox but for y axis.
deltax : boolean, optional
If true then x-axis values are in delta format. The default
is True.
deltay : boolean, optional
Same as for x-axis but for y-axis. The default is True.
Returns
-------
grpl_xdata
grain plot x-axis data.
grpl_xerr
x-axis error bars.
grpl_ydata
grain plot y-axis data.
grpl_yerr
y-axis error bars.
grpl_style
style data for the different symbols. | entailment |
def plot_pattern_return(self, isos, delta=True):
'''
This routine returns data isotopic data to plot from the
filtered list of data.
Parameters
----------
isos : list
Isotopes for x axis in standard format
[['Si-30','Si-28'],['Si-29','Si-30'],...]
isoy : list
Same as isox but for y axis.
deltay: boolean, optional
Same as for x-axis but for y-axis. The default is True.
Returns
-------
grpl_data
grain plot x-axis data.
grpl_err
x-axis error bars.
grpl_style
style data for the different symbols.
'''
# check availability
index = []
delta_b = []
ratio_b = []
for i in range(len(isos)):
tmpi,tmpd,tmpr = self.check_availability(isos[i])
index.append(tmpi)
delta_b.append(tmpd)
ratio_b.append(tmpd)
for i in range(len(index)):
if index[i] == -1:
print('Input not available for: ' + isos[i] + '. Revise!')
return None
# create x and y data as 1d vectors, also error bars
data_vec = np.zeros((len(self.data),len(isos)))
data_err = np.zeros((len(self.data),len(isos)))
for it in range(len(self.data)):
for jt in range(len(isos)):
data_vec[it][jt] = self.data[it][index[jt]]
data_err[it][jt] = self.data[it][index[jt]+1]
# index data that are nan
index_nan = []
for it in range(len(data_vec)):
for jt in range(len(data_vec[it])):
if np.isnan(data_vec[it][jt]):
index_nan.append(it)
# make range of all incides
index_filtered = list(range(len(data_vec)))
for it in range(len(index_nan)):
index_filtered.remove(index_nan[it])
data_tmp = np.zeros((len(index_filtered),len(isos)))
err_tmp = np.zeros((len(index_filtered),len(isos)))
style_plt = np.zeros((len(index_filtered),len(self.header_style)),dtype='|S1024')
for i in range(len(index_filtered)):
data_tmp[i] = data_vec[index_filtered[i]]
err_tmp[i] = data_err[index_filtered[i]]
for j in range(len(style_plt[i])):
style_plt[i][j] = self.style[i][j]
xdata_vec = xdata_tmp
ydata_vec = ydata_tmp
xdata_err = xerr_tmp
ydata_err = yerr_tmp
# loop through error and set nans to 0
for i in range(len(xdata_err)):
if np.isnan(xdata_err[i]):
xdata_err[i] = 0.
if np.isnan(ydata_err[i]):
ydata_err[i] = 0.
# FIXME here
# make start stop index for groups
start_stop = []
start = 0
for it in range(len(xdata_vec)-1):
if (style_plt[it] == style_plt[it+1]).all():
continue
else:
stop = it
start_stop.append([start,stop])
start = stop+1
# last entry
if start_stop == []:
start_stop.append([0,len(xdata_vec)])
else:
start_stop.append([start_stop[len(start_stop)-1][1]+1,len(xdata_vec)])
# now append things to return variables
grain_plt_xdata = []
grain_plt_ydata = []
grain_plt_xerr = []
grain_plt_yerr = []
grain_plt_style = []
for i in range(len(start_stop)):
grain_plt_xdata.append(xdata_vec[start_stop[i][0]:start_stop[i][1]])
grain_plt_ydata.append(ydata_vec[start_stop[i][0]:start_stop[i][1]])
grain_plt_xerr.append(xdata_err[start_stop[i][0]:start_stop[i][1]])
grain_plt_yerr.append(ydata_err[start_stop[i][0]:start_stop[i][1]])
grain_plt_style.append(style_plt[start_stop[i][0]])
return [grain_plt_xdata,grain_plt_xerr,grain_plt_ydata,grain_plt_yerr,grain_plt_style] | This routine returns data isotopic data to plot from the
filtered list of data.
Parameters
----------
isos : list
Isotopes for x axis in standard format
[['Si-30','Si-28'],['Si-29','Si-30'],...]
isoy : list
Same as isox but for y axis.
deltay: boolean, optional
Same as for x-axis but for y-axis. The default is True.
Returns
-------
grpl_data
grain plot x-axis data.
grpl_err
x-axis error bars.
grpl_style
style data for the different symbols. | entailment |
def check_availability(self, isos):
'''
This routine checks if the requested set of isotopes is
available in the dataset.
Parameters
----------
isos : list
set of isotopes in format ['Si-28','Si-30'].
Returns
-------
list
[index, delta_b, ratio_b].
index: where is it.
delta_b: is it a delta value or not?
ratio_ib: True if ratio is inverted, false if not
'''
# make names
iso1name = iso_name_converter(isos[0])
iso2name = iso_name_converter(isos[1])
ratio = iso1name + '/' + iso2name
ratio_inv = iso2name + '/' + iso1name
delta = 'd(' + iso1name + '/' + iso2name + ')'
delta_inv = 'd(' + iso2name + '/' + iso1name + ')'
index = -1
# search for data entry
try:
index = self.datadict[ratio]
delta_b = False
ratio_b = False
except KeyError:
try:
index = self.datadict[ratio_inv]
delta_b = False
ratio_b = True
except KeyError:
try:
index = self.datadict[delta]
delta_b = True
ratio_b = False
except KeyError:
try:
index = self.datadict[delta_inv]
delta_b = True
ratio_b = True
except KeyError:
index = -1
delta_b = None
ratio_b = None
return index, delta_b, ratio_b | This routine checks if the requested set of isotopes is
available in the dataset.
Parameters
----------
isos : list
set of isotopes in format ['Si-28','Si-30'].
Returns
-------
list
[index, delta_b, ratio_b].
index: where is it.
delta_b: is it a delta value or not?
ratio_ib: True if ratio is inverted, false if not | entailment |
def ratio_to_delta(self, isos_ss, ratio, oneover=False):
'''
Transforms an isotope ratio into a delta value
Parameters
----------
isos_ss: list or float
list w/ isotopes, e.g., ['N-14','N-15'] OR the solar
system ratio.
ratio : float
ratio of the isotopes to transform.
oneover : boolean
take the inverse of the ratio before transforming (never
inverse of delta value!). The default is False.
Returns
-------
float
delta value
'''
# define if isos_ss is the ratio or the isotopes
if type(isos_ss) == float:
ss_ratio = isos_ss
elif type(isos_ss) == list:
ss_ratio = self.inut.isoratio_init(isos_ss)
else:
print('Check input of isos_ss into ratio_to_delta routine')
return None
# check if one over is necessary or not
if oneover:
ratio = old_div(1,ratio)
# calculate delta value
delta = (old_div(ratio, ss_ratio) - 1.) * 1000.
return delta | Transforms an isotope ratio into a delta value
Parameters
----------
isos_ss: list or float
list w/ isotopes, e.g., ['N-14','N-15'] OR the solar
system ratio.
ratio : float
ratio of the isotopes to transform.
oneover : boolean
take the inverse of the ratio before transforming (never
inverse of delta value!). The default is False.
Returns
-------
float
delta value | entailment |
def with_filter(self, filter):
'''
Returns a new service which will process requests with the specified
filter. Filtering operations can include logging, automatic retrying,
etc... The filter is a lambda which receives the HTTPRequest and
another lambda. The filter can perform any pre-processing on the
request, pass it off to the next lambda, and then perform any
post-processing on the response.
:param function(request) filter: A filter function.
:return: A new service using the specified filter.
:rtype: a subclass of :class:`StorageClient`
'''
res = copy.deepcopy(self)
old_filter = self._filter
def new_filter(request):
return filter(request, old_filter)
res._filter = new_filter
return res | Returns a new service which will process requests with the specified
filter. Filtering operations can include logging, automatic retrying,
etc... The filter is a lambda which receives the HTTPRequest and
another lambda. The filter can perform any pre-processing on the
request, pass it off to the next lambda, and then perform any
post-processing on the response.
:param function(request) filter: A filter function.
:return: A new service using the specified filter.
:rtype: a subclass of :class:`StorageClient` | entailment |
def _perform_request(self, request, encoding='utf-8'):
'''
Sends the request and return response. Catches HTTPError and hands it
to error handler
'''
try:
resp = self._filter(request)
if sys.version_info >= (3,) and isinstance(resp, bytes) and \
encoding:
resp = resp.decode(encoding)
# Parse and wrap HTTP errors in AzureHttpError which inherits from AzureException
except HTTPError as ex:
_storage_error_handler(ex)
# Wrap all other exceptions as AzureExceptions to ease exception handling code
except Exception as ex:
if sys.version_info >= (3,):
# Automatic chaining in Python 3 means we keep the trace
raise AzureException
else:
# There isn't a good solution in 2 for keeping the stack trace
# in general, or that will not result in an error in 3
# However, we can keep the previous error type and message
# TODO: In the future we will log the trace
raise AzureException('{}: {}'.format(ex.__class__.__name__, ex.args[0]))
return resp | Sends the request and return response. Catches HTTPError and hands it
to error handler | entailment |
def _validate_snmp(self):
"""Validates SNMP credentials.
:raises exception.IloInvalidInputError
"""
cred = self.snmp_credentials
if cred is not None:
if cred.get('snmp_inspection') is True:
if not all([cred.get('auth_user'),
cred.get('auth_prot_pp'),
cred.get('auth_priv_pp')]):
msg = self._('Either few or all mandatory '
'SNMP credentials '
'are missing.')
LOG.error(msg)
raise exception.IloInvalidInputError(msg)
try:
auth_protocol = cred['auth_protocol']
if auth_protocol not in ["SHA", "MD5"]:
msg = self._('Invalid SNMP auth protocol '
'provided. '
'Valid values are SHA or MD5')
LOG.error(msg)
raise exception.IloInvalidInputError(msg)
except KeyError:
msg = self._('Auth protocol not provided by user. '
'The default value of MD5 will '
'be considered.')
LOG.debug(msg)
pass
try:
priv_protocol = cred['priv_protocol']
if priv_protocol not in ["AES", "DES"]:
msg = self._('Invalid SNMP privacy protocol '
'provided. '
'Valid values are AES or DES')
LOG.error(msg)
raise exception.IloInvalidInputError(msg)
except KeyError:
msg = self._('Privacy protocol not provided '
'by user. '
'The default value of DES will '
'be considered.')
LOG.debug(msg)
pass
else:
LOG.debug(self._('snmp_inspection set to False. SNMP'
'inspection will not be performed.'))
else:
LOG.debug(self._('SNMP credentials not provided. SNMP '
'inspection will not be performed.')) | Validates SNMP credentials.
:raises exception.IloInvalidInputError | entailment |
def _call_method(self, method_name, *args, **kwargs):
"""Call the corresponding method using RIBCL, RIS or REDFISH
Make the decision to invoke the corresponding method using RIBCL,
RIS or REDFISH way. In case of none, throw out ``NotImplementedError``
"""
if self.use_redfish_only:
if method_name in SUPPORTED_REDFISH_METHODS:
the_operation_object = self.redfish
else:
raise NotImplementedError()
else:
the_operation_object = self.ribcl
if 'Gen10' in self.model:
if method_name in SUPPORTED_REDFISH_METHODS:
the_operation_object = self.redfish
else:
if (self.is_ribcl_enabled is not None
and not self.is_ribcl_enabled):
raise NotImplementedError()
elif ('Gen9' in self.model) and (method_name in
SUPPORTED_RIS_METHODS):
the_operation_object = self.ris
method = getattr(the_operation_object, method_name)
LOG.debug(self._("Using %(class)s for method %(method)s."),
{'class': type(the_operation_object).__name__,
'method': method_name})
return method(*args, **kwargs) | Call the corresponding method using RIBCL, RIS or REDFISH
Make the decision to invoke the corresponding method using RIBCL,
RIS or REDFISH way. In case of none, throw out ``NotImplementedError`` | entailment |
def set_iscsi_info(self, target_name, lun, ip_address,
port='3260', auth_method=None, username=None,
password=None):
"""Set iscsi details of the system in uefi boot mode.
The initiator system is set with the target details like
IQN, LUN, IP, Port etc.
:param target_name: Target Name for iscsi.
:param lun: logical unit number.
:param ip_address: IP address of the target.
:param port: port of the target.
:param auth_method : either None or CHAP.
:param username: CHAP Username for authentication.
:param password: CHAP secret.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the bios boot mode.
"""
return self._call_method('set_iscsi_info', target_name, lun,
ip_address, port, auth_method, username,
password) | Set iscsi details of the system in uefi boot mode.
The initiator system is set with the target details like
IQN, LUN, IP, Port etc.
:param target_name: Target Name for iscsi.
:param lun: logical unit number.
:param ip_address: IP address of the target.
:param port: port of the target.
:param auth_method : either None or CHAP.
:param username: CHAP Username for authentication.
:param password: CHAP secret.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the bios boot mode. | entailment |
def set_iscsi_boot_info(self, mac, target_name, lun, ip_address,
port='3260', auth_method=None, username=None,
password=None):
"""Set iscsi details of the system in uefi boot mode.
The initiator system is set with the target details like
IQN, LUN, IP, Port etc.
:param mac: The MAC of the NIC to be set with iSCSI information
:param target_name: Target Name for iscsi.
:param lun: logical unit number.
:param ip_address: IP address of the target.
:param port: port of the target.
:param auth_method : either None or CHAP.
:param username: CHAP Username for authentication.
:param password: CHAP secret.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the bios boot mode.
"""
LOG.warning("'set_iscsi_boot_info' is deprecated. The 'MAC' parameter"
"passed in is ignored. Use 'set_iscsi_info' instead.")
return self._call_method('set_iscsi_info', target_name, lun,
ip_address, port, auth_method, username,
password) | Set iscsi details of the system in uefi boot mode.
The initiator system is set with the target details like
IQN, LUN, IP, Port etc.
:param mac: The MAC of the NIC to be set with iSCSI information
:param target_name: Target Name for iscsi.
:param lun: logical unit number.
:param ip_address: IP address of the target.
:param port: port of the target.
:param auth_method : either None or CHAP.
:param username: CHAP Username for authentication.
:param password: CHAP secret.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the bios boot mode. | entailment |
def set_vm_status(self, device='FLOPPY',
boot_option='BOOT_ONCE', write_protect='YES'):
"""Sets the Virtual Media drive status and allows the
boot options for booting from the virtual media.
"""
return self._call_method('set_vm_status', device, boot_option,
write_protect) | Sets the Virtual Media drive status and allows the
boot options for booting from the virtual media. | entailment |
def get_essential_properties(self):
"""Get the essential scheduling properties
:returns: a dictionary containing memory size, disk size,
number of cpus, cpu arch, port numbers and
mac addresses.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
data = self._call_method('get_essential_properties')
if (data['properties']['local_gb'] == 0):
cred = self.snmp_credentials
if cred and cred.get('snmp_inspection'):
disksize = snmp.get_local_gb(self.host, cred)
if disksize:
data['properties']['local_gb'] = disksize
else:
msg = self._('SNMP inspection failed to '
'get the disk size. Returning '
'local_gb as 0.')
LOG.debug(msg)
else:
msg = self._("SNMP credentials were not set and "
"RIBCL/Redfish failed to get the disk size. "
"Returning local_gb as 0.")
LOG.debug(msg)
return data | Get the essential scheduling properties
:returns: a dictionary containing memory size, disk size,
number of cpus, cpu arch, port numbers and
mac addresses.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. | entailment |
def get_server_capabilities(self):
"""Get hardware properties which can be used for scheduling
:return: a dictionary of server capabilities.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
capabilities = self._call_method('get_server_capabilities')
# TODO(nisha): Assumption is that Redfish always see the pci_device
# member name field populated similarly to IPMI.
# If redfish is not able to get nic_capacity, we can fall back to
# IPMI way of retrieving nic_capacity in the future. As of now
# the IPMI is not tested on Gen10, hence assuming that
# Redfish will always be able to give the data.
if ('Gen10' not in self.model):
major_minor = (
self._call_method('get_ilo_firmware_version_as_major_minor'))
# NOTE(vmud213): Even if it is None, pass it on to get_nic_capacity
# as we still want to try getting nic capacity through ipmitool
# irrespective of what firmware we are using.
nic_capacity = ipmi.get_nic_capacity(self.ipmi_host_info,
major_minor)
if nic_capacity:
capabilities.update({'nic_capacity': nic_capacity})
if capabilities:
return capabilities | Get hardware properties which can be used for scheduling
:return: a dictionary of server capabilities.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. | entailment |
def _read_mesafile(filename,data_rows=0,only='all'):
""" private routine that is not directly called by the user"""
f=open(filename,'r')
vv=[]
v=[]
lines = []
line = ''
for i in range(0,6):
line = f.readline()
lines.extend([line])
hval = lines[2].split()
hlist = lines[1].split()
header_attr = {}
for a,b in zip(hlist,hval):
header_attr[a] = float(b)
if only is 'header_attr':
return header_attr
cols = {}
colnum = lines[4].split()
colname = lines[5].split()
for a,b in zip(colname,colnum):
cols[a] = int(b)
data = []
old_percent = 0
for i in range(data_rows):
# writing reading status
percent = int(i*100/np.max([1, data_rows-1]))
if percent >= old_percent + 5:
sys.stdout.flush()
sys.stdout.write("\r reading " + "...%d%%" % percent)
old_percent = percent
line = f.readline()
v=line.split()
try:
vv=np.array(v,dtype='float64')
except ValueError:
for item in v:
if item.__contains__('.') and not item.__contains__('E'):
v[v.index(item)]='0'
data.append(vv)
print(' \n')
f.close()
a=np.array(data)
data = []
return header_attr, cols, a | private routine that is not directly called by the user | entailment |
def _cleanstarlog(file_in):
"""
cleaning history.data or star.log file, e.g. to take care of
repetitive restarts.
private, should not be called by user directly
Parameters
----------
file_in : string
Typically the filename of the mesa output history.data or
star.log file, creates a clean file called history.datasa or
star.logsa.
(thanks to Raphael for providing this tool)
"""
file_out=file_in+'sa'
f = open(file_in)
lignes = f.readlines()
f.close()
nb = np.array([],dtype=int) # model number
nb = np.concatenate((nb ,[ int(lignes[len(lignes)-1].split()[ 0])]))
nbremove = np.array([],dtype=int) # model number
i=-1
for i in np.arange(len(lignes)-1,0,-1):
line = lignes[i-1]
if i > 6 and line != "" :
if int(line.split()[ 0])>=nb[-1]:
nbremove = np.concatenate((nbremove,[i-1]))
else:
nb = np.concatenate((nb ,[ int(line.split()[ 0])]))
i=-1
for j in nbremove:
lignes.remove(lignes[j])
fout = open(file_out,'w')
for j in np.arange(len(lignes)):
fout.write(lignes[j])
fout.close() | cleaning history.data or star.log file, e.g. to take care of
repetitive restarts.
private, should not be called by user directly
Parameters
----------
file_in : string
Typically the filename of the mesa output history.data or
star.log file, creates a clean file called history.datasa or
star.logsa.
(thanks to Raphael for providing this tool) | entailment |
def abu_profiles(p,ifig=1,xlm=xlm,ylm=(-8,0),show=False,abunds='All',xaxis=xaxis_type, figsize1=(8,8)):
'''Four panels of abundance plots
Parameters
----------
p : instance
mesa_profile instance
xlm : tuple
xlimits: mass_min, mass_max
abus : 'All' plots many 'commonly used' isotopes up to Fe if they are in your mesa output.
otherwise provide a list of lists of desired abus
show : Boolean
False for batch use
True for interactive use
xaxis : character
Lagrangian mass is radial mass coordinate
Eulerian radius is radial coordinate, in Mm
'''
matplotlib.rc('figure',facecolor='white',figsize=figsize1)
# create subplot structure
f, ([ax1,ax2],[ax3,ax4]) = pl.subplots(2, 2, sharex=False, sharey=True, figsize=figsize1)
# define 4 groups of elements, one for each of the 4 subplots
all_isos=[['h1','he3','he4','li6','c12','c13','n13','n14','n15','o16','o17','o18','f19'],['ne20','ne21','ne22','na22','na23','mg24','mg25','mg26','al26','al27','si28','si29','si30'], ['p31', 's32','s33', 's34','s36','cl35','cl37','ar36', 'ar38','ar40', 'k39', 'k40','k41'],
['ca40','ca42','ca48','sc45','ti46','ti48','ti50','v50','v51','cr52','cr54','mn55','fe56']]
if abunds == 'All':
abus=[[],[],[],[]]
j=0
for i, row in enumerate(all_isos):
for iso in row:
if iso in p.cols:
abus[i].append(iso)
j+=1
abus1=[]
abus2 =[[],[],[],[]]
for l in range(len(abus)):
for k in range(len(abus[l])):
abus1.append(abus[l][k])
is_small_isos = False
for i in range(len(abus)):
if len(abus[i]) < 5:
is_small_isos = True
print("Missing isotopes from the default list. Distributing the ones you have over the panels.")
if is_small_isos:
n=4
quo, rem = divmod(len(abus1), n)
for i in range(len(abus2)):
for k in range(i*quo,(i+1)*quo+rem):
abus2[i].append(abus1[k])
abus = abus2
#print(abus)
else:
abus = abus
ax = [ax1,ax2,ax3,ax4]
xxx = p.get('radius') if xaxis is "Eulerian" else p.get('mass')
mass = p.get('mass') # in units of Msun
radius = p.get('radius')*ast.rsun_cm/1.e8 # in units of Mm
if xaxis is "Eulerian":
xxx = radius
if xlm[0] == 0 and xlm[1] == 0:
indtop = 0
indbot = len(mass)-1
else:
indbot = np.where(radius>=xlm[0])[0][-1]
indtop = np.where(radius<xlm[1])[0][0]
xll = (radius[indbot],radius[indtop])
xxlabel = "Radius (Mm)"
elif xaxis is "Lagrangian":
xxx = mass
xll = xlm
xxlabel = "$M / \mathrm{M_{sun}}$"
else:
print("Error: don't understand xaxis choice, must be Lagrangian or Eulerian")
for i in range(4):
for thing in abus[i]:
ind = abus[i].index(thing)
ax[i].plot(xxx, np.log10(p.get(thing)), ls=u.linestylecb(ind,a,b)[0],\
marker=u.linestylecb(ind,a,b)[1], color=u.linestylecb(ind,a,b)[2],\
markevery=50,label=thing)
# set x and y lims and labels
ax[i].set_ylim(ylm)
ax[i].set_xlim(xll)
ax[i].legend(loc=1)
ax[i].set_xlabel(xxlabel)
if i%2 == 0:
ax[i].set_ylabel('log X')
# ax[i].set_aspect('equal')
title_str = "Abundance plot: "+'t ='+str(title_format%p.header_attr['star_age'])\
+' dt ='+str(title_format%p.header_attr['time_step'])\
+'model number = '+str(int(p.header_attr['model_number']))
f.suptitle(title_str, fontsize=12)
f.tight_layout()
f.subplots_adjust(left=0.1, bottom=0.1, right=0.95, top=0.9, wspace=0, hspace=0.1)
f.savefig('abuprof'+str(int(p.header_attr['model_number'])).zfill(6)+'.png') | Four panels of abundance plots
Parameters
----------
p : instance
mesa_profile instance
xlm : tuple
xlimits: mass_min, mass_max
abus : 'All' plots many 'commonly used' isotopes up to Fe if they are in your mesa output.
otherwise provide a list of lists of desired abus
show : Boolean
False for batch use
True for interactive use
xaxis : character
Lagrangian mass is radial mass coordinate
Eulerian radius is radial coordinate, in Mm | entailment |
def other_profiles(p,ifig=1,xlm=xlm,show=False,xaxis=xaxis_type, figsize2=(10,8)):
'''Four panels of other profile plots
Parameters
----------
p : instance
mesa_profile instance
xll : tuple
xlimits: mass_min, mass_max
show : Boolean
False for batch use
True for interactive use
'''
matplotlib.rc('figure',facecolor='white',figsize=figsize2)
mass = p.get('mass') # in units of Msun
radius = p.get('radius')*ast.rsun_cm/1.e8 # in units of Mm
if xaxis is "Eulerian":
xxx = radius
if xlm[0]==0 and xlm[1] == 0:
indtop = 0
indbot = len(mass)-1
else:
indbot = np.where(radius>=xlm[0])[0][-1]
indtop = np.where(radius<xlm[1])[0][0]
xll = (radius[indbot],radius[indtop])
xxlabel = "radius (Mm)"
elif xaxis is "Lagrangian":
xxx = mass
xll = xlm
xxlabel = "$M / \mathrm{M_{sun}}$"
else:
print("Error: don't understand xaxis choice, must be Lagrangian or Eulerian")
# create subplot structure
t, ([ax1,ax2],[ax3, ax4],[ax5,ax6]) = matplotlib.pyplot.subplots(3, 2, sharex=True, sharey=False)
# panel 1: burns: pp, cno, burn_c
# panel 2: convection and mixing: entropy, Tgrad
# which burning to show
Enuc = ['pp','cno','tri_alfa','burn_c','burn_o','burn_n','burn_si','burn_mg','burn_na','burn_ne','eps_nuc']
ax = ax1
for thing in Enuc:
ind = Enuc.index(thing)
ax.plot(xxx, np.log10(p.get(thing)), ls=u.linestylecb(ind,a,b)[0],\
marker=u.linestylecb(ind,a,b)[1], color=u.linestylecb(ind,a,b)[2],\
markevery=50,label=thing)
# set x and y lims and labels
#ax.set_title('Nuclear Energy Production')
ax.set_ylim(0,15)
ax.set_xlim(xll)
ax.legend(loc=1, ncol=2, fontsize='small')
#ax.set_xlabel(xxlabel)
ax.set_ylabel('$ \log \epsilon $')
#--------------------------------------------------------------------------------------------#
# gradients
mix = [['gradr']]
mix1 = [['grada']]
for i in range(1):
for thing in mix[i]:
ind = mix[i].index(thing)
for i in range(1):
for thing1 in mix1[i]:
ind1 = mix1[i].index(thing1)
ax2.plot(xxx, (np.tanh(np.log10(p.get(thing))-np.log10(p.get(thing1))))\
,ls=u.linestylecb(ind,a,b)[0],\
marker=u.linestylecb(ind,a,b)[1], color=u.linestylecb(ind,a,b)[2],\
markevery=50,label=thing)
# set x and y lims and labels
ax2.axhline(ls='dashed',color='black',label="")
#ax2.set_title('Mixing Regions')
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
ax2.set_ylim(-.1,.1)
ax2.set_xlim(xll)
ax2.legend(labels='Mixing',loc=1)
#ax2.set_xlabel(xxlabel)
ax2.set_ylabel('$\\tanh(\\log(\\frac{\\nabla_{rad}}{\\nabla_{ad}}))$')
#--------------------------------------------------------------------------------------------#
# entropy
S = ['entropy']
ax = ax5
for thing in S:
ind = 2
ax.plot(xxx, p.get(thing), ls=u.linestylecb(ind,a,b)[0],\
marker=u.linestylecb(ind,a,b)[1], color=u.linestylecb(ind,a,b)[2],\
markevery=50,label=thing)
# set x and y lims and labels
#ax.set_title('Specific Entropy (/A*kerg)')
ax.set_ylim(0,50)
ax.set_xlim(xll)
ax.legend(loc=1)
ax.set_xlabel(xxlabel)
ax.set_ylabel(' Specific Entropy')
#--------------------------------------------------------------------------------------------#
# rho, mu, T
S = ['logRho','mu','temperature']
T8 = [False,False,True]
ax = ax6
for thing in S:
ind = S.index(thing)
thisy = p.get(thing)/1.e8 if T8[ind] else p.get(thing)
ax.plot(xxx, thisy, ls=u.linestylecb(ind,a,b)[0],\
marker=u.linestylecb(ind,a,b)[1], color=u.linestylecb(ind,a,b)[2],\
markevery=50,label=thing)
# set x and y lims and labels
#ax.set_title('Rho, mu, T')
ax.set_ylim(0.,9.)
ax.set_xlim(xll)
ax.legend(loc=0)
ax.set_xlabel(xxlabel)
ax.set_ylabel('log Rho, mu, T8')
#--------------------------------------------------------------------------------------------#
# gas pressure fraction and opacity
S = ['pgas_div_ptotal']
o = ['log_opacity']
ax = ax4
axo = ax.twinx()
for thing in S:
ind = 5
ax.plot(xxx, p.get(thing), ls=u.linestylecb(ind,a,b)[0],\
marker=u.linestylecb(ind,a,b)[1], color=u.linestylecb(ind,a,b)[2],\
markevery=50,label=thing)
for thing in o:
ind = 3
axo.plot(xxx, p.get(thing), ls=u.linestylecb(ind,a,b)[0],\
marker=u.linestylecb(ind,a,b)[1], color=u.linestylecb(ind,a,b)[2],\
markevery=50,label=thing)
# set x and y lims and labels
# ax.set_title('Pgas fraction + opacity')
# ax.set_ylim(0,60)
ax.set_xlim(xll)
axo.set_xlim(xll)
ax.legend(loc=0)
axo.legend(loc=(.15,.85))
#ax.set_xlabel(xxlabel)
ax.set_ylabel('$\mathrm{ P_{gas} / P_{tot}}$')
axo.set_ylabel('$ log(Opacity)$')
#--------------------------------------------------------------------------------------------#
# Diffusion coefficient
gT = ['log_D_mix','conv_vel_div_csound']
logy = [False,True]
ax = ax3
ind = 0
for thing in gT:
ind = gT.index(thing)
thisx = np.log(p.get(thing))+16 if logy[ind] else p.get(thing)
ax.plot(xxx, thisx, ls=u.linestylecb(ind,a,b)[0],\
marker=u.linestylecb(ind,a,b)[1], color=u.linestylecb(ind,a,b)[2],\
markevery=50,label=thing)
# set x and y lims and labels
ax.axhline(16,ls='dashed',color='black',label="$\mathrm{Ma}=0$")
# ax.set_title('Mixing')
ax.set_ylim(10,17)
ax.set_xlim(xll)
ax.legend(loc=0)
# ax.set_xlabel(xxlabel)
ax.set_ylabel('$\\log D / [cgs] \\log v_{\mathrm{conv}}/c_s + 16 $ ')
title_str = "Other profiles: "+'t ='+str(title_format%p.header_attr['star_age'])\
+', dt ='+str(title_format%p.header_attr['time_step'])\
+', model number ='+str(int(p.header_attr['model_number']))
t.suptitle(title_str, fontsize=12)
# t.tight_layout()
t.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.15, hspace=0.1)
t.savefig('other'+str(int(p.header_attr['model_number'])).zfill(6)+'.png') | Four panels of other profile plots
Parameters
----------
p : instance
mesa_profile instance
xll : tuple
xlimits: mass_min, mass_max
show : Boolean
False for batch use
True for interactive use | entailment |
def _profiles_index(self):
"""
read profiles.index and make hash array
Notes
-----
sets the attributes.
log_ind : hash array that returns profile.data or log.data
file number from model number.
model : the models for which profile.data or log.data is
available
"""
prof_ind_name = self.prof_ind_name
f = open(self.sldir+'/'+prof_ind_name,'r')
line = f.readline()
numlines=int(line.split()[0])
print(str(numlines)+' in profiles.index file ...')
model=[]
log_file_num=[]
for line in f:
model.append(int(line.split()[0]))
log_file_num.append(int(line.split()[2]))
log_ind={} # profile.data number from model
for a,b in zip(model,log_file_num):
log_ind[a] = b
self.log_ind=log_ind
self.model=model | read profiles.index and make hash array
Notes
-----
sets the attributes.
log_ind : hash array that returns profile.data or log.data
file number from model number.
model : the models for which profile.data or log.data is
available | entailment |
def _log_file_ind(self,inum):
"""
Information about available profile.data or log.data files.
Parameters
----------
inum : integer
Attempt to get number of inum's profile.data file.
inum_max: max number of profile.data or log.data files
available
"""
self._profiles_index()
if inum <= 0:
print("Smallest argument is 1")
return
inum_max = len(self.log_ind)
inum -= 1
if inum > inum_max:
print('There are only '+str(inum_max)+' profile file available.')
log_data_number = -1
return log_data_number
else:
log_data_number=self.log_ind[self.model[inum]]
print('The '+str(inum+1)+'. profile.data file is '+ \
str(log_data_number))
return log_data_number | Information about available profile.data or log.data files.
Parameters
----------
inum : integer
Attempt to get number of inum's profile.data file.
inum_max: max number of profile.data or log.data files
available | entailment |
def get(self,str_name):
"""
return a column of data with the name str_name.
Parameters
----------
str_name : string
Is the name of the column as printed in the
profilennn.data or lognnn.data file; get the available
columns from self.cols (where you replace self with the
name of your instance)
"""
column_array = self.data[:,self.cols[str_name]-1].astype('float')
return column_array | return a column of data with the name str_name.
Parameters
----------
str_name : string
Is the name of the column as printed in the
profilennn.data or lognnn.data file; get the available
columns from self.cols (where you replace self with the
name of your instance) | entailment |
def write_PROM_HOTB_progenitor(self,name,description):
"""
Write a progenitor file for the PROMETHEUS/HBOT supernova code.
Parameters
----------
name : string
File name for the progenitor file
description : string
Information to be written into the file header.
"""
try:
from ProgenitorHotb_new import ProgenitorHotb_new
except ImportError:
print('Module ProgenitorHotb_new not found.')
return
nz=len(self.get('mass'))
prog=ProgenitorHotb_new(nz)
prog.header = '#'+description+'\n'
prog.xzn = self.get('rmid')[::-1]*ast.rsun_cm
prog.massb = self.get('mass')[::-1]
prog.r_ob = max(self.get('radius'))*ast.rsun_cm
prog.temp = 10.**self.get('logT')[::-1]*8.620689655172413e-11 # in MeV
prog.stot = self.get('entropy')[::-1]
prog.ye = self.get('ye')[::-1]
prog.densty = 10.**self.get('logRho')[::-1]
prog.press = 10.**self.get('logP')[::-1]
prog.eint = self.get('energy')[::-1]
prog.velx = self.get('velocity')[::-1]
nuclei=['neut','h1','he4','c12','o16','ne20','mg24','si28','s32',
'ar36','ca40','ti44','cr48','fe52','fake']
for i in range(len(nuclei)):
if nuclei[i] == 'fake':
ni56 = self.get('fe56')+self.get('cr56')
prog.xnuc[:,i] = ni56[::-1]
else:
prog.xnuc[:,i] = self.get(nuclei[i])[::-1]
prog.write(name) | Write a progenitor file for the PROMETHEUS/HBOT supernova code.
Parameters
----------
name : string
File name for the progenitor file
description : string
Information to be written into the file header. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.