code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
system = self._get_host_details()
try:
if system['Boot']['BootSourceOverrideEnabled'] == 'Once':
device = system['Boot']['BootSourceOverrideTarget']
if device in DEVICE_RIS_TO_COMMON:
return DEVICE_RIS_TO_COMMON[device]
return device
else:
# value returned by RIBCL if one-time boot setting are absent
return 'Normal'
except KeyError as e:
msg = "get_one_time_boot failed with the KeyError:%s"
raise exception.IloError((msg) % e) | def get_one_time_boot(self) | Retrieves the current setting for the one time boot.
:returns: Returns the first boot device that would be used in next
boot. Returns 'Normal' if no device is set.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. | 7.027832 | 6.07524 | 1.156799 |
manager, uri = self._get_ilo_details()
try:
fw_uri = manager['Oem']['Hp']['links']['UpdateService']['href']
except KeyError:
msg = ("Firmware Update Service resource not found.")
raise exception.IloCommandNotSupportedError(msg)
return fw_uri | def _get_firmware_update_service_resource(self) | Gets the firmware update service uri.
:returns: firmware update service uri
:raises: IloError, on an error from iLO.
:raises: IloConnectionError, if not able to reach iLO.
:raises: IloCommandNotSupportedError, for not finding the uri | 7.243765 | 6.429487 | 1.126647 |
fw_update_uri = self._get_firmware_update_service_resource()
action_data = {
'Action': 'InstallFromURI',
'FirmwareURI': file_url,
}
# perform the POST
LOG.debug(self._('Flashing firmware file: %s ...'), file_url)
status, headers, response = self._rest_post(
fw_update_uri, None, action_data)
if status != 200:
msg = self._get_extended_error(response)
raise exception.IloError(msg)
# wait till the firmware update completes.
common.wait_for_ris_firmware_update_to_complete(self)
try:
state, percent = self.get_firmware_update_progress()
except exception.IloError:
msg = 'Status of firmware update not known'
LOG.debug(self._(msg)) # noqa
return
if state == "ERROR":
msg = 'Unable to update firmware'
LOG.debug(self._(msg)) # noqa
raise exception.IloError(msg)
elif state == "UNKNOWN":
msg = 'Status of firmware update not known'
LOG.debug(self._(msg)) # noqa
else: # "COMPLETED" | "IDLE"
LOG.info(self._('Flashing firmware file: %s ... done'), file_url) | def update_firmware(self, file_url, component_type) | Updates the given firmware on the server for the given component.
:param file_url: location of the raw firmware file. Extraction of the
firmware file (if in compact format) is expected to
happen prior to this invocation.
:param component_type: Type of component to be applied to.
:raises: InvalidInputError, if the validation of the input fails
:raises: IloError, on an error from iLO
:raises: IloConnectionError, if not able to reach iLO.
:raises: IloCommandNotSupportedError, if the command is
not supported on the server | 3.462308 | 3.454965 | 1.002125 |
try:
fw_update_uri = self._get_firmware_update_service_resource()
except exception.IloError as e:
LOG.debug(self._('Progress of firmware update not known: %s'),
str(e))
return "UNKNOWN", "UNKNOWN"
# perform the GET
status, headers, response = self._rest_get(fw_update_uri)
if status != 200:
msg = self._get_extended_error(response)
raise exception.IloError(msg)
fw_update_state = response.get('State')
fw_update_progress_percent = response.get('ProgressPercent')
LOG.debug(self._('Flashing firmware file ... in progress %d%%'),
fw_update_progress_percent)
return fw_update_state, fw_update_progress_percent | def get_firmware_update_progress(self) | Get the progress of the firmware update.
:returns: firmware update state, one of the following values:
"IDLE", "UPLOADING", "PROGRESSING", "COMPLETED", "ERROR".
If the update resource is not found, then "UNKNOWN".
:returns: firmware update progress percent
:raises: IloError, on an error from iLO.
:raises: IloConnectionError, if not able to reach iLO. | 3.492486 | 3.334149 | 1.047489 |
tpm_values = {"NotPresent": False,
"PresentDisabled": True,
"PresentEnabled": True}
try:
tpm_state = self._get_bios_setting('TpmState')
except exception.IloCommandNotSupportedError:
tpm_state = "NotPresent"
tpm_result = tpm_values[tpm_state]
return tpm_result | def _get_tpm_capability(self) | Retrieves if server is TPM capable or not.
:returns: True if TPM is Present else False | 4.261279 | 3.809737 | 1.118523 |
try:
cpu_vt = self._get_bios_setting('ProcVirtualization')
except exception.IloCommandNotSupportedError:
return False
if cpu_vt == 'Enabled':
vt_status = True
else:
vt_status = False
return vt_status | def _get_cpu_virtualization(self) | get cpu virtualization status. | 5.132246 | 4.329117 | 1.185518 |
try:
nvdimm_n_status = self._get_bios_setting('NvDimmNMemFunctionality')
if nvdimm_n_status == 'Enabled':
nvn_status = True
else:
nvn_status = False
except exception.IloCommandNotSupportedError:
nvn_status = False
return nvn_status | def _get_nvdimm_n_status(self) | Get status of NVDIMM_N.
:returns: True if NVDIMM_N is present and enabled, False otherwise. | 4.307094 | 4.541492 | 0.948387 |
cur_status = self.get_host_power_status()
if cur_status != 'ON':
raise exception.IloError("Server is not in powered on state.")
self._perform_power_op("Nmi") | def inject_nmi(self) | Inject NMI, Non Maskable Interrupt.
Inject NMI (Non Maskable Interrupt) for a node immediately.
:raises: IloError, on an error from iLO | 8.498568 | 6.844547 | 1.241655 |
headers, bios_uri, bios_settings = self._check_bios_resource()
# Remove the "links" section
bios_settings.pop("links", None)
if only_allowed_settings:
return utils.apply_bios_properties_filter(
bios_settings, constants.SUPPORTED_BIOS_PROPERTIES)
return bios_settings | def get_current_bios_settings(self, only_allowed_settings=True) | Get current BIOS settings.
:param: only_allowed_settings: True when only allowed BIOS settings
are to be returned. If False, All the BIOS settings supported
by iLO are returned.
:return: a dictionary of current BIOS settings is returned. Depending
on the 'only_allowed_settings', either only the allowed
settings are returned or all the supported settings are
returned.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. | 4.807373 | 5.782148 | 0.831416 |
headers, bios_uri, bios_settings = self._check_bios_resource()
try:
settings_config_uri = bios_settings['links']['Settings']['href']
except KeyError:
msg = ("Settings resource not found. Couldn't get pending BIOS "
"Settings.")
raise exception.IloCommandNotSupportedError(msg)
status, headers, config = self._rest_get(settings_config_uri)
if status != 200:
msg = self._get_extended_error(config)
raise exception.IloError(msg)
# Remove the "links" section
config.pop("links", None)
if only_allowed_settings:
return utils.apply_bios_properties_filter(
config, constants.SUPPORTED_BIOS_PROPERTIES)
return config | def get_pending_bios_settings(self, only_allowed_settings=True) | Get pending BIOS settings.
:param: only_allowed_settings: True when only allowed BIOS settings
are to be returned. If False, All the BIOS settings supported
by iLO are returned.
:return: a dictionary of pending BIOS settings. Depending
on the 'only_allowed_settings', either only the allowed
settings are returned or all the supported settings are
returned.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. | 3.790874 | 3.744417 | 1.012407 |
if not data:
raise exception.IloError("Could not apply settings with"
" empty data")
if only_allowed_settings:
unsupported_settings = [key for key in data if key not in (
constants.SUPPORTED_BIOS_PROPERTIES)]
if unsupported_settings:
msg = ("Could not apply settings as one or more settings are"
" not supported. Unsupported settings are %s."
" Supported settings are %s." % (
unsupported_settings,
constants.SUPPORTED_BIOS_PROPERTIES))
raise exception.IloError(msg)
self._change_bios_setting(data) | def set_bios_settings(self, data=None, only_allowed_settings=True) | Sets current BIOS settings to the provided data.
:param: only_allowed_settings: True when only allowed BIOS settings
are to be set. If False, all the BIOS settings supported by
iLO and present in the 'data' are set.
:param: data: a dictionary of BIOS settings to be applied. Depending
on the 'only_allowed_settings', either only the allowed
settings are set or all the supported settings that are in
the 'data' are set.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. | 3.499213 | 3.441931 | 1.016642 |
headers_bios, bios_uri, bios_settings = self._check_bios_resource()
# Get the BaseConfig resource.
try:
base_config_uri = bios_settings['links']['BaseConfigs']['href']
except KeyError:
msg = ("BaseConfigs resource not found. Couldn't apply the BIOS "
"Settings.")
raise exception.IloCommandNotSupportedError(msg)
status, headers, config = self._rest_get(base_config_uri)
if status != 200:
msg = self._get_extended_error(config)
raise exception.IloError(msg)
for cfg in config['BaseConfigs']:
default_settings = cfg.get('default')
if default_settings:
break
else:
msg = ("Default BIOS Settings not found in 'BaseConfigs' "
"resource.")
raise exception.IloCommandNotSupportedError(msg)
if only_allowed_settings:
return utils.apply_bios_properties_filter(
default_settings, constants.SUPPORTED_BIOS_PROPERTIES)
return default_settings | def get_default_bios_settings(self, only_allowed_settings=True) | Get default BIOS settings.
:param: only_allowed_settings: True when only allowed BIOS settings
are to be returned. If False, All the BIOS settings supported
by iLO are returned.
:return: a dictionary of default BIOS settings(factory settings).
Depending on the 'only_allowed_settings', either only the
allowed settings are returned or all the supported settings
are returned.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. | 3.544009 | 3.650161 | 0.970919 |
headers, bios_uri, bios_settings = self._check_bios_resource()
settings_result = bios_settings.get("SettingsResult").get("Messages")
status = "failed" if len(settings_result) > 1 else "success"
return {"status": status, "results": settings_result} | def get_bios_settings_result(self) | Gets the result of the bios settings applied
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is
not supported on the server. | 5.27316 | 5.86684 | 0.898808 |
# provide library for Z versus element names, and Z for elements
u.give_zip_element_z_and_names()
# solar abundances are read here
u.solar(file_solar,solar_factor)
# from here I have average abundances in mass_range to plot
average_iso_abund_marco(mass_range,cycle,logic_stable,i_decay)
# element abundances are calculated here
mass_fractions_array_decayed = average_mass_frac_decay
mass_fractions_array_not_decayed = average_mass_frac
u.element_abund_marco(i_decay,stable,jjdum,mass_fractions_array_not_decayed,mass_fractions_array_decayed)
fig = pl.figure() # Figure object
ax = fig.add_subplot(1,1,1) # Axes object: one row, one column, first plot (one plot!)
# Tick marks
xminorlocator = MultipleLocator(1)
xmajorlocator = MultipleLocator(10)
ax.xaxis.set_major_locator(xmajorlocator)
ax.xaxis.set_minor_locator(xminorlocator)
yminorlocator = MultipleLocator(0.1)
ymajorlocator = MultipleLocator(1)
ax.yaxis.set_major_locator(ymajorlocator)
ax.yaxis.set_minor_locator(yminorlocator)
ax.set_yscale('log')
if not logic_stable:
for i in range(u.z_bismuth):
pl.plot(z_for_elem[i],elem_prod_fac[i],symbol,markersize=10.)
pl.xlabel('$Atomic$ $number$', fontsize=20)
pl.ylabel('$X_{i}/X_{sun}$', fontsize=20)
pl.ylim(1.0e-2,1000.)
pl.xlim(0,95)
elif logic_stable:
for i in range(u.z_bismuth):
if index_stable[i] == 1:
continue
#pl.plot(z_for_elem[i],elem_prod_fac[i],'ko')
if i_decay == 2:
for i in range(u.z_bismuth):
if index_stable[i] == 1:
pl.plot(z_for_elem[i],elem_prod_fac_decayed[i],symbol,markersize=10.)
pl.xlabel('$Atomic$ $number$', fontsize=20)
pl.ylabel('$X_{i}/X_{sun}$', fontsize=20)
pl.ylim(1.0e-2,1000.)
pl.xlim(0,95)
pl.grid()
pl.show() | def _obsolete_plot_el_abund_marco(directory,name_h5_file,mass_range,cycle,logic_stable,i_decay,file_solar,solar_factor,symbol='ko') | Interface to plot elements abundances averaged over mass_range.
Parameters
----------
directory : string
Location of h5 file to plot. Needed for plot_tools.
name_h5_file : string
Name of h5 file. Needed for plot_tools.
mass_range : list
A 1x2 array required to plot data in a certain mass range. Needed for
_read_iso_abund_marco.
cycle : integer
which cycle from the h5 file?. Needed for _read_iso_abund_marco.
logic_stable : boolean
Do you want to plot only stable or not.
i_decay : integer
If i_decay is 1, then plot not decayed. If i_decay is 2, then
plot decayed. Make sense only if stable is true.
file_solar : string
File where to take solar abundances.
solar_factor : float
value to correct initial abundances to solar, e.g. for Z=0.01
and AG89 solar_factor = 2.
See Also
--------
se._read_iso_abund_marco() | 3.325321 | 3.262848 | 1.019147 |
return self.se.get(cycle_list,dataitem,isotope,sparse) | def get(self, cycle_list, dataitem=None, isotope=None, sparse=1) | Simple function that simply calls h5T.py get method. There
are three ways to call this function.
Parameters
----------
cycle_list : string, list
If cycle_list is a string, then get interpates the argument
cycle_list as a dataitem and fetches the dataitem for all
cycles.
If cycle_list is a list, then get fetches the dataitem for
the cycles in the list.
dataitem : string, optional
fetches the dataitem from the list of cycles. If dataitem
is None, then cycle_list must be a string and will be used
as dataitem. If dataitem is an isotope in the form 'H-2',
it then returns the result of,
>>> self.get(cycle_list,'iso_massf',dataitem)
The default is None.
isotope : string, optional
The name of the isotope to fetch, it must be in the form
'H-2'. If isotope is None, then cycle_list or dataitem
must be a string. The default is None.
sparse : integer, optional
Implements a sparsity factor on the fetched data. The
default is 1.
Notes
-----
Calling the get method directly in the form,
>>> self.get(cycle_list,'iso_massf',dataitem)
is deprecated, and only included for compatibility. | 5.188429 | 6.148854 | 0.843804 |
isoabunds=self.se.get(cycle,'iso_massf')
A=array(self.se.A)
Z=array(self.se.Z)
names=self.se.isos
Zuq=list(set(Z)) # list of unique Zs
Zuq.sort()
if index==None:
index=[0,len(isoabunds)]
if type(index)==list:
elemabunds=[]
for zone in range(index[0],index[1]):
percent=int((zone-index[0])*100./(index[1]-index[0]))
sys.stdout.flush()
sys.stdout.write("\rgetting elemental abundances " + "...%d%%" % percent)
elemabunds.append([sum(isoabunds[zone][where(Z==iZ)]) for iZ in Zuq])
else:
elemabunds=[sum(isoabunds[index][where(Z==iZ)]) for iZ in Zuq]
return elemabunds | def get_elemental_abunds(self,cycle,index=None) | returns the elemental abundances for one cycle, either
for the whole star or a specific zone depending upon
the value of 'index'.
Parameters
----------
cycle : string or integer
Model to get the abundances for.
index : integer or list, optional
zone number for which to get elemental abundances. If
None the entire abundance profile is returned. If a 1x2
list, the abundances are returned between indices of
index[0] and index[1].
The default is None. | 4.00075 | 4.150755 | 0.963861 |
DataPlot.plot_prof_1(self,species,mod,xlim1,xlim2,ylim1,ylim2,symbol)
| def plot_prof_1(self, mod, species, xlim1, xlim2, ylim1, ylim2,
symbol=None) | plot one species for cycle between xlim1 and xlim2
Parameters
----------
mod : string or integer
Model to plot, same as cycle number.
species : list
Which species to plot.
xlim1, xlim2 : float
Mass coordinate range.
ylim1, ylim2 : float
Mass fraction coordinate range.
symbol : string, optional
Which symbol you want to use. If None symbol is set to '-'.
The default is None. | 4.2067 | 6.598248 | 0.637548 |
mass=self.se.get(mod,'mass')
Xspecies=self.se.get(mod,'yps',species)
pyl.plot(mass,Xspecies,'-',label=str(mod)+', '+species)
pyl.xlim(xlim1,xlim2)
pyl.legend() | def plot_prof_2(self, mod, species, xlim1, xlim2) | Plot one species for cycle between xlim1 and xlim2
Parameters
----------
mod : string or integer
Model to plot, same as cycle number.
species : list
Which species to plot.
xlim1, xlim2 : float
Mass coordinate range. | 5.911174 | 5.984907 | 0.98768 |
from xlsxwriter.workbook import Workbook # https://xlsxwriter.readthedocs.org/ Note: We need xlsxwriter. Please make sure it is installed. Run pip install xlsxwriter to install it using pip. If pip is not installed, install it via easy_install pip. Depending on the system you are on, you might need sudo rights for these things.
# isotopes and data
all_data = np.array(self.get(cycle,'iso_massf'))
header_data = self.se.isotopes
# get mass data
mass_data = np.array(self.get(cycle,'mass'))[np.newaxis]
# stack mass data and header together
header_data = np.hstack((['Mass'],header_data))
all_data = np.hstack((mass_data.transpose(),all_data))
# zero the cells with 1.e-99 entry
for i in range(len(all_data)):
for j in range(len(all_data[i])):
if all_data[i][j] == 1.e-99:
all_data[i][j] = 0.
# check how many columns have all zeros in the file
colzero = 0
all_sum = all_data.sum(0)
for i in range(len(all_sum)):
if all_sum[i] == 0.:
colzero += 1
print(str(colzero) + ' columns are empty. Skipping them.')
# now filter data
all_data_fil = np.zeros((len(all_data),len(all_data[0])-colzero))
header_data_fil = np.zeros((len(header_data)-colzero),dtype='|S9')
k = 0
for j in range(len(all_data[0])):
if all_sum[j] != 0:
for i in range(len(all_data)):
all_data_fil[i][k] = all_data[i][j]
header_data_fil[k] = header_data[j]
k += 1
# write to excel file
excelfile = Workbook(outfname + '.xlsx')
wsh = excelfile.add_worksheet(sheetname)
print('If you run from a restart file, this might take a little bit. Be patient!')
for i in range(len(all_data_fil)):
for j in range(len(all_data_fil[i])):
if i == 0:
wsh.write(0,j,header_data_fil[j])
wsh.write(i+1,j,all_data_fil[i][j])
excelfile.close()
return None | def ernst_table_exporter(self, cycle, outfname='table_out',
sheetname='Sheet 1') | This routine takes NuGrid data (model output) for a given
cycle and writes it into an Excel sheet.
This is one format as requested by Ernst Zinner in June 2013
(through Marco). If you want all radioactive isotopes, start
from the restart file. Empty columns are not written out and
you will get a message how many were empty. Please note that
only one cycle is written out.
Parameters
----------
cycle : integer
Number of the cycle to consider.
outfname : string, optional
File name to write it to, .xlsx is appended automatically.
The default is 'table_out'.
sheetname : string, optional
Name of the sheet in the excel file. The default is
'Sheet 1'. | 3.412148 | 3.39227 | 1.00586 |
self.plot_prof_1(num,'H-1',0.,5.,-5,0.)
self.plot_prof_1(num,'He-4',0.,5.,-5,0.)
self.plot_prof_1(num,'C-12',0.,5.,-5,0.)
self.plot_prof_1(num,'O-16',0.,5.,-5,0.)
pyl.legend(loc=3) | def plot4(self, num) | Plots the abundances of H-1, He-4, C-12 and O-16. | 3.039785 | 2.264285 | 1.342492 |
self.plot_prof_2(num,'H-1',0.,5.)
self.plot_prof_2(num,'He-4',0.,5.)
self.plot_prof_2(num,'C-12',0.,5.)
self.plot_prof_2(num,'O-16',0.,5.)
pyl.legend(loc=3) | def plot4_nolog(self, num) | Plots the abundances of H-1, He-4, C-12 and O-16. | 3.367157 | 2.355483 | 1.429498 |
mass=self.se.get(mod,'mass')
Xspecies=self.se.get(mod,'yps',species)
pyl.plot(mass[0:len(mass):sparse],np.log10(Xspecies[0:len(Xspecies):sparse]),symbol)
pyl.xlim(xlim1,xlim2)
pyl.ylim(ylim1,ylim2)
pyl.legend() | def plot_prof_sparse(self, mod, species, xlim1, xlim2, ylim1, ylim2,
sparse, symbol) | plot one species for cycle between xlim1 and xlim2.
Parameters
----------
species : list
which species to plot.
mod : string or integer
Model (cycle) to plot.
xlim1, xlim2 : float
Mass coordinate range.
ylim1, ylim2 : float
Mass fraction coordinate range.
sparse : integer
Sparsity factor for points.
symbol : string
which symbol you want to use? | 4.15729 | 4.016471 | 1.03506 |
filename='traj_'+str(mass_coo)+'.dat'
f = open(filename,'a')
radius_at_mass_coo=[]
density_at_mass_coo=[]
temperature_at_mass_coo=[]
masses=self.se.get(list(range(ini,end+1,delta)),'mass')
temps=self.se.get(list(range(ini,end+1,delta)),'temperature')
rhos=self.se.get(list(range(ini,end+1,delta)),'rho')
radii=self.se.get(list(range(ini,end+1,delta)),'radius')
ages=self.se.get(list(range(ini,end+1,delta)),'age')
cycs=list(range(ini,end+1,delta))
age_all=[]
for i in range(len(ages)):
age=ages[i]
if age_in_sec:
age /= constants.one_year
mass=masses[i]
temperature=temps[i]
rho=rhos[i]
radius=radii[i]
my_things=[temperature,rho,radius]
if mass[0]>mass[len(mass)-1]:
zone_above=where(mass>mass_coo)[0][-1]
zone_below=zone_above+1
else:
zone_above=where(mass>mass_coo)[0][0]
zone_below=zone_above-1
if mass[zone_below]>mass[zone_above]:
sys.exit("ERROR: finding of zone index confused")
all_things_interplt=[]
for thing in my_things:
thing_interplt=thing[zone_below]+(mass_coo-mass[zone_below])* \
(thing[zone_above]-thing[zone_below])/(mass[zone_above]-mass[zone_below])
all_things_interplt.append(thing_interplt)
this_temperature,this_rho,this_radius=all_things_interplt
string = str(cycs[i])+' '+str(age)+' '+str(this_temperature)+' '+str(this_rho)
f.write(string+"\n")
radius_at_mass_coo.append(this_radius)
density_at_mass_coo.append(this_rho)
temperature_at_mass_coo.append(this_temperature)
age_all.append(age)
f.close()
if online:
return FileLink(filename)
return radius_at_mass_coo, density_at_mass_coo, temperature_at_mass_coo, age_all | def trajectory(self, ini, end, delta, mass_coo, age_in_sec=False,
online=False) | create a trajectory out of a stellar model
Parameters
----------
ini : integer
Initial model, inital cycle number.
end : integer
Final model, final cycle number.
delta : integer
Sparsity factor of the frames.
mass_coo : float
Mass coordinate for the traj.
age_in_sec : boolean, optional
Set to True if age in se file is in seconds (like in MESA).
The default is False.
Returns
--------
float
radius_at_mass_coo, density_at_mass_coo,
temperature_at_mass_coo, age_all
Notes
-----
plus writes a file with the trajectory information to be used
with ppn.
Warning: remove the old trajectory, if you have any for the same
mass coordinate. You are appending data, not overwriting.
Update: this method works for output types with indexes going
from the outside in (MESA) or the other way around. Also the
requested quantities are linearly interpolated in the mass
shell.
online: boolean, optional
are you working online in the ipython notebook? If so,
you will be given an HTML link to download the file. | 2.48521 | 2.299737 | 1.08065 |
# Marco, you have already implemented finding headers and columns in
# ABUP files. You may want to transplant that into here?
species='C-12'
filename = 'ABUPP%07d0000.DAT' % mod
print(filename)
mass,c12=np.loadtxt(filename,skiprows=4,usecols=[1,18],unpack=True)
c12_se=self.se.get(mod,'iso_massf','C-12')
mass_se=self.se.get(mod,'mass')
pyl.plot(mass,c12)
pyl.plot(mass_se,c12_se,'o',label='cycle '+str(mod))
pyl.legend() | def abup_se_plot(mod,species) | plot species from one ABUPP file and the se file.
You must use this function in the directory where the ABP files
are and an ABUP file for model mod must exist.
Parameters
----------
mod : integer
Model to plot, you need to have an ABUPP file for that
model.
species : string
The species to plot.
Notes
-----
The species is set to 'C-12'. | 8.579093 | 6.825135 | 1.256985 |
import nuutils as u
masses = []
# Check the inputs
#if not self.se.cycles.count(str(cycle)):
# print 'You entered an cycle that doesn\'t exist in this dataset:', cycle
# print 'I will try and correct your format.'
# cyc_len = len(self.se.cycles[-1])
# print cyc_len, len(str(cycle))
#
# while len(str(cycle)) < cyc_len:
# cycle = '0'+str(cycle)
# print cycle
# if not self.se.cycles.count(str(cycle)):
# print 'I was unable to correct your cycle. Please check that it exists in your dataset.'
masses = self.se.get(cycle,'mass')
if mass_range == None:
print('Using default mass range')
mass_range = [min(masses),max(masses)]
# what this was for??? Marco
#masses.sort()
#mass_range.sort()
print('Using The following conditions:')
print('\tmass_range:', mass_range[0], mass_range[1])
print('\tcycle:', cycle)
isotope_names = self.se.isotopes
u.convert_specie_naming_from_h5_to_ppn(isotope_names)
names_ppn_world = u.spe
number_names_ppn_world = u.n_array
u.define_zip_index_for_species(names_ppn_world,number_names_ppn_world)
# from here below I read the abundance.
#name_specie_in_file=self.se.dcols[5]
# I am using directly 'iso_massf' only because somehow m20 explosive do not have dcols....
name_specie_in_file='iso_massf'
abunds=self.se.get(cycle,name_specie_in_file)
global used_masses
used_masses = []
self.mass_frac = []
for i in range(len(masses)):
if mass_range[0] <= masses[i] and mass_range[1] >= masses[i] :
used_masses.append(masses[i])
self.mass_frac.append(abunds[i]) | def _read_iso_abund_marco(self, mass_range, cycle) | plot the abundance of all the chemical species
Parameters
----------
mass_range : list
A 1x2 array containing the lower and upper mass range. If
None, it will plot over the entire range.
cycle : string or integer
A string/integer of the cycle of interest. | 5.476134 | 5.361959 | 1.021294 |
import nuutils as u
global decayed_multi_d
decayed_multi_d=[]
#print len(mass_frac)
#print len(decay_raw)
for iii in range(len(mass_frac)):
jj=-1
decayed=[]
for i in range(len(u.decay_raw)):
if u.jdum[i] > 0.5:
jj=jj+1
dummy=0.
for j in range(len(u.decay_raw[i])):
try:
dum_str = u.decay_raw[i][j]
dummy = dummy + float(self.mass_frac[iii][u.cl[dum_str.lower().capitalize()]])
#print cl[dum_str.lower().capitalize()]
#print dum_str, mass_frac[iii][cl[dum_str.capitalize()]]
except KeyError:
None
#print 'I am not in the network:',decay_raw[i][j]
except IndexError:
None
#print 'I am not read',cl[decay_raw[i][j].lower().capitalize()],decay_raw[i][j]
decayed.append(dummy)
decayed_multi_d.append(decayed) | def decay(self, mass_frac) | this module simply calculate abundances of isotopes after decay.
It requires that before it is used a call is made to
_read_iso_abund_marco and _stable_species.
Parameters
----------
mass_frac : list
a list of mass_frac dicts.
See Also
--------
_read_iso_abund_marco(), nuutils.Utils._stable_species() | 4.579711 | 4.261882 | 1.074575 |
if ("tmass" in keyw) == False:
keyw["tmass"] = "mass"
if ("abund" in keyw) == False:
keyw["abund"] = "iso_massf"
if ("cycle" in keyw) == False:
keyw["cycle"] = "cycle"
print("Windyields() initialised. Reading files...")
ypsinit = []
niso = 0
X_i = []
E_i = []
totalmass = []
ypssurf = []
cycles = []
first = True
# The following statements copy global functions into local memory,
# which is called faster, speeding up the code slightly
wc = self._windcalc
cycleret = self.se.cycles
retrieve = self.se.get
capp = cycles.extend
tapp = totalmass.extend
yapp = ypssurf.extend
# Retrieve the data from the files
for i in range(ini,end+1,delta):
step = int(i)
capp([int(cycleret[i-ini])])
tapp([retrieve(step,keyw["tmass"])])
yapp([retrieve(step,keyw["abund"])])
print("Reading complete. Calculating yields and ejected masses...")
nsteps = len(cycles)-1
niso = len(ypssurf[0])
X_i = np.zeros([niso], float)
E_i = np.zeros([niso], float)
# Call the windyields calculator
X_i, E_i = wc(first, totalmass, nsteps, niso, ypssurf, \
ypsinit, X_i, E_i, cycles)
return X_i, E_i | def windyields(self, ini, end, delta, **keyw) | This function returns the wind yields and ejected masses.
X_i, E_i = data.windyields(ini, end, delta)
Parameters
----------
ini : integer
The starting cycle.
end : integer
The finishing cycle.
delta : integer
The cycle interval.
keyw : dict
A dict of key word arguments.
Returns
-------
list
The function returns a list of the wind yields(X_i) and
a list of the ejected masses(E_i) in the mass units that
were used (usually solar masses).
Notes
-----
The following keywords cand also be used:
+------------------+---------------+
| Keyword Argument | Default Value |
+==================+===============+
| abund | "iso_massf" |
+------------------+---------------+
| tmass | "mass" |
+------------------+---------------+
| cycle | "cycle" |
+------------------+---------------+
The keyword arguments are used when the variables within the
input file differ in name from their default values typically
found in an MPPNP output file. If the data table differs in
name, use these keywords. For example, if the table for the
abundances is called "abundances" instead of "iso_massf", then
use abund = "abundances" as a keyword argument. | 5.380856 | 4.384032 | 1.227376 |
if first == True:
X_i = np.zeros([niso], float)
E_i = np.zeros([niso], float)
ypsinit = ypssurf[0]
for m in range(niso):
for n in range(nsteps):
X_i[m] = X_i[m] + ((totalmass[n] - totalmass[n+1]) * \
(0.5 * (ypssurf[n][m] + ypssurf[n+1][m]) - ypsinit[m]))
E_i[m] = E_i[m] + ((totalmass[n] - totalmass[n+1]) * \
(0.5 * (ypssurf[n][m] + ypssurf[n+1][m])))
else:
for m in range(niso):
for n in range(nsteps):
X_i[m] = X_i[m] + ((totalmass[n] - totalmass[n+1]) * \
(0.5 * (ypssurf[n][m] + ypssurf[n+1][m]) - ypsinit[m]))
E_i[m] = E_i[m] + ((totalmass[n] - totalmass[n+1]) * \
(0.5 * (ypssurf[n][m] + ypssurf[n+1][m])))
return X_i, E_i | def _windcalc(self, first, totalmass, nsteps, niso, ypssurf, ypsinit, \
X_i, E_i, cycles) | This function calculates the windyields and ejected masses as called from
windyields(). It uses a summation version of the formulae used in Hirschi
et al. 2005, "Yields of rotating stars at solar metallicity".
If it is the first file, the arrays need to be created and the initial
abundances set | 1.452552 | 1.449916 | 1.001818 |
# Average the isotopic mass fractions over the mass range selected by
# _read_iso_abund_marco. Results are published via module-level globals
# (average_mass_frac / average_mass_frac_decay) rather than returned.
import nuutils as u
# Decayed output only makes sense when restricted to stable species.
if not stable and i_decay == 2:
    print('ERROR: choose i_decay = 1')
    return
#data=mp.se(directory,name_h5_file)
# Populates self.mass_frac (and, presumably, the module-level
# used_masses) for the requested cycle -- TODO confirm used_masses is
# set by _read_iso_abund_marco.
self._read_iso_abund_marco(mass_range,cycle)
#print spe
if i_decay == 2:
    u.stable_specie()
    self.decay(self.mass_frac)
# here I am calculating average mass fraction for all isotopes in given mass range, and then
# if needed calculating average over decayed.
# warning: mass_range is bigger than used_masses range, by definition. Should I use it?
print('average over used_masses range, not over original mass_range')
print(used_masses[0],used_masses[len(used_masses)-1],'instead of',mass_range[0],mass_range[1])
global average_mass_frac
average_mass_frac = []
if len(used_masses) >= 2:
    # Mass-weighted average: each zone contributes dm_i/dm_tot of its
    # mass fraction, dm_tot being the full span of used_masses.
    dm_tot = abs(used_masses[len(used_masses)-1]-used_masses[0])
    for j in range(len(u.spe)-1):
        temp = 0.
        for i in range(len(used_masses)-1):
            dm_i = abs(used_masses[i+1]-used_masses[i])
            temp = float(self.mass_frac[i][j]*dm_i/dm_tot) + temp
        average_mass_frac.append(temp)
    #print average_mass_frac
elif len(used_masses) == 1:
    print('case with 1 mass zone only, not implemented yet')
# Sanity check: the averaged mass fractions should sum to ~1.
somma = 0.
somma = sum(average_mass_frac)
print('departure from 1 of sum of average_mass_frac=',abs(1. - somma))
# not let's do it over decayed also, if i_decay = 2
if i_decay == 2:
    global average_mass_frac_decay
    average_mass_frac_decay = []
    dm_tot = abs(used_masses[len(used_masses)-1]-used_masses[0])
    #
    #print len(decayed_multi_d[0]),decayed_multi_d[0]
    # decayed_multi_d looks like a module-level global filled by
    # self.decay() -- TODO confirm against the decay() implementation.
    for j in range(len(u.back_ind)):
        temp = 0.
        for i in range(len(used_masses)-1):
            dm_i = abs(used_masses[i+1]-used_masses[i])
            temp = float(decayed_multi_d[i][j]*dm_i/dm_tot) + temp
        average_mass_frac_decay.append(temp)
    somma = 0.
    somma = sum(average_mass_frac_decay)
    print('departure from 1 of sum of average_mass_frac_decay=',abs(1. - somma))
Parameters
----------
mass_range : list
A 1x2 array required to plot data in a certain mass range.
Needed for _read_iso_abund_marco.
cycle : integer
which cycle from the h5 file?. Needed for _read_iso_abund_marco
stable : boolean
Do you want to plot only stable or not.
i_decay : integer
If i_decay is 1, then plot not decayed. If i_decay is 2,
then plot decayed. Make sense only if stable is true.
See Also
--------
_read_iso_abund_marco() | 3.850537 | 3.678916 | 1.04665 |
# Build and cache the element-name -> Z lookup via the nuutils helpers.
import nuutils as u
names = self.elements_names
u.give_zip_element_z_and_names(names)
# nuutils publishes the mapping as a module attribute; cache it here.
self.z_of_element_name = u.index_z_for_elements
# Compute isotopic production factors relative to solar, both not
# decayed and decayed, for every mass zone of the given cycle.
# Results are stored on self rather than returned.
import nuutils as u
masses_for_this_cycle = self.se.get(cycle,'mass')
# Read abundances over the full mass span of this cycle.
self._read_iso_abund_marco([min(masses_for_this_cycle),max(masses_for_this_cycle)],cycle)
u.stable_specie()
self.decay(self.mass_frac)
self.index_for_all_species = u.cl
self.index_for_stable_species = u.back_ind
# decayed_multi_d looks like a module-level global produced by
# self.decay() -- TODO confirm.
self.decayed_stable_isotopes_per_cycle = decayed_multi_d
# from here read solar abundances
solar_factor = 2.
u.solar('iniab1.0E-02.ppn_GN93',solar_factor)
self.stable_isotope_identifier=u.jjdum
self.stable_isotope_list=u.stable
# Production factors of the (not decayed) stable isotopes:
# X_i / X_i,solar per mass zone. jj walks the stable-isotope list in
# lockstep with the identifier flags (1 == stable).
self.isotopic_production_factors=[]
for i in range(len(masses_for_this_cycle)):
    pf_dum=[]
    jj=0
    for j in range(len(self.stable_isotope_identifier)):
        if self.stable_isotope_identifier[j] == 1:
            pf_dum.append(float(old_div(self.mass_frac[i][self.index_for_all_species[self.stable_isotope_list
            [jj].capitalize()]],u.solar_abundance[self.stable_isotope_list[jj].lower()])))
            jj=jj+1
        #elif self.stable_isotope_identifier[j] == 0:
        #    pf_dum.append(float(0.))
    self.isotopic_production_factors.append(pf_dum)
# Same ratio computed from the decayed abundances; note the decayed
# index uses upper-case keys while the undecayed one capitalizes.
self.isotopic_production_factors_decayed=[]
for i in range(len(masses_for_this_cycle)):
    pf_dum_d=[]
    jj=0
    for j in range(len(self.stable_isotope_identifier)):
        if self.stable_isotope_identifier[j] == 1:
            pf_dum_d.append(float(old_div(self.decayed_stable_isotopes_per_cycle[i][self.index_for_stable_species[self.stable_isotope_list
            [jj].upper()]],u.solar_abundance[self.stable_isotope_list[jj].lower()])))
            jj=jj+1
    self.isotopic_production_factors_decayed.append(pf_dum_d)
Parameters
----------
cycle : integer
The cycle. | 3.858155 | 3.817275 | 1.010709 |
# Compute elemental (not isotopic) abundances and production factors,
# both decayed and not decayed, for every mass zone of the given cycle.
# Results are stored on self rather than returned.
import nuutils as u
masses_for_this_cycle = self.se.get(cycle,'mass')
self._read_iso_abund_marco([min(masses_for_this_cycle),max(masses_for_this_cycle)],cycle)
u.stable_specie()
self.decay(self.mass_frac)
# provide library for Z versus element names, and Z for elements
element_name = self.se.elements
u.give_zip_element_z_and_names(element_name)
# from here read solar abundances
solar_factor = 2.
u.solar('iniab1.0E-02.ppn_GN93',solar_factor)
self.stable_isotope_identifier=u.jjdum
self.stable_isotope_list=u.stable
self.element_abundance_not_decayed=[]
self.element_abundance_decayed =[]
self.element_production_factors=[]
self.element_production_factors_decayed=[]
for i in range(len(masses_for_this_cycle)):
    # decayed_multi_d looks like a module-level global filled by
    # self.decay() -- TODO confirm.
    mass_fractions_array_decayed = decayed_multi_d[i]
    mass_fractions_array_not_decayed = self.mass_frac[i]
    # nuutils publishes its results as module attributes; collect them
    # per mass zone right after each call.
    u.element_abund_marco(2,self.stable_isotope_list,self.stable_isotope_identifier,mass_fractions_array_not_decayed,mass_fractions_array_decayed)
    self.element_abundance_not_decayed.append(u.elem_abund)
    self.element_abundance_decayed.append(u.elem_abund_decayed)
    self.element_production_factors.append(u.elem_prod_fac)
    self.element_production_factors_decayed.append(u.elem_prod_fac_decayed)
in elem_names).
Parameters
----------
cycle : integer
The cycle number | 5.371567 | 5.449547 | 0.985691 |
return utils.max_safe(
[device.get('CapacityBytes') for device in self.devices
if device.get('CapacityBytes') is not None]) | def maximum_size_bytes(self) | Gets the biggest disk drive
:returns size in bytes. | 8.6208 | 8.011897 | 1.076 |
# Translate the caller-facing value into the iLO 'PushType' and POST
# it to the HPE push-power-button action.
reverse_map = mappings.PUSH_POWER_BUTTON_VALUE_MAP_REV
if target_value not in reverse_map:
    msg = ('The parameter "%(parameter)s" value "%(target_value)s" is '
           'invalid. Valid values are: %(valid_power_values)s' %
           {'parameter': 'target_value', 'target_value': target_value,
            'valid_power_values': (
                mappings.PUSH_POWER_BUTTON_VALUE_MAP_REV.keys())})
    raise exception.InvalidInputError(msg)
push_type = reverse_map[target_value]
action = self._get_hpe_push_power_button_action_element()
self._conn.post(action.target_uri, data={'PushType': push_type})
:param target_value: The target value to be set.
:raises: InvalidInputError, if the target value is not
allowed.
:raises: SushyError, on an error from iLO. | 3.363676 | 2.855894 | 1.177801 |
return bios.BIOSSettings(
self._conn, utils.get_subresource_path_by(self, 'Bios'),
redfish_version=self.redfish_version) | def bios_settings(self) | Property to provide reference to `BIOSSettings` instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset. | 5.176975 | 5.861539 | 0.883211 |
# Map the caller-supplied device name onto the sushy boot-source
# constant; only the first entry of `devices` is honoured.
# NOTE(review): the signature's mutable default (devices=[]) is never
# mutated here, so it is harmless in practice.
device = PERSISTENT_BOOT_DEVICE_MAP.get(devices[0].upper())
if device == sushy.BOOT_SOURCE_TARGET_UEFI_TARGET:
    # UEFI target boot: pick the first iSCSI-capable UEFI override
    # device and patch it in as the UEFI boot source.
    try:
        uefi_devices = self.uefi_target_override_devices
        iscsi_device = None
        for uefi_device in uefi_devices:
            if uefi_device is not None and 'iSCSI' in uefi_device:
                iscsi_device = uefi_device
                break
        if iscsi_device is None:
            msg = 'No UEFI iSCSI bootable device found on system.'
            raise exception.IloError(msg)
    except sushy.exceptions.SushyError as e:
        msg = ('Unable to get uefi target override devices. '
               'Error %s') % (str(e))
        raise exception.IloError(msg)
    uefi_boot_settings = {
        'Boot': {'UefiTargetBootSourceOverride': iscsi_device}
    }
    self._conn.patch(self.path, data=uefi_boot_settings)
elif device is None:
    # Unknown device name: fall back to "no override".
    device = sushy.BOOT_SOURCE_TARGET_NONE
# Persistent -> continuous override; otherwise a one-time override.
tenure = (sushy.BOOT_SOURCE_ENABLED_CONTINUOUS
          if persistent else sushy.BOOT_SOURCE_ENABLED_ONCE)
self.set_system_boot_source(device, enabled=tenure)
Note: It uses first boot device from the devices and ignores rest.
:param devices: ordered list of boot devices
:param persistent: Boolean flag to indicate if the device to be set as
a persistent boot device
:raises: IloError, on an error from iLO.
:raises: IloInvalidInputError, if the given input is not valid. | 3.428705 | 3.505362 | 0.978132 |
return secure_boot.SecureBoot(
self._conn, utils.get_subresource_path_by(self, 'SecureBoot'),
redfish_version=self.redfish_version) | def secure_boot(self) | Property to provide reference to `SecureBoot` instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset. | 4.49414 | 4.66458 | 0.963461 |
return ethernet_interface.EthernetInterfaceCollection(
self._conn,
self._get_hpe_sub_resource_collection_path('EthernetInterfaces'),
redfish_version=self.redfish_version) | def ethernet_interfaces(self) | Provide reference to EthernetInterfacesCollection instance | 5.497924 | 4.805307 | 1.144136 |
return hpe_smart_storage.HPESmartStorage(
self._conn, utils.get_subresource_path_by(
self, ['Oem', 'Hpe', 'Links', 'SmartStorage']),
redfish_version=self.redfish_version) | def smart_storage(self) | This property gets the object for smart storage.
This property gets the object for smart storage.
There is no collection for smart storages.
:returns: an instance of smart storage | 5.678439 | 7.232736 | 0.785103 |
return storage.StorageCollection(
self._conn, utils.get_subresource_path_by(self, 'Storage'),
redfish_version=self.redfish_version) | def storages(self) | This property gets the list of instances for Storages
This property gets the list of instances for Storages
:returns: a list of instances of Storages | 5.172445 | 7.057151 | 0.732937 |
return simple_storage.SimpleStorageCollection(
self._conn, utils.get_subresource_path_by(self, 'SimpleStorage'),
redfish_version=self.redfish_version) | def simple_storages(self) | This property gets the list of instances for SimpleStorages
:returns: a list of instances of SimpleStorages | 5.147448 | 5.950701 | 0.865015 |
return memory.MemoryCollection(
self._conn, utils.get_subresource_path_by(self, 'Memory'),
redfish_version=self.redfish_version) | def memory(self) | Property to provide reference to `MemoryCollection` instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset. | 7.421224 | 6.791907 | 1.092657 |
return (smart_storage_config.
HPESmartStorageConfig(self._conn, smart_storage_config_url,
redfish_version=self.redfish_version)) | def get_smart_storage_config(self, smart_storage_config_url) | Returns a SmartStorageConfig Instance for each controller. | 6.051065 | 5.424779 | 1.115449 |
ac = self.smart_storage.array_controllers.array_controller_by_model(
controller_model)
if ac:
for ssc_id in self.smart_storage_config_identities:
ssc_obj = self.get_smart_storage_config(ssc_id)
if ac.location == ssc_obj.location:
return ssc_obj | def _get_smart_storage_config_by_controller_model(self, controller_model) | Returns a SmartStorageConfig Instance for controller by model.
:returns: SmartStorageConfig Instance for controller | 3.917812 | 4.160699 | 0.941623 |
if self.smart_storage_config_identities is None:
msg = ('The Redfish controller failed to get the '
'SmartStorageConfig controller configurations.')
LOG.debug(msg)
raise exception.IloError(msg) | def check_smart_storage_config_ids(self) | Check SmartStorageConfig controllers is there in hardware.
:raises: IloError, on an error from iLO. | 7.069551 | 5.116259 | 1.381781 |
# Clear the raid configuration on every SmartStorageConfig controller.
# Raises IloError if any controller fails, or
# IloLogicalDriveNotFoundError when no controller had logical drives.
self.check_smart_storage_config_ids()
any_exceptions = []
ld_exc_count = 0
for config_id in self.smart_storage_config_identities:
    try:
        ssc_obj = self.get_smart_storage_config(config_id)
        ssc_obj.delete_raid()
    # Fix: drop the unused `as e` binding -- the exception object was
    # never referenced on this branch.
    except exception.IloLogicalDriveNotFoundError:
        # A controller without logical drives is only an error if
        # *every* controller is in that state (checked below).
        ld_exc_count += 1
    except sushy.exceptions.SushyError as e:
        any_exceptions.append((config_id, str(e)))
# Surface all per-controller failures at once instead of failing fast.
if any_exceptions:
    msg = ('The Redfish controller failed to delete the '
           'raid configuration in one or more controllers with '
           'Error: %(error)s' % {'error': str(any_exceptions)})
    raise exception.IloError(msg)
if ld_exc_count == len(self.smart_storage_config_identities):
    msg = ('No logical drives are found in any controllers. Nothing '
           'to delete.')
    raise exception.IloLogicalDriveNotFoundError(msg)
Loops through each SmartStorageConfig controller and clears the
raid configuration.
:raises: IloError, on an error from iLO. | 3.489758 | 3.114572 | 1.120461 |
default = (
self.smart_storage.array_controllers.get_default_controller.model)
controllers = {default: []}
for ld in raid_config['logical_disks']:
if 'controller' not in ld.keys():
controllers[default].append(ld)
else:
ctrl = ld['controller']
if ctrl not in controllers:
controllers[ctrl] = []
controllers[ctrl].append(ld)
return controllers | def _parse_raid_config_data(self, raid_config) | It will parse raid config data based on raid controllers
:param raid_config: A dictionary containing target raid configuration
data. This data structure should be as follows:
raid_config = {'logical_disks': [{'raid_level': 1,
'size_gb': 100, 'controller':
'HPE Smart Array P408i-a SR Gen10'},
<info-for-logical-disk-2>]}
:returns: A dictionary of controllers, each containing list of
their respected logical drives. | 4.359978 | 4.161329 | 1.047737 |
# Validate that SmartStorageConfig resources exist before attempting
# any configuration.
self.check_smart_storage_config_ids()
any_exceptions = []
# Group the requested logical disks by their target controller model.
controllers = self._parse_raid_config_data(raid_config)
# Creating raid on rest of the controllers
for controller in controllers:
    try:
        config = {'logical_disks': controllers[controller]}
        ssc_obj = (
            self._get_smart_storage_config_by_controller_model(
                controller))
        if ssc_obj:
            ssc_obj.create_raid(config)
        else:
            # Unknown controller model: report the models actually
            # present so the caller can correct the input.
            members = (
                self.smart_storage.array_controllers.get_members())
            models = [member.model for member in members]
            msg = ('Controller not found. Available controllers are: '
                   '%(models)s' % {'models': models})
            any_exceptions.append((controller, msg))
    except sushy.exceptions.SushyError as e:
        any_exceptions.append((controller, str(e)))
# Surface all per-controller failures at once instead of failing fast.
if any_exceptions:
    msg = ('The Redfish controller failed to create the '
           'raid configuration for one or more controllers with '
           'Error: %(error)s' % {'error': str(any_exceptions)})
    raise exception.IloError(msg)
:param raid_config: A dictionary containing target raid configuration
data. This data structure should be as follows:
raid_config = {'logical_disks': [{'raid_level': 1,
'size_gb': 100, 'physical_disks': ['6I:1:5'],
'controller': 'HPE Smart Array P408i-a SR Gen10'},
<info-for-logical-disk-2>]}
:raises: IloError, on an error from iLO. | 4.017216 | 3.745792 | 1.072461 |
controllers = self._parse_raid_config_data(raid_config)
ld_exc_count = 0
any_exceptions = []
config = {'logical_disks': []}
for controller in controllers:
try:
ssc_obj = (
self._get_smart_storage_config_by_controller_model(
controller))
if ssc_obj:
result = ssc_obj.read_raid(controller=controller)
config['logical_disks'].extend(result['logical_disks'])
except exception.IloLogicalDriveNotFoundError as e:
ld_exc_count += 1
except sushy.exceptions.SushyError as e:
any_exceptions.append((controller, str(e)))
if ld_exc_count == len(controllers):
msg = 'No logical drives are found in any controllers.'
raise exception.IloLogicalDriveNotFoundError(msg)
if any_exceptions:
msg = ('The Redfish controller failed to read the '
'raid configuration in one or more controllers with '
'Error: %(error)s' % {'error': str(any_exceptions)})
raise exception.IloError(msg)
return config | def _post_create_read_raid(self, raid_config) | Read the logical drives from the system after post-create raid
:param raid_config: A dictionary containing target raid configuration
data. This data structure should be as
raid_config = {'logical_disks': [{'raid_level': 1,
'size_gb': 100, 'physical_disks': ['6I:1:5'],
'controller': 'HPE Smart Array P408i-a SR Gen10'},
<info-for-logical-disk-2>]}
:raises: IloLogicalDriveNotFoundError, if no controllers are configured
:raises: IloError, if any error form iLO
:returns: A dictionary containing list of logical disks | 3.539564 | 3.370545 | 1.050146 |
any_exceptions = []
ssc_ids = self.smart_storage_config_identities
config = {'logical_disks': []}
for ssc_id in ssc_ids:
try:
ssc_obj = self.get_smart_storage_config(ssc_id)
ac_obj = (
self.smart_storage.array_controllers.
array_controller_by_location(ssc_obj.location))
if ac_obj:
model = ac_obj.model
result = ssc_obj.read_raid()
if result:
config['logical_disks'].extend(result['logical_disks'])
except sushy.exceptions.SushyError as e:
any_exceptions.append((model, str(e)))
if any_exceptions:
msg = ('The Redfish controller failed to read the '
'raid configuration in one or more controllers with '
'Error: %(error)s' % {'error': str(any_exceptions)})
raise exception.IloError(msg)
return config | def _post_delete_read_raid(self) | Read the logical drives from the system after post-delete raid
:raises: IloError, if any error form iLO
:returns: Empty dictionary with format: {'logical_disks': []} | 4.160767 | 3.801611 | 1.094475 |
self.check_smart_storage_config_ids()
if raid_config:
# When read called after create raid, user can pass raid config
# as a input
result = self._post_create_read_raid(raid_config=raid_config)
else:
# When read called after delete raid, there will be no input
# passed by user then
result = self._post_delete_read_raid()
return result | def read_raid(self, raid_config=None) | Read the logical drives from the system
:param raid_config: None or a dictionary containing target raid
configuration data. This data structure should be as
follows:
raid_config = {'logical_disks': [{'raid_level': 1,
'size_gb': 100, 'physical_disks': ['6I:1:5'],
'controller': 'HPE Smart Array P408i-a SR Gen10'},
<info-for-logical-disk-2>]}
:returns: A dictionary containing list of logical disks | 7.265471 | 7.95014 | 0.91388 |
'''
Sends the request and return response. Catches HTTPError and hands it
to error handler. Retries per self.retry (the retry policy) until the
policy returns None, at which point the last error is re-raised.
'''
operation_context = operation_context or _OperationContext()
retry_context = RetryContext()
# Apply the appropriate host based on the location mode
self._apply_host(request, operation_context, retry_context)
# Apply common settings to the request
_update_request(request)
# Retry loop: exits via a return (success) or a raise (exhausted
# retries / non-retryable error).
while(True):
    try:
        try:
            # Execute the request callback
            if self.request_callback:
                self.request_callback(request)
            # Add date and auth after the callback so date doesn't get too old and
            # authentication is still correct if signed headers are added in the request
            # callback. This also ensures retry policies with long back offs
            # will work as it resets the time sensitive headers.
            _add_date_header(request)
            self.authentication.sign_request(request)
            # Set the request context
            retry_context.request = request
            # Perform the request
            response = self._httpclient.perform_request(request)
            # Execute the response callback
            if self.response_callback:
                self.response_callback(response)
            # Set the response context
            retry_context.response = response
            # Parse and wrap HTTP errors in AzureHttpError which inherits from AzureException
            if response.status >= 300:
                # This exception will be caught by the general error handler
                # and raised as an azure http exception
                _http_error_handler(HTTPError(response.status, response.message, response.headers, response.body))
            # Parse the response
            if parser:
                if parser_args:
                    # parser_args are extra positional args appended
                    # after the response itself.
                    args = [response]
                    args.extend(parser_args)
                    return parser(*args)
                else:
                    return parser(response)
            else:
                return
        except AzureException as ex:
            # Already an Azure error: let the outer handler decide
            # whether to retry.
            raise ex
        except Exception as ex:
            if sys.version_info >= (3,):
                # Automatic chaining in Python 3 means we keep the trace
                raise AzureException(ex.args[0])
            else:
                # There isn't a good solution in 2 for keeping the stack trace
                # in general, or that will not result in an error in 3
                # However, we can keep the previous error type and message
                # TODO: In the future we will log the trace
                msg = ""
                if len(ex.args) > 0:
                    msg = ex.args[0]
                raise AzureException('{}: {}'.format(ex.__class__.__name__, msg))
    except AzureException as ex:
        # Decryption failures (invalid objects, invalid algorithms, data unencrypted in strict mode, etc)
        # will not be resolved with retries.
        if str(ex) == _ERROR_DECRYPTION_FAILURE:
            raise ex
        # Determine whether a retry should be performed and if so, how
        # long to wait before performing retry.
        retry_interval = self.retry(retry_context)
        if retry_interval is not None:
            # Execute the callback
            if self.retry_callback:
                self.retry_callback(retry_context)
            # Sleep for the desired retry interval
            sleep(retry_interval)
        else:
            # Retry policy gave up: propagate the last error.
            raise ex
    finally:
        # If this is a location locked operation and the location is not set,
        # this is the first request of that operation. Set the location to
        # be used for subsequent requests in the operation.
        if operation_context.location_lock and not operation_context.host_location:
            operation_context.host_location = {retry_context.location_mode: request.host}
to error handler | 5.381721 | 5.060451 | 1.063487 |
''' Convert json response to entity.
The entity format is:
{
"Address":"Mountain View",
"Age":23,
"AmountDue":200.23,
"CustomerCode@odata.type":"Edm.Guid",
"CustomerCode":"c9da6455-213d-42c9-9a79-3e9149a57833",
"CustomerSince@odata.type":"Edm.DateTime",
"CustomerSince":"2008-07-10T00:00:00",
"IsActive":true,
"NumberOfOrders@odata.type":"Edm.Int64",
"NumberOfOrders":"255",
"PartitionKey":"mypartitionkey",
"RowKey":"myrowkey"
}
'''
entity = Entity()
properties = {}
edmtypes = {}
odata = {}
for name, value in entry_element.items():
if name.startswith('odata.'):
odata[name[6:]] = value
elif name.endswith('@odata.type'):
edmtypes[name[:-11]] = value
else:
properties[name] = value
# Partition key is a known property
partition_key = properties.pop('PartitionKey', None)
if partition_key:
entity['PartitionKey'] = partition_key
# Row key is a known property
row_key = properties.pop('RowKey', None)
if row_key:
entity['RowKey'] = row_key
# Timestamp is a known property
timestamp = properties.pop('Timestamp', None)
if timestamp:
entity['Timestamp'] = _from_entity_datetime(timestamp)
for name, value in properties.items():
mtype = edmtypes.get(name);
# use the property resolver if present
if property_resolver:
mtype = property_resolver(partition_key, row_key,
name, value, mtype)
# throw if the type returned is not a valid edm type
if mtype and mtype not in _EDM_TYPES:
raise AzureException(_ERROR_TYPE_NOT_SUPPORTED.format(mtype))
# Add type for Int32
if type(value) is int:
mtype = EdmType.INT32
# no type info, property should parse automatically
if not mtype:
entity[name] = value
else: # need an object to hold the property
conv = _ENTITY_TO_PYTHON_CONVERSIONS.get(mtype)
if conv is not None:
try:
property = conv(value)
except Exception as e:
# throw if the type returned by the property resolver
# cannot be used in the conversion
if property_resolver:
raise AzureException(
_ERROR_INVALID_PROPERTY_RESOLVER.format(name, value, mtype))
else:
raise e
else:
property = EntityProperty(mtype, value)
entity[name] = property
# extract etag from entry
etag = odata.get('etag')
if timestamp:
etag = 'W/"datetime\'' + url_quote(timestamp) + '\'"'
entity['etag'] = etag
return entity | def _convert_json_to_entity(entry_element, property_resolver) | Convert json response to entity.
The entity format is:
{
"Address":"Mountain View",
"Age":23,
"AmountDue":200.23,
"CustomerCode@odata.type":"Edm.Guid",
"CustomerCode":"c9da6455-213d-42c9-9a79-3e9149a57833",
"CustomerSince@odata.type":"Edm.DateTime",
"CustomerSince":"2008-07-10T00:00:00",
"IsActive":true,
"NumberOfOrders@odata.type":"Edm.Int64",
"NumberOfOrders":"255",
"PartitionKey":"mypartitionkey",
"RowKey":"myrowkey"
} | 3.450995 | 2.4015 | 1.437016 |
''' Converts the response to tables class.
'''
if response is None:
return response
tables = _list()
continuation = _get_continuation_from_response_headers(response)
tables.next_marker = continuation.get('NextTableName')
root = loads(response.body.decode('utf-8'))
if 'TableName' in root:
table = Table()
table.name = root['TableName']
tables.append(table)
else:
for element in root['value']:
table = Table()
table.name = element['TableName']
tables.append(table)
return tables | def _convert_json_response_to_tables(response) | Converts the response to tables class. | 4.009098 | 3.719119 | 1.07797 |
''' Converts the response to tables class.
'''
if response is None:
return response
entities = _list()
entities.next_marker = _get_continuation_from_response_headers(response)
root = loads(response.body.decode('utf-8'))
if 'value' in root:
for entity in root['value']:
entities.append(_convert_json_to_entity(entity,
property_resolver))
else:
entities.append(_convert_json_to_entity(entity,
property_resolver))
return entities | def _convert_json_response_to_entities(response, property_resolver) | Converts the response to tables class. | 4.206621 | 3.528407 | 1.192215 |
''' Extracts the etag from the response headers. '''
if response and response.headers:
for name, value in response.headers:
if name.lower() == 'etag':
return value
return None | def _extract_etag(response) | Extracts the etag from the response headers. | 3.434046 | 3.502562 | 0.980438 |
'''
Returns a generator to list the tables. The generator will lazily follow
the continuation tokens returned by the service and stop when all tables
have been returned or num_results is reached.
If num_results is specified and the account has more than that number of
tables, the generator will have a populated next_marker field once it
finishes. This marker can be used to create a new generator if more
results are desired.
:param int num_results:
The maximum number of tables to return.
:param marker:
An opaque continuation object. This value can be retrieved from the
next_marker field of a previous generator object if num_results was
specified and that generator has finished enumerating results. If
specified, this generator will begin returning results from the point
where the previous generator stopped.
:type marker: obj
:param int timeout:
The server timeout, expressed in seconds. This function may make multiple
calls to the service in which case the timeout value specified will be
applied to each individual call.
:return: A generator which produces :class:`~azure.storage.models.table.Table` objects.
:rtype: :class:`~azure.storage.models.ListGenerator`:
'''
kwargs = {'max_results': num_results, 'marker': marker, 'timeout': timeout}
resp = self._list_tables(**kwargs)
return ListGenerator(resp, self._list_tables, (), kwargs) | def list_tables(self, num_results=None, marker=None, timeout=None) | Returns a generator to list the tables. The generator will lazily follow
the continuation tokens returned by the service and stop when all tables
have been returned or num_results is reached.
If num_results is specified and the account has more than that number of
tables, the generator will have a populated next_marker field once it
finishes. This marker can be used to create a new generator if more
results are desired.
:param int num_results:
The maximum number of tables to return.
:param marker:
An opaque continuation object. This value can be retrieved from the
next_marker field of a previous generator object if num_results was
specified and that generator has finished enumerating results. If
specified, this generator will begin returning results from the point
where the previous generator stopped.
:type marker: obj
:param int timeout:
The server timeout, expressed in seconds. This function may make multiple
calls to the service in which case the timeout value specified will be
applied to each individual call.
:return: A generator which produces :class:`~azure.storage.models.table.Table` objects.
:rtype: :class:`~azure.storage.models.ListGenerator`: | 3.609816 | 1.305578 | 2.764919 |
'''
Returns a list of tables under the specified account. Makes a single list
request to the service. Used internally by the list_tables method.
:param int max_results:
The maximum number of tables to return. A single list request may
return up to 1000 tables and potentially a continuation token which
should be followed to get additional resutls.
:param marker:
A dictionary which identifies the portion of the query to be
returned with the next query operation. The operation returns a
next_marker element within the response body if the list returned
was not complete. This value may then be used as a query parameter
in a subsequent call to request the next portion of the list of
queues. The marker value is opaque to the client.
:type marker: obj
:param int timeout:
The server timeout, expressed in seconds.
:return: A list of tables, potentially with a next_marker property.
:rtype: list of :class:`~azure.storage.models.table.Table`:
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/Tables'
request.headers = [('Accept', TablePayloadFormat.JSON_NO_METADATA)]
request.query = [
('$top', _int_to_str(max_results)),
('NextTableName', _to_str(marker)),
('timeout', _int_to_str(timeout)),
]
response = self._perform_request(request)
return _convert_json_response_to_tables(response) | def _list_tables(self, max_results=None, marker=None, timeout=None) | Returns a list of tables under the specified account. Makes a single list
request to the service. Used internally by the list_tables method.
:param int max_results:
The maximum number of tables to return. A single list request may
return up to 1000 tables and potentially a continuation token which
should be followed to get additional results.
:param marker:
A dictionary which identifies the portion of the query to be
returned with the next query operation. The operation returns a
next_marker element within the response body if the list returned
was not complete. This value may then be used as a query parameter
in a subsequent call to request the next portion of the list of
queues. The marker value is opaque to the client.
:type marker: obj
:param int timeout:
The server timeout, expressed in seconds.
:return: A list of tables, potentially with a next_marker property.
:rtype: list of :class:`~azure.storage.models.table.Table`: | 3.952475 | 1.562876 | 2.528975 |
'''
Creates a new table in the storage account.
:param str table_name:
The name of the table to create. The table name may contain only
alphanumeric characters and cannot begin with a numeric character.
It is case-insensitive and must be from 3 to 63 characters long.
:param bool fail_on_exist:
Specifies whether to throw an exception if the table already exists.
:param int timeout:
The server timeout, expressed in seconds.
:return:
A boolean indicating whether the table was created. If fail_on_exist
was set to True, this will throw instead of returning false.
:rtype: bool
'''
_validate_not_none('table', table_name)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/Tables'
request.query = [('timeout', _int_to_str(timeout))]
request.headers = [_DEFAULT_CONTENT_TYPE_HEADER,
_DEFAULT_PREFER_HEADER,
_DEFAULT_ACCEPT_HEADER]
request.body = _get_request_body(_convert_table_to_json(table_name))
if not fail_on_exist:
try:
self._perform_request(request)
return True
except AzureHttpError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True | def create_table(self, table_name, fail_on_exist=False, timeout=None) | Creates a new table in the storage account.
:param str table_name:
The name of the table to create. The table name may contain only
alphanumeric characters and cannot begin with a numeric character.
It is case-insensitive and must be from 3 to 63 characters long.
:param bool fail_on_exist:
Specifies whether to throw an exception if the table already exists.
:param int timeout:
The server timeout, expressed in seconds.
:return:
A boolean indicating whether the table was created. If fail_on_exist
was set to True, this will throw instead of returning false.
:rtype: bool | 2.285906 | 1.583417 | 1.443654 |
'''
Returns a boolean indicating whether the table exists.
:param str table_name:
The name of table to check for existence.
:param int timeout:
The server timeout, expressed in seconds.
:return: A boolean indicating whether the table exists.
:rtype: bool
'''
_validate_not_none('table_name', table_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/Tables' + "('" + table_name + "')"
request.headers = [('Accept', TablePayloadFormat.JSON_NO_METADATA)]
request.query = [('timeout', _int_to_str(timeout))]
try:
self._perform_request(request)
return True
except AzureHttpError as ex:
_dont_fail_not_exist(ex)
return False | def exists(self, table_name, timeout=None) | Returns a boolean indicating whether the table exists.
:param str table_name:
The name of table to check for existence.
:param int timeout:
The server timeout, expressed in seconds.
:return: A boolean indicating whether the table exists.
:rtype: bool | 2.342259 | 1.916218 | 1.222335 |
def get_table_acl(self, table_name, timeout=None):
    '''
    Returns details about any stored access policies specified on the
    table that may be used with Shared Access Signatures.

    :param str table_name:
        The name of an existing table.
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: A dictionary of access policies associated with the table.
    :rtype: dict of str to :class:`~azure.storage.models.AccessPolicy`:
    '''
    _validate_not_none('table_name', table_name)

    acl_request = HTTPRequest()
    acl_request.method = 'GET'
    acl_request.host = self._get_host()
    acl_request.path = '/' + _to_str(table_name)
    acl_request.query = [('comp', 'acl'), ('timeout', _int_to_str(timeout))]

    # The service answers with an XML list of signed identifiers.
    response = self._perform_request(acl_request)
    return _convert_xml_to_signed_identifiers(response.body)
table that may be used with Shared Access Signatures.
:param str table_name:
The name of an existing table.
:param int timeout:
The server timeout, expressed in seconds.
:return: A dictionary of access policies associated with the table.
:rtype: dict of str to :class:`~azure.storage.models.AccessPolicy`: | 2.54682 | 1.478886 | 1.722121 |
def set_table_acl(self, table_name, signed_identifiers=None, timeout=None):
    '''
    Sets stored access policies for the table that may be used with Shared
    Access Signatures.

    When you set permissions for a table, the existing permissions are
    replaced. To update the table's permissions, call
    :func:`~get_table_acl` to fetch all access policies associated with
    the table, modify the access policy that you wish to change, and then
    call this function with the complete set of data to perform the update.

    When you establish a stored access policy on a table, it may take up
    to 30 seconds to take effect. During this interval, a shared access
    signature that is associated with the stored access policy will throw
    an :class:`AzureHttpError` until the access policy becomes active.

    :param str table_name:
        The name of an existing table.
    :param signed_identifiers:
        A dictionary of access policies to associate with the table. The
        dictionary may contain up to 5 elements. An empty dictionary
        will clear the access policies set on the service.
    :type signed_identifiers: dict of str to :class:`~azure.storage.models.AccessPolicy`
    :param int timeout:
        The server timeout, expressed in seconds.
    '''
    _validate_not_none('table_name', table_name)

    acl_request = HTTPRequest()
    acl_request.method = 'PUT'
    acl_request.host = self._get_host()
    acl_request.path = '/' + _to_str(table_name)
    acl_request.query = [
        ('comp', 'acl'),
        ('timeout', _int_to_str(timeout)),
    ]
    # Serialize the policies to the XML shape the service expects.
    acl_request.body = _get_request_body(
        _convert_signed_identifiers_to_xml(signed_identifiers))
    self._perform_request(acl_request)
Access Signatures.
When you set permissions for a table, the existing permissions are replaced.
To update the table’s permissions, call :func:`~get_table_acl` to fetch
all access policies associated with the table, modify the access policy
that you wish to change, and then call this function with the complete
set of data to perform the update.
When you establish a stored access policy on a table, it may take up to
30 seconds to take effect. During this interval, a shared access signature
that is associated with the stored access policy will throw an
:class:`AzureHttpError` until the access policy becomes active.
:param str table_name:
The name of an existing table.
:param signed_identifiers:
A dictionary of access policies to associate with the table. The
dictionary may contain up to 5 elements. An empty dictionary
will clear the access policies set on the service.
:type signed_identifiers: dict of str to :class:`~azure.storage.models.AccessPolicy`
:param int timeout:
The server timeout, expressed in seconds. | 3.251359 | 1.251595 | 2.597773 |
'''
Commits a :class:`~azure.storage.table.TableBatch` request.

:param str table_name:
    The name of the table to commit the batch to.
:param TableBatch batch:
    The batch to commit.
:param int timeout:
    The server timeout, expressed in seconds.
:return: A list of the batch responses corresponding to the requests in the batch.
:rtype: list of response objects
'''
_validate_not_none('table_name', table_name)

# Construct the outer batch request; the individual operations are
# carried in its multipart body.
request = HTTPRequest()
request.method = 'POST'
request.path = '/' + '$batch'
request.host = self._get_host()
request.query = [('timeout', _int_to_str(timeout))]

# Update the batch operation requests with table and client specific info.
# Inserts POST to the table itself; every other operation addresses the
# specific entity path built from the batch partition key and the row key.
for row_key, batch_request in batch._requests:
    batch_request.host = self._get_host()
    if batch_request.method == 'POST':
        batch_request.path = '/' + _to_str(table_name)
    else:
        batch_request.path = _get_entity_path(table_name, batch._partition_key, row_key)
    _update_request(batch_request)

# Construct the batch body; the converter also returns the multipart
# boundary, which must be echoed in the Content-Type header.
request.body, boundary = _convert_batch_to_json(batch._requests)
request.headers = [('Content-Type', boundary)]

# Perform the batch request and return the per-operation responses.
response = self._perform_request(request)
responses = _parse_batch_response(response.body)
return responses
:param str table_name:
The name of the table to commit the batch to.
:param TableBatch batch:
The batch to commit.
:param int timeout:
The server timeout, expressed in seconds.
:return: A list of the batch responses corresponding to the requests in the batch.
:rtype: list of response objects | 2.950933 | 2.434107 | 1.212327 |
def batch(self, table_name, timeout=None):
    '''
    Creates a batch object which can be used as a context manager.
    Commits the batch on exit.

    :param str table_name:
        The name of the table to commit the batch to.
    :param int timeout:
        The server timeout, expressed in seconds.
    '''
    table_batch = TableBatch()
    # Hand the batch to the caller's with-block; if that block raises,
    # execution never reaches the commit below and nothing is sent.
    yield table_batch
    self.commit_batch(table_name, table_batch, timeout=timeout)
:param str table_name:
The name of the table to commit the batch to.
:param int timeout:
The server timeout, expressed in seconds. | 3.932746 | 2.010796 | 1.955815 |
def get_entity(self, table_name, partition_key, row_key, select=None,
               accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
               property_resolver=None, timeout=None):
    '''
    Get an entity from the specified table. Throws if the entity does not exist.

    :param str table_name:
        The name of the table to get the entity from.
    :param str partition_key:
        The PartitionKey of the entity.
    :param str row_key:
        The RowKey of the entity.
    :param str select:
        Returns only the desired properties of an entity from the set.
    :param str accept:
        Specifies the accepted content type of the response payload. See
        :class:`~azure.storage.table.models.TablePayloadFormat` for possible
        values.
    :param property_resolver:
        A function which given the partition key, row key, property name,
        property value, and the property EdmType if returned by the service,
        returns the EdmType of the property. Generally used if accept is set
        to JSON_NO_METADATA.
    :type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type)
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: The retrieved entity.
    :rtype: :class:`~azure.storage.table.models.Entity`
    '''
    _validate_not_none('table_name', table_name)

    entity_request = _get_entity(partition_key, row_key, select, accept)
    entity_request.host = self._get_host()
    entity_request.path = _get_entity_path(table_name, partition_key, row_key)
    entity_request.query += [('timeout', _int_to_str(timeout))]

    response = self._perform_request(entity_request)
    return _convert_json_response_to_entity(response, property_resolver)
:param str table_name:
The name of the table to get the entity from.
:param str partition_key:
The PartitionKey of the entity.
:param str row_key:
The RowKey of the entity.
:param str select:
Returns only the desired properties of an entity from the set.
:param str accept:
Specifies the accepted content type of the response payload. See
:class:`~azure.storage.table.models.TablePayloadFormat` for possible
values.
:param property_resolver:
A function which given the partition key, row key, property name,
property value, and the property EdmType if returned by the service,
returns the EdmType of the property. Generally used if accept is set
to JSON_NO_METADATA.
:type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type)
:param int timeout:
The server timeout, expressed in seconds.
:return: The retrieved entity.
:rtype: :class:`~azure.storage.table.models.Entity` | 2.75344 | 1.406783 | 1.95726 |
def insert_entity(self, table_name, entity, timeout=None):
    '''
    Inserts a new entity into the table. Throws if an entity with the same
    PartitionKey and RowKey already exists.

    When inserting an entity into a table, you must specify values for the
    PartitionKey and RowKey system properties. Together, these properties
    form the primary key and must be unique within the table. Both the
    PartitionKey and RowKey values must be string values; each key value
    may be up to 64 KB in size. If you are using an integer value for the
    key value, you should convert the integer to a fixed-width string,
    because they are canonically sorted. For example, you should convert
    the value 1 to 0000001 to ensure proper sorting.

    :param str table_name:
        The name of the table to insert the entity into.
    :param entity:
        The entity to insert. Could be a dict or an entity object.
        Must contain a PartitionKey and a RowKey.
    :type entity: a dict or :class:`~azure.storage.table.models.Entity`
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: The etag of the inserted entity.
    :rtype: str
    '''
    _validate_not_none('table_name', table_name)

    # Inserts address the table resource itself, not an entity path.
    insert_request = _insert_entity(entity)
    insert_request.host = self._get_host()
    insert_request.path = '/' + _to_str(table_name)
    insert_request.query += [('timeout', _int_to_str(timeout))]

    response = self._perform_request(insert_request)
    return _extract_etag(response)
PartitionKey and RowKey already exists.
When inserting an entity into a table, you must specify values for the
PartitionKey and RowKey system properties. Together, these properties
form the primary key and must be unique within the table. Both the
PartitionKey and RowKey values must be string values; each key value may
be up to 64 KB in size. If you are using an integer value for the key
value, you should convert the integer to a fixed-width string, because
they are canonically sorted. For example, you should convert the value
1 to 0000001 to ensure proper sorting.
:param str table_name:
The name of the table to insert the entity into.
:param entity:
The entity to insert. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: a dict or :class:`~azure.storage.table.models.Entity`
:param int timeout:
The server timeout, expressed in seconds.
:return: The etag of the inserted entity.
:rtype: str | 3.855045 | 1.411512 | 2.731146 |
def update_entity(self, table_name, entity, if_match='*', timeout=None):
    '''
    Updates an existing entity in a table. Throws if the entity does not
    exist. The update_entity operation replaces the entire entity and can
    be used to remove properties.

    :param str table_name:
        The name of the table containing the entity to update.
    :param entity:
        The entity to update. Could be a dict or an entity object.
        Must contain a PartitionKey and a RowKey.
    :type entity: a dict or :class:`~azure.storage.table.models.Entity`
    :param str if_match:
        The client may specify the ETag for the entity on the
        request in order to compare to the ETag maintained by the service
        for the purpose of optimistic concurrency. The update operation
        will be performed only if the ETag sent by the client matches the
        value maintained by the server, indicating that the entity has
        not been modified since it was retrieved by the client. To force
        an unconditional update, set If-Match to the wildcard character (*).
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: The etag of the entity.
    :rtype: str
    '''
    _validate_not_none('table_name', table_name)

    update_request = _update_entity(entity, if_match)
    update_request.host = self._get_host()
    # The entity keys in the path come from the entity payload itself.
    update_request.path = _get_entity_path(
        table_name, entity['PartitionKey'], entity['RowKey'])
    update_request.query += [('timeout', _int_to_str(timeout))]

    response = self._perform_request(update_request)
    return _extract_etag(response)
The update_entity operation replaces the entire entity and can be used to
remove properties.
:param str table_name:
The name of the table containing the entity to update.
:param entity:
The entity to update. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: a dict or :class:`~azure.storage.table.models.Entity`
:param str if_match:
The client may specify the ETag for the entity on the
request in order to compare to the ETag maintained by the service
for the purpose of optimistic concurrency. The update operation
will be performed only if the ETag sent by the client matches the
value maintained by the server, indicating that the entity has
not been modified since it was retrieved by the client. To force
an unconditional update, set If-Match to the wildcard character (*).
:param int timeout:
The server timeout, expressed in seconds.
:return: The etag of the entity.
:rtype: str | 3.05959 | 1.401605 | 2.182919 |
def delete_entity(self, table_name, partition_key, row_key,
                  if_match='*', timeout=None):
    '''
    Deletes an existing entity in a table. Throws if the entity does not
    exist.

    When an entity is successfully deleted, the entity is immediately
    marked for deletion and is no longer accessible to clients. The entity
    is later removed from the Table service during garbage collection.

    :param str table_name:
        The name of the table containing the entity to delete.
    :param str partition_key:
        The PartitionKey of the entity.
    :param str row_key:
        The RowKey of the entity.
    :param str if_match:
        The client may specify the ETag for the entity on the
        request in order to compare to the ETag maintained by the service
        for the purpose of optimistic concurrency. The delete operation
        will be performed only if the ETag sent by the client matches the
        value maintained by the server, indicating that the entity has
        not been modified since it was retrieved by the client. To force
        an unconditional delete, set If-Match to the wildcard character (*).
    :param int timeout:
        The server timeout, expressed in seconds.
    '''
    _validate_not_none('table_name', table_name)

    delete_request = _delete_entity(partition_key, row_key, if_match)
    delete_request.host = self._get_host()
    delete_request.path = _get_entity_path(table_name, partition_key, row_key)
    delete_request.query += [('timeout', _int_to_str(timeout))]

    # No return value: success is simply the absence of an exception.
    self._perform_request(delete_request)
When an entity is successfully deleted, the entity is immediately marked
for deletion and is no longer accessible to clients. The entity is later
removed from the Table service during garbage collection.
:param str table_name:
The name of the table containing the entity to delete.
:param str partition_key:
The PartitionKey of the entity.
:param str row_key:
The RowKey of the entity.
:param str if_match:
The client may specify the ETag for the entity on the
request in order to compare to the ETag maintained by the service
for the purpose of optimistic concurrency. The delete operation
will be performed only if the ETag sent by the client matches the
value maintained by the server, indicating that the entity has
not been modified since it was retrieved by the client. To force
an unconditional delete, set If-Match to the wildcard character (*).
:param int timeout:
The server timeout, expressed in seconds. | 2.868679 | 1.448751 | 1.980105 |
def insert_or_replace_entity(self, table_name, entity, timeout=None):
    '''
    Replaces an existing entity or inserts a new entity if it does not
    exist in the table. Because this operation can insert or update an
    entity, it is also known as an "upsert" operation.

    If insert_or_replace_entity is used to replace an entity, any
    properties from the previous entity will be removed if the new entity
    does not define them.

    :param str table_name:
        The name of the table in which to insert or replace the entity.
    :param entity:
        The entity to insert or replace. Could be a dict or an entity object.
        Must contain a PartitionKey and a RowKey.
    :type entity: a dict or :class:`~azure.storage.table.models.Entity`
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: The etag of the entity.
    :rtype: str
    '''
    _validate_not_none('table_name', table_name)

    upsert_request = _insert_or_replace_entity(entity)
    upsert_request.host = self._get_host()
    upsert_request.path = _get_entity_path(
        table_name, entity['PartitionKey'], entity['RowKey'])
    upsert_request.query += [('timeout', _int_to_str(timeout))]

    response = self._perform_request(upsert_request)
    return _extract_etag(response)
exist in the table. Because this operation can insert or update an
entity, it is also known as an "upsert" operation.
If insert_or_replace_entity is used to replace an entity, any properties
from the previous entity will be removed if the new entity does not define
them.
:param str table_name:
The name of the table in which to insert or replace the entity.
:param entity:
The entity to insert or replace. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: a dict or :class:`~azure.storage.table.models.Entity`
:param int timeout:
The server timeout, expressed in seconds.
:return: The etag of the entity.
:rtype: str | 3.232094 | 1.555 | 2.078517 |
def insert_or_merge_entity(self, table_name, entity, timeout=None):
    '''
    Merges an existing entity or inserts a new entity if it does not exist
    in the table.

    If insert_or_merge_entity is used to merge an entity, any properties
    from the previous entity will be retained if the request does not
    define or include them.

    :param str table_name:
        The name of the table in which to insert or merge the entity.
    :param entity:
        The entity to insert or merge. Could be a dict or an entity object.
        Must contain a PartitionKey and a RowKey.
    :type entity: a dict or :class:`~azure.storage.table.models.Entity`
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: The etag of the entity.
    :rtype: str
    '''
    _validate_not_none('table_name', table_name)

    merge_request = _insert_or_merge_entity(entity)
    merge_request.host = self._get_host()
    merge_request.path = _get_entity_path(
        table_name, entity['PartitionKey'], entity['RowKey'])
    merge_request.query += [('timeout', _int_to_str(timeout))]

    response = self._perform_request(merge_request)
    return _extract_etag(response)
in the table.
If insert_or_merge_entity is used to merge an entity, any properties from
the previous entity will be retained if the request does not define or
include them.
:param str table_name:
The name of the table in which to insert or merge the entity.
:param entity:
The entity to insert or merge. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: a dict or :class:`~azure.storage.table.models.Entity`
:param int timeout:
The server timeout, expressed in seconds.
:return: The etag of the entity.
:rtype: str | 3.298682 | 1.59879 | 2.063237 |
def get_request_table(self, request):
    '''
    Extracts the table name from request.path. The path has either
    "/mytable(...)" or "/mytable" format.

    request:
        the request to insert, update or delete entity
    '''
    # Drop the leading '/' and keep everything before the first '(',
    # if any; partition() handles both path formats in one expression.
    return request.path[1:].partition('(')[0]
"/mytable(...)" or "/mytable" format.
request:
the request to insert, update or delete entity | 7.686598 | 2.150496 | 3.574337 |
'''
Extracts PartitionKey from request.body if it is a POST request or from
request.path if it is not a POST request. Only insert operation request
is a POST request and the PartitionKey is in the request body.

request:
    the request to insert, update or delete entity

Raises AzureBatchValidationError when the PartitionKey cannot be found.
'''
if request.method == 'POST':
    # Insert: parse the Atom XML payload and pull the PartitionKey
    # property out of the entry's content element.
    doc = ETree.fromstring(request.body)
    part_key = doc.find('./atom:content/m:properties/d:PartitionKey', _etree_entity_feed_namespaces)
    if part_key is None:
        raise AzureBatchValidationError(_ERROR_CANNOT_FIND_PARTITION_KEY)
    return _get_etree_text(part_key)
else:
    # Update/merge/delete: the key is URL-encoded in the entity path,
    # e.g. /table(PartitionKey='pk',RowKey='rk') — slice out the text
    # between PartitionKey=' and the closing ',.
    uri = url_unquote(request.path)
    pos1 = uri.find('PartitionKey=\'')
    pos2 = uri.find('\',', pos1)
    if pos1 == -1 or pos2 == -1:
        raise AzureBatchValidationError(_ERROR_CANNOT_FIND_PARTITION_KEY)
    return uri[pos1 + len('PartitionKey=\''):pos2]
request.path if it is not a POST request. Only insert operation request
is a POST request and the PartitionKey is in the request body.
request:
the request to insert, update or delete entity | 4.470072 | 2.565755 | 1.742205 |
'''
Extracts RowKey from request.body if it is a POST request or from
request.path if it is not a POST request. Only insert operation request
is a POST request and the RowKey is in the request body.

request:
    the request to insert, update or delete entity

Raises AzureBatchValidationError when the RowKey cannot be found.
'''
if request.method == 'POST':
    # Insert: the RowKey lives in the Atom entry's properties element.
    doc = ETree.fromstring(request.body)
    row_key = doc.find('./atom:content/m:properties/d:RowKey', _etree_entity_feed_namespaces)
    if row_key is None:
        raise AzureBatchValidationError(_ERROR_CANNOT_FIND_ROW_KEY)
    return _get_etree_text(row_key)
else:
    # Other operations: slice the key out of the URL-decoded entity
    # path, between RowKey=' and the closing ') of the path.
    uri = url_unquote(request.path)
    pos1 = uri.find('RowKey=\'')
    pos2 = uri.find('\')', pos1)
    if pos1 == -1 or pos2 == -1:
        raise AzureBatchValidationError(_ERROR_CANNOT_FIND_ROW_KEY)
    row_key = uri[pos1 + len('RowKey=\''):pos2]
    return row_key
request.path if it is not a POST request. Only insert operation request
is a POST request and the Rowkey is in the request body.
request:
the request to insert, update or delete entity | 4.479091 | 2.596884 | 1.724794 |
def validate_request_table(self, request):
    '''
    Validates that all requests have the same table name. Set the table
    name if it is the first request for the batch operation.

    request:
        the request to insert, update or delete entity
    '''
    table = self.get_request_table(request)
    if not self.batch_table:
        # First request of the batch defines the table.
        self.batch_table = table
    elif table != self.batch_table:
        raise AzureBatchValidationError(_ERROR_INCORRECT_TABLE_IN_BATCH)
name if it is the first request for the batch operation.
request:
the request to insert, update or delete entity | 5.859806 | 2.403858 | 2.437667 |
def validate_request_partition_key(self, request):
    '''
    Validates that all requests have the same PartitionKey. Set the
    PartitionKey if it is the first request for the batch operation.

    request:
        the request to insert, update or delete entity
    '''
    partition_key = self.get_request_partition_key(request)
    if not self.batch_partition_key:
        # First request of the batch defines the partition.
        self.batch_partition_key = partition_key
    elif partition_key != self.batch_partition_key:
        raise AzureBatchValidationError(_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH)
PartitionKey if it is the first request for the batch operation.
request:
the request to insert, update or delete entity | 5.213264 | 2.310672 | 2.256168 |
def validate_request_row_key(self, request):
    '''
    Validates that all requests have different RowKeys and adds the
    RowKey to the list of RowKeys seen in this batch.

    request:
        the request to insert, update or delete entity

    Raises AzureBatchValidationError when the RowKey duplicates one
    already present in the batch.
    '''
    # Bug fix: the original only appended when the list was EMPTY (the
    # else bound to the outer if), so every row key after the first was
    # never recorded and later duplicates went undetected. Record every
    # validated key, as the docstring promises.
    row_key = self.get_request_row_key(request)
    if row_key in self.batch_row_keys:
        raise AzureBatchValidationError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH)
    self.batch_row_keys.append(row_key)
to existing RowKey list.
request:
the request to insert, update or delete entity | 5.698828 | 2.314209 | 2.462538 |
def begin_batch(self):
    '''
    Starts the batch operation. Initializes the batch state:

    is_batch:
        batch operation flag.
    batch_table:
        the table name of the batch operation.
    batch_partition_key:
        the PartitionKey of the batch requests.
    batch_row_keys:
        the RowKey list of added requests.
    batch_requests:
        the list of the requests.
    '''
    self.is_batch = True
    self.batch_table = self.batch_partition_key = ''
    self.batch_row_keys, self.batch_requests = [], []
is_batch:
batch operation flag.
batch_table:
the table name of the batch operation
batch_partition_key:
the PartitionKey of the batch requests.
batch_row_keys:
the RowKey list of adding requests.
batch_requests:
the list of the requests. | 5.323659 | 1.477075 | 3.60419 |
def insert_request_to_batch(self, request):
    '''
    Adds a request to the batch operation after validating it.

    request:
        the request to insert, update or delete entity
    '''
    # Validation order matters: table, then partition key, then row key —
    # each raises AzureBatchValidationError on a violation.
    for check in (self.validate_request_table,
                  self.validate_request_partition_key,
                  self.validate_request_row_key):
        check(request)
    self.batch_requests.append(request)
request:
the request to insert, update or delete entity | 5.856491 | 2.894801 | 2.023107 |
''' Commits the batch requests.

Assembles all queued requests into a single MIME multipart/mixed POST
(one outer "batch" part wrapping one "changeset" with every operation),
signs it, sends it, and raises on either an HTTP-level failure or a
failed operation inside the changeset.
'''
batch_boundary = b'batch_' + _new_boundary()
changeset_boundary = b'changeset_' + _new_boundary()

# Commits batch only if the requests list is not empty.
if self.batch_requests:
    request = HTTPRequest()
    request.method = 'POST'
    request.host = self.batch_requests[0].host
    request.path = '/$batch'
    request.headers = [
        ('Content-Type', 'multipart/mixed; boundary=' + \
            batch_boundary.decode('utf-8')),
        ('Accept', 'application/atom+xml,application/xml'),
        ('Accept-Charset', 'UTF-8')]

    # Outer batch part: declares the nested changeset boundary.
    request.body = b'--' + batch_boundary + b'\n'
    request.body += b'Content-Type: multipart/mixed; boundary='
    request.body += changeset_boundary + b'\n\n'

    content_id = 1

    # Adds each request body to the POST data as one changeset part,
    # serialized as a raw HTTP message (method, URI, headers, body).
    for batch_request in self.batch_requests:
        request.body += b'--' + changeset_boundary + b'\n'
        request.body += b'Content-Type: application/http\n'
        request.body += b'Content-Transfer-Encoding: binary\n\n'

        request.body += batch_request.method.encode('utf-8')
        request.body += b' http://'
        request.body += batch_request.host.encode('utf-8')
        request.body += batch_request.path.encode('utf-8')
        request.body += b' HTTP/1.1\n'
        request.body += b'Content-ID: '
        request.body += str(content_id).encode('utf-8') + b'\n'
        content_id += 1

        # Add different headers for different type requests.
        if not batch_request.method == 'DELETE':
            # Insert/update/merge carry an Atom entry payload; forward
            # only the If-Match header (at most one) plus the length.
            request.body += \
                b'Content-Type: application/atom+xml;type=entry\n'
            for name, value in batch_request.headers:
                if name == 'If-Match':
                    request.body += name.encode('utf-8') + b': '
                    request.body += value.encode('utf-8') + b'\n'
                    break
            request.body += b'Content-Length: '
            request.body += str(len(batch_request.body)).encode('utf-8')
            request.body += b'\n\n'
            request.body += batch_request.body + b'\n'
        else:
            for name, value in batch_request.headers:
                # If-Match should be already included in
                # batch_request.headers, but in case it is missing,
                # just add it.
                if name == 'If-Match':
                    request.body += name.encode('utf-8') + b': '
                    request.body += value.encode('utf-8') + b'\n\n'
                    break
            else:
                # for/else: runs only when no If-Match header was found.
                request.body += b'If-Match: *\n\n'

    # Close the changeset and the outer batch part.
    request.body += b'--' + changeset_boundary + b'--' + b'\n'
    request.body += b'--' + batch_boundary + b'--'

    request.path, request.query = _update_request_uri_query(request)
    request.headers = _update_storage_table_header(request)
    self.authentication.sign_request(request)

    # Submit the whole request as batch request.
    response = self.perform_request(request)
    if response.status >= 300:
        # This exception will be caught by the general error handler
        # and raised as an azure http exception
        raise HTTPError(response.status,
                        _ERROR_BATCH_COMMIT_FAIL,
                        self.respheader,
                        response.body)

    # http://www.odata.org/documentation/odata-version-2-0/batch-processing/
    # The body of a ChangeSet response is either a response for all the
    # successfully processed change request within the ChangeSet,
    # formatted exactly as it would have appeared outside of a batch,
    # or a single response indicating a failure of the entire ChangeSet.
    responses = self._parse_batch_response(response.body)
    if responses and responses[0].status >= 300:
        self._report_batch_error(responses[0])
def set_license(self, key):
    """Set the license on a redfish system.

    :param key: license key
    """
    license_service_uri = utils.get_subresource_path_by(
        self, ['Oem', 'Hpe', 'Links', 'LicenseService'])
    self._conn.post(license_service_uri, data={'LicenseKey': key})
:param key: license key | 7.083586 | 6.241598 | 1.134899 |
def virtual_media(self):
    """Property to provide reference to `VirtualMediaCollection` instance.

    It is calculated once when the first time it is queried. On refresh,
    this property gets reset.
    """
    vmedia_path = utils.get_subresource_path_by(self, 'VirtualMedia')
    return virtual_media.VirtualMediaCollection(
        self._conn, vmedia_path, redfish_version=self.redfish_version)
It is calculated once when the first time it is queried. On refresh,
this property gets reset. | 3.778207 | 4.278071 | 0.883157 |
# iSCSI boot configuration is not supported through this interface;
# ERRMSG is the module-level "not supported" message.
# NOTE(review): presumably an iLO-version-specific subclass overrides
# this with a real implementation — confirm against the rest of the module.
raise exception.IloCommandNotSupportedError(ERRMSG)
port='3260', auth_method=None, username=None,
password=None) | Set iscsi details of the system in uefi boot mode.
The initiator system is set with the target details like
IQN, LUN, IP, Port etc.
:param target_name: Target Name for iscsi.
:param lun: logical unit number.
:param ip_address: IP address of the target.
:param port: port of the target.
:param auth_method : either None or CHAP.
:param username: CHAP Username for authentication.
:param password: CHAP secret.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the bios boot mode. | 29.881395 | 31.601547 | 0.945567 |
def writeTraj(filename='trajectory.input', data=None, ageunit=0, tunit=0,
              rhounit=0, idNum=0):
    '''
    Method for writing Trajectory type ascii files.

    Parameters
    ----------
    filename : string
        The file where this data will be written.
    data : list
        A list of 1D data vectors with time, T and rho.
    ageunit : integer, optional
        If 1 AGEUNIT = SEC, if 0 AGEUNIT = YRS, if 2 AGEUNIT =
        logtimerev in yrs. The default is 0. logtimerev is log of
        time until end.
    tunit : integer, optional
        If 1 TUNIT = T9K, if 0 TUNIT = T8K. The default is 0.
    rhounit : integer, optional
        If 1 RHOUNIT = LOG, if 0 RHOUNIT = CGS. The default is 0.
    idNum : optional
        An optional id argument.

    Returns
    -------
    None when no data is supplied (after printing a warning).
    '''
    # Bug fix: the original used a mutable default (data=[]), which is
    # shared across calls; use None as the sentinel instead. `not data`
    # also catches an explicit None, which previously fell through and
    # crashed inside write().
    if not data:
        print('Please input correct data')
        print('returning None')
        return None

    # Build the header lines describing the unit conventions used.
    headers = []
    if ageunit == 1:
        headers.append('AGEUNIT = SEC')
    elif ageunit == 0:
        headers.append('AGEUNIT = YRS')
    elif ageunit == 2:
        headers.append('AGEUNIT = logtimerev/yrs')

    if tunit == 1:
        headers.append('TUNIT = T9K')
    elif tunit == 0:
        headers.append('TUNIT = T8K')

    if rhounit == 1:
        headers.append('RHOUNIT = LOG')
    elif rhounit == 0:
        headers.append('RHOUNIT = CGS')

    headers.append('ID = ' + str(idNum))

    # Delegate the actual file writing to the module-level write().
    write(filename, headers, ['time', 'T', 'rho'], data,
          ['YRS/SEC; T8K/T9K; CGS/LOG', "FORMAT: '(10x,A3)'"],
          trajectory=True)
rhounit=0, idNum=0) | Method for writeing Trajectory type ascii files files.
Parameters
----------
filename : string
The file where this data will be written.
data : list
A list of 1D data vectors with time, T and rho.
ageunit : integer, optional
If 1 ageunit = SEC, If 0 ageunit = YRS. If 2 agunit =
logtimerev in yrs. The default is 0. logtimerev is log of
time until end
tunit : integer, optional
If 1 TUNIT = T9K, if 0 TUNIT = T8K. The default is 0.
rhounit : integer, optional
If 1 RHOUNIT = LOG, if 0 RHOUNIT = CGS. The default is 0.
idNum : optional
An optional id argument | 4.576436 | 1.950822 | 2.345902 |
def get(self, attri):
    '''
    Method that dynamically determines the type of attribute that is
    passed into this method, and returns that attribute's associated
    data.

    Parameters
    ----------
    attri : string
        The attribute we are looking for.

    Returns
    -------
    The column data (via getColData) when attri names a data column,
    the header attributes when it names a header attribute, or None
    (after printing a warning) when it is unknown.
    '''
    if attri in self.dcols:
        return self.getColData(attri)
    if attri in self.hattrs:
        # Bug fix: the original returned the bare name `hattrs`, which
        # raised NameError; return the instance attribute instead.
        # NOTE(review): this may be intended as a per-attribute lookup
        # (e.g. self.hattrs[attri]) — confirm against callers.
        return self.hattrs
    print("That attribute does not exist in this File")
    print('Returning None')
    return None
passed into this method. Also it then returns that attribute's
associated data.
Parameters
----------
attri : string
The attribute we are looking for. | 5.846485 | 3.219802 | 1.81579 |
'''
Private method that reads in the header and column data.

Returns (header, dataCols): header is a list of stripped header lines
(or, for datatype 'trajectory', a dict of the file's KEY = VALUE pairs),
and dataCols maps each column name to an array of its values, converted
to float where possible and kept as strings otherwise.
'''
# Join the directory and file name, tolerating a trailing separator.
if sldir.endswith(os.sep):
    fileName = str(sldir)+str(fileName)
else:
    fileName = str(sldir)+os.sep+str(fileName)

fileLines=[] #list of lines in the file
header=[] #list of Header lines
dataCols=[] #Dictionary of data column names
data=[] #List of Data lists
cols=[] #List of column names

f=open(fileName,'r')
fileLines=f.readlines()
i=0
if self.datatype != 'trajectory':
    # Generic format: leading lines starting with header_char are the
    # header; the first non-header line holds the column names.
    while i<len(fileLines):
        if fileLines[i].startswith(self.header_char):
            tmp=fileLines[i].lstrip(self.header_char)
            header.append(tmp.strip())
        else:
            break
        i+=1
    cols=fileLines[i].split(sep)
    # Drop empty tokens left over from repeated separators.
    tmp=[]
    tmp1=[]
    for j in range(len(cols)):
        tmp1=cols[j].strip()
        if tmp1 !='':
            tmp.append(tmp1)
    cols=tmp
    i+=1
else:
    # Trajectory format: '#' lines are comments (the first one names
    # the columns) and 'KEY = VALUE' lines populate the header dict.
    header={}
    while fileLines[i].startswith('#') or '=' in fileLines[i]:
        if fileLines[i].startswith('#') and cols==[]:
            # First comment line: whitespace-separated column names.
            cols=fileLines[i].strip('#')
            cols=cols.strip()
            cols=cols.split()
        elif fileLines[i].startswith('#'):
            tmp1=fileLines[i].strip('#')
            tmp1=tmp1.strip()
            self.headerLines.append(tmp1)
        elif not fileLines[i].startswith('#'):
            tmp=fileLines[i].split('=')
            tmp[0]=tmp[0].strip()
            tmp[1]=tmp[1].strip()
            if header=={}:
                header={str(tmp[0]):str(tmp[1])}
            else:
                header[str(tmp[0])]=str(tmp[1])
        i+=1
# Remaining non-comment lines are whitespace-separated data rows.
while i<len(fileLines):
    if fileLines[i].startswith('#'):
        i=i+1
    else:
        tmp=fileLines[i].split()
        for j in range(len(tmp)):
            tmp[j]=tmp[j].strip()
        data.append(tmp)
        i+=1
# Re-filter each row, dropping any empty tokens.
tmp=[]
tmp1=[]
for j in range(len(data)):
    for k in range(len(data[j])):
        tmp1=data[j][k].strip()
        if tmp1 !='':
            tmp.append(tmp1)
    data[j]=tmp
    tmp=[]
# Transpose rows into per-column arrays keyed by column name; values
# become floats when they parse, otherwise stay as strings.
# NOTE(review): array() presumably comes from a numpy star-import at
# the top of the file — confirm.
tmp=[]
for j in range(len(cols)):
    for k in range(len(data)):
        try:
            a=float(data[k][j])
            tmp.append(a)
        except ValueError:
            tmp.append(data[k][j])
        #else:
        #    tmp.append(float(data[k][j]))
    tmp=array(tmp)
    if j == 0:
        dataCols={cols[j]:tmp}
    else:
        dataCols[cols[j]]=tmp
    tmp=[]
return header,dataCols
'''
Initial to final mass relation.

For each run, the surface H-1 profile of the second-to-last cycle is
scanned outwards; the mass coordinate of the first zone with
X(H-1) > 0.1 is taken as the final mass and plotted against the
initial mass ("mini") of the run.
'''
final_m=[]
ini_m=[]
for i in range(len(self.runs_H5_surf)):
    sefiles=se(self.runs_H5_out[i])
    ini_m.append(sefiles.get("mini"))
    h1=sefiles.get(int(sefiles.se.cycles[-2]),'H-1')
    mass=sefiles.get(int(sefiles.se.cycles[-2]),'mass')
    idx=-1
    # first zone where hydrogen becomes abundant marks the envelope bottom
    for k in range(len(h1)):
        if h1[k]>0.1:
            idx=k
            break
    final_m.append(mass[idx])
# NOTE(review): label uses the Z of the last run processed — assumed all
# runs in the set share the same metallicity; verify against callers.
label='Z='+str(sefiles.get('zini'))
plt.plot(ini_m,final_m,label=label,marker=marker,linestyle=linestyle)
plt.xlabel('$M_{Initial} [M_{\odot}]$',size=23)
plt.ylabel('$M_{Final} [M_{\odot}]$',size=23)
plt.ylabel('$M_{Final} [M_{\odot}]$',size=23) | def initial_finall_mass_relation(self,marker='o',linestyle='--') | INtiial to final mass relation | 4.169947 | 3.684368 | 1.131794 |
'''
For paper1 marco routine:
Numbers of remnant mass shell masses, exists also in mesa_set!

For each run the bottom of the H envelope — the first mass zone of the
last cycle with X(H-1) > 0.05 — is taken as the remnant mass and
printed next to the initial mass.
'''
inim=[]
remnm=[]
for i in range(len(self.runs_H5_surf)):
    m1p65_last=se(self.runs_H5_out[i])
    mass_dummy=m1p65_last.se.get(m1p65_last.se.cycles[len(m1p65_last.se.cycles)-1],'mass')
    top_of_envelope=mass_dummy[len(mass_dummy)-1]
    h_dummy=m1p65_last.se.get(m1p65_last.se.cycles[len(m1p65_last.se.cycles)-1],'iso_massf','H-1')
    # scan outwards for the first hydrogen-rich zone
    for j in range(len(mass_dummy)):
        if h_dummy[j] > 0.05:
            bottom_of_envelope = mass_dummy[j]
            break
    inim.append(m1p65_last.get("mini"))
    remnm.append(bottom_of_envelope)
print "M_initial | M_remn/bottom of envelope"
for i in range(len(inim)):
print inim[i],"|",remnm[i] | def final_bottom_envelope_set1(self) | For paper1 marco routine:
Numbers of remnant mass shell masses, exists also in mesa_set! | 5.929267 | 3.872764 | 1.531017 |
'''
For paper1 extension:
bottom_envelope
Numbers of remnant mass shell masses, exists also in mesa_set + star age!

For each run: the remnant mass is the bottom of the H envelope (first
zone with X(H-1) > 0.1) of the second-to-last cycle; the total
lifetime is the age of the last cycle converted to years; results are
printed as a table.
'''
inim=[]
remnm=[]
time11=[]
tottime=[]
c_core=[]
o_core=[]
small_co_core=[]
c_core_center=[]
for i in range(len(self.runs_H5_surf)):
    m1p65_last=se(self.runs_H5_out[i])
    mass_dummy=m1p65_last.se.get(m1p65_last.se.cycles[len(m1p65_last.se.cycles)-2],'mass')
    top_of_envelope=mass_dummy[len(mass_dummy)-1]
    h_dummy=m1p65_last.se.get(m1p65_last.se.cycles[len(m1p65_last.se.cycles)-2],'iso_massf','H-1')
    c_dummy=m1p65_last.se.get(m1p65_last.se.cycles[len(m1p65_last.se.cycles)-2],'iso_massf','C-12')
    o_dummy=m1p65_last.se.get(m1p65_last.se.cycles[len(m1p65_last.se.cycles)-2],'iso_massf','O-16')
    for j in range(len(mass_dummy)):
        if h_dummy[j] > 1e-1:
            bottom_of_envelope = mass_dummy[j]
            break
    inim.append(m1p65_last.get("mini"))
    remnm.append(bottom_of_envelope)
    ###Calculate the lifetime (MS)
    sefiles=m1p65_last
    cycs=[]
    # sample every 5th cycle to find when central C-12 first exceeds 0.1
    for k in range(5,len(sefiles.se.cycles),5):
        cycs.append(int(sefiles.se.cycles[k]))
    w=0
    for cyc in cycs:
        c12_center=sefiles.get(cyc,'C-12')[0]
        #c12_center=c12[w][0]
        w+=1
        if c12_center>1e-1:
            # 31557600 s = 1 Julian year; convert age to years
            time1=(sefiles.get(cyc,'age')*sefiles.get('age_unit'))/31557600.
            time11.append(time1)
            break
    # NOTE(review): this age is not multiplied by age_unit like time1
    # above — presumably age is already in seconds here; confirm.
    tottime.append(sefiles.get(int(sefiles.se.cycles[-1]),'age')/31557600.)
print "M_initial | M_remn/bottom of envelope | total lifetime"
for i in range(len(inim)):
print inim[i],"|",'{:.3E}'.format(remnm[i]),"|",'{:.3E}'.format(tottime[i]) | def remnant_lifetime_agb(self) | For paper1 extension:
bottom_envelope
Numbers of remnant mass shell masses, exists also in mesa_set + star age! | 4.418185 | 3.401252 | 1.298988 |
'''
Plots the C/O surface number fraction for every run in the set.

The number ratio is computed as 4/3 * X(C-12)/X(O-16) (mass-fraction
ratio scaled by the 12/16 mass ratio). The x axis is selectable via
``xaxis`` ('cycles', 'age' or 'mass'); ``t0_model`` gives a per-run
starting model index.
'''
if len(t0_model)==0:
    # default: start every run at model index 0
    t0_model = len(self.runs_H5_surf)*[0]
plt.figure(fig)
for i in range(len(self.runs_H5_surf)):
    sefiles=se(self.runs_H5_surf[i])
    cycles=range(int(sefiles.se.cycles[0]),int(sefiles.se.cycles[-1]),sparsity)
    mini=sefiles.get("mini")
    zini=sefiles.get("zini")
    label=str(mini)+'$M_{\odot}$, Z='+str(zini)
    if xaxis=='cycles':
        x=cycles
    if xaxis=='age':
        x=sefiles.get(cycles,'age')
        if age_years==True:
            # convert to years and shift so the chosen start model is t=0
            x=np.array(x)*sefiles.get('age_unit')/(365*24*3600)
        x = x - x[t0_model[i]]
    if xaxis=='mass':
        x=sefiles.get(cycles,'mass')
    x=x[t0_model[i]:]
    c12=sefiles.get(cycles,'C-12')[t0_model[i]:]
    o16=sefiles.get(cycles,'O-16')[t0_model[i]:]
    if withoutZlabel==True:
        plt.plot(x,4./3.*np.array(c12)/np.array(o16),label=label.split(',')[0],marker=marker[i],linestyle=linestyle[i],markevery=markersparsity,color=color[i])
    else:
        plt.plot(x,4./3.*np.array(c12)/np.array(o16),label=label,marker=marker[i],linestyle=linestyle[i],markevery=markersparsity,color=color[i])
if xaxis=='mass':
    # star mass decreases with time: reverse axis direction
    plt.xlim(7,0.5)
    #plt.gca().invert_xaxis()
plt.xlabel('$M/M_{\odot}$',fontsize=18)
plt.ylabel('C/O Ratio', fontsize=18)
plt.legend(loc=1) | def set_plot_CO_mass(self,fig=3123,xaxis='mass',linestyle=['-'],marker=['o'],color=['r'],age_years=True,sparsity=500,markersparsity=200,withoutZlabel=False,t0_model=[]) | PLots C/O surface number fraction | 2.918179 | 2.741885 | 1.064296 |
linestyle=200*['-']
import nugridse as mp
import utils as u
#print self.runs_H5_restart
for i in range(len(self.runs_H5_restart)):
    sefiles=mp.se(self.runs_H5_restart[i])
    cycle=cycles[i]
    if cycle==-1:
        # -1 means "use the last available cycle of this run"
        cycle=int(sefiles.se.cycles[-1])
    if mass_range[i][0] ==0 and mass_range[i][1]==0:
        # default mass range: from the centre to the stellar surface
        mass_range[i][1]=sefiles.get(cycle,'mass')[-1]
    sefiles.read_iso_abund_marco(mass_range[i],cycle)
    u.stable_specie()
    sefiles.decay(sefiles.mass_frac)
    idx_species=[]
    for k in range(len(isotopes)):
        # convert 'C-12' style names to the 'C   12' scheme used by
        # utils.back_ind (element symbol upper-cased, padded to 5 chars)
        other_name_scheme=isotopes[k].split("-")[0].upper()+(5-len(isotopes[k])+1)*" "+isotopes[k].split("-")[1]
        #other_name_scheme=other_name_scheme.capitalize()
        idx_specie=u.back_ind[other_name_scheme]
        idx_species.append(idx_specie)
    # gather the decayed abundance of each requested species at every
    # mass coordinate
    mass_abu_array=[]
    for idx_specie in idx_species:
        mass_abu_array.append([])
        for idx_mass in range(len(mp.decayed_multi_d)):
            mass_abu_array[-1].append(mp.decayed_multi_d[idx_mass][idx_specie])
    #plotting
    plt.figure(self.run_dirs_name[i])
    #print len(mp.used_masses),len(mass_abu_array[0])
    #print mass_abu_array[0]
    for k in range(len(isotopes)):
        plt.plot(mp.used_masses,mass_abu_array[k],linestyle=linestyle[k],label=isotopes[k])
    plt.legend()
    plt.yscale('log')
    #print sefiles.get(cycle,'mass')[-1]
    plt.xlabel('M/Msun')
    plt.ylabel('$X_i$')
    plt.xlim(mass_range[i][0],mass_range[i][1])
    if (ylim[i][0]>0 or ylim[i][1]>0) or (ylim[i][0]>0 and ylim[i][1]>0):
        plt.ylim(ylim[i][0],ylim[i][1])
    if len(save_dir)>0:
        star_mass=sefiles.get("mini")
        star_z=sefiles.get("zini")
plt.savefig(save_dir+'/'+self.run_dirs_name[i]+'_decay_profiles.png') | def set_plot_profile_decay(self,cycles=20*[-1],mass_range=20*[[0,0]],ylim=20*[[0,0]],isotopes=[],linestyle=[],save_dir=''):
'''
Plots HRDs
end_model - array, control how far in models a run is plottet, if -1 till end
symbs_1 - set symbols of runs
'''
if len(linestyle)==0 | Plots HRDs
end_model - array, control how far in models a run is plottet, if -1 till end
symbs_1 - set symbols of runs | 3.44948 | 3.455914 | 0.998138 |
linestyle=200*['-']
import nugridse as mp
import utils as u
print self.runs_H5_restart
massfrac_all=[]
iso_all=[]
for i in range(len(self.runs_H5_restart)):
sefiles=mp.se(self.runs_H5_restart[i])
cycle=cycles[i]
if cycle==-1:
cycle=int(sefiles.se.cycles[-1])
if mass_range[i][0] ==0 and mass_range[i][1]==0:
mass_range[i][1]=sefiles.get(cycle,'mass')[-1]
sefiles.read_iso_abund_marco(mass_range[i],cycle)
u.stable_specie()
sefiles.decay(sefiles.mass_frac)
idx_species=[]
massfrac=[]
iso=[]
if not isotopes[0]=='all':
for k in range(len(isotopes)):
other_name_scheme=isotopes[k].split("-")[0].upper()+(5-len(isotopes[k])+1)*" "+isotopes[k].split("-")[1]
#other_name_scheme=other_name_scheme.capitalize()
idx_specie=u.back_ind[other_name_scheme]
idx_species.append(idx_specie)
massfrac.append(average_massfrac_decay[idx_specie])
iso=isotopes
else:
massfrac=mp.average_mass_frac_decay
other_name_scheme=u.back_ind
iso=[]
import re
for kk in range(len(other_name_scheme)):
list1=re.split('(\d+)',other_name_scheme[kk])
newname=list1[0].capitalize()+'-'+list1[1]
iso.append(newname)
massfrac_all.append(massfrac)
iso_all.append(iso)
return iso_all,massfrac_all | def set_get_abu_distr_decay_old(self,cycles=20*[-1],mass_range=20*[[0,0]],ylim=20*[[0,0]],isotopes=['all'],linestyle=[],save_dir=''):
'''
Plots HRDs
end_model - array, control how far in models a run is plottet, if -1 till end
symbs_1 - set symbols of runs
'''
if len(linestyle)==0 | Plots HRDs
end_model - array, control how far in models a run is plottet, if -1 till end
symbs_1 - set symbols of runs | 4.27055 | 4.244504 | 1.006136 |
'''
Uses function cores in nugridse.py.

For each run the core masses of the last cycle are gathered via
sefiles.cores() and appended as a LaTeX-style ('&'-separated) table
row to ``filename``.
'''
core_info=[]
minis=[]
for i in range(len(self.runs_H5_surf)):
    sefiles=se(self.runs_H5_out[i])
    mini=sefiles.get('mini')
    minis.append(mini)
    incycle=int(sefiles.se.cycles[-1])
    core_info.append(sefiles.cores(incycle=incycle))
print_info=''
for i in range(len(self.runs_H5_surf)):
    if i ==0:
        # print the column description once
        print 'Following returned for each initial mass'
        print core_info[i][1]
    #print '----Mini: ',minis[i],'------'
    print_info+=(str(minis[i])+' & ')
    info=core_info[i][0]
    for k in range(len(info)):
        print_info+=('{:.3E}'.format(float(core_info[i][0][k]))+' & ')
    print_info=(print_info+'\n')
    #print core_info[i][2]
f1=open(filename,'a')
f1.write(print_info)
f1.close() | def set_cores_massive(self,filename='core_masses_massive.txt') | Uesse function cores in nugridse.py | 4.818819 | 4.003379 | 1.203688 |
'''
Outputs burning stages as done in burnstage_upgrade (nugridse).

Collects sefiles.burnstage_upgrade() for every run and prints the
result per initial mass.
'''
burn_info=[]
burn_mini=[]
for i in range(len(self.runs_H5_surf)):
    sefiles=se(self.runs_H5_out[i])
    burn_info.append(sefiles.burnstage_upgrade())
    mini=sefiles.get('mini')
    #zini=sefiles.get('zini')
    burn_mini.append(mini)
for i in range(len(self.runs_H5_surf)):
    print 'Following returned for each initial mass'
    print '[burn_cycles,burn_ages, burn_abun, burn_type,burn_lifetime]'
    print '----Mini: ',burn_mini[i],'------'
print burn_info[i] | def set_burnstages_upgrade_massive(self) | Outputs burnign stages as done in burningstages_upgrade (nugridse) | 9.4445 | 6.187092 | 1.526484 |
linestyle=200*['-']
plt.figure('CC evol')
for i in range(len(self.runs_H5_surf)):
    sefiles=se(self.runs_H5_out[i])
    t1_model=-1
    sefiles.get('temperature')
    sefiles.get('density')
    mini=sefiles.get('mini')
    zini=sefiles.get('zini')
    model=sefiles.se.cycles
    model_list=[]
    for k in range(0,len(model),1):
        model_list.append(model[k])
    rho1=sefiles.get(model_list,'rho') #[:(t1_model-t0_model)]
    T1=sefiles.get(model_list,'temperature')#[:(t1_model-t0_model)]
    rho=[]
    T=[]
    T_unit=sefiles.get('temperature_unit')
    labeldone=False
    # track the per-cycle maxima of T9 and rho over the whole evolution
    for k in range(len(model_list)):
        t9=np.array(T1[k])*T_unit/1e9
        T.append(max(t9))
        rho.append(max(rho1[k]))
    label=str(mini)+'$M_{\odot}$, Z='+str(zini)
    plt.plot(T,rho,label=label,color=color[i],marker=marker[i],markevery=markevery)
plt.xlabel('$T_{9,max} (GK)$')
plt.ylabel(r'$\rho [cm^{-3}]$')
plt.yscale('log')
plt.xscale('log')
plt.legend(loc=2) | def set_plot_CC_T_rho_max(self,linestyle=[],burn_limit=0.997,color=['r'],marker=['o'],markevery=500):
'''
Plots
end_model - array, control how far in models a run is plottet, if -1 till end
symbs_1 - set symbols of runs
'''
if len(linestyle)==0 | Plots
end_model - array, control how far in models a run is plottet, if -1 till end
symbs_1 - set symbols of runs | 4.032911 | 4.078797 | 0.98875 |
linestyle=200*['-']
plt.figure('CC evol')
for i in range(len(self.runs_H5_surf)):
    sefiles=se(self.runs_H5_out[i])
    t1_model=-1
    sefiles.get('temperature')
    sefiles.get('density')
    mini=sefiles.get('mini')
    zini=sefiles.get('zini')
    model=sefiles.se.cycles
    model_list=[]
    for k in range(0,len(model),1):
        model_list.append(model[k])
    rho1=sefiles.get(model_list,'rho') #[:(t1_model-t0_model)]
    T1=sefiles.get(model_list,'temperature')#[:(t1_model-t0_model)]
    rho=[]
    T=[]
    T_unit=sefiles.get('temperature_unit')
    labeldone=False
    for k in range(len(model_list)):
        t9=np.array(T1[k])*T_unit/1e9
        T_delrem=[]
        rho_delrem=[]
        if k ==0:
            T.append(max(t9))
            rho.append(max(rho1[k]))
            # first cycle: plot zones BELOW 1e3 — sense inverted w.r.t.
            # the later cycles; presumably intentional, verify.
            for h in range(len(rho1[k])):
                if rho1[k][h] < 1e3:
                    T_delrem.append(t9[h])
                    rho_delrem.append(rho1[k][h])
            if nolabelZ==True:
                plt.plot(T_delrem,rho_delrem,label=self.extra_label[i].split(',')[0],color=color[i],marker=marker[i],markevery=markevery)
            else:
                plt.plot(T_delrem,rho_delrem,label=self.extra_label[i],color=color[i],marker=marker[i],markevery=markevery)
        else:
            # later cycles are plotted only while the evolution still
            # reaches new maxima of rho or T9; otherwise stop.
            if (max(rho)<max(rho1[k]) or max(T)<max(t9)):
                for h in range(len(rho1[k])):
                    if rho1[k][h] > 1e3:
                        T_delrem.append(t9[h])
                        rho_delrem.append(rho1[k][h])
                if labeldone==True:
                    plt.plot(T_delrem,rho_delrem,color=color[i],marker=marker[i],markevery=markevery)
                else:
                    # only the first plotted cycle gets a legend entry
                    label=str(mini)+'$M_{\odot}$, Z='+str(zini)
                    if nolabelZ==True:
                        plt.plot(T_delrem,rho_delrem,label=label.split(',')[0],color=color[i],marker=marker[i],markevery=markevery)
                    else:
                        plt.plot(T_delrem,rho_delrem,label=label,color=color[i],marker=marker[i],markevery=markevery)
                    labeldone=True
                T.append(max(t9))
                rho.append(max(rho1[k]))
            else:
                break
    #else:
plt.xlabel('$T_9 [GK]$',size=22)
plt.ylabel(r'$\rho [cm^{-3}]$',size=22)
plt.yscale('log')
plt.xscale('log')
plt.legend(loc=2) | def set_plot_CC_T_rho(self,linestyle=[],burn_limit=0.997,color=['r'],marker=['o'],nolabelZ=False,markevery=500):
'''
Plots HRDs
end_model - array, control how far in models a run is plottet, if -1 till end
symbs_1 - set symbols of runs
'''
if len(linestyle)==0 | Plots HRDs
end_model - array, control how far in models a run is plottet, if -1 till end
symbs_1 - set symbols of runs | 2.582475 | 2.589892 | 0.997136 |
'''Validates the request body passed in and converts it to bytes
if our policy allows it.'''
# A missing body is treated as an empty payload.
if param_value is None:
    return b''
# Already bytes: pass through unchanged; anything else is rejected below.
if isinstance(param_value, bytes):
    return param_value
raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name)) | def _get_request_body_bytes_only(param_name, param_value) | Validates the request body passed in and converts it to bytes
if our policy allows it. | 5.804671 | 3.012633 | 1.926777 |
# Read the attribute; if the Redfish/Sushy lookup fails, log the error
# at debug level and fall back to `default` instead of raising.
try:
    return getattr(resource, attribute_name)
except (sushy.exceptions.SushyError,
        exception.MissingAttributeError) as e:
    msg = (('The Redfish controller failed to get the '
            'attribute %(attribute)s from resource %(resource)s. '
            'Error %(error)s') % {'error': str(e),
                                  'attribute': attribute_name,
                                  'resource':
                                  resource.__class__.__name__})
    LOG.debug(msg)
    return default
return default | def _get_attribute_value_of(resource, attribute_name, default=None) | Gets the value of attribute_name from the resource
It catches the exception, if any, while retrieving the
value of attribute_name from resource and returns default.
:param resource: The resource object
:attribute_name: Property of the resource
:returns the property value if no error encountered
else return 0. | 3.064191 | 3.016099 | 1.015945 |
# Determine the size (in GB) of the largest logical volume, or — if no
# volume is found — the largest physical disk, across the system's
# SmartStorage, Storage and SimpleStorage resources.
local_max_bytes = 0
logical_max_mib = 0
volume_max_bytes = 0
physical_max_mib = 0
drives_max_bytes = 0
simple_max_bytes = 0
# Gets the resources and properties
# its quite possible for a system to lack the resource, hence its
# URI may also be lacking.
# Check if smart_storage resource exist at the system
smart_resource = _get_attribute_value_of(system_obj, 'smart_storage')
# Check if storage resource exist at the system
storage_resource = _get_attribute_value_of(system_obj, 'storages')
if smart_resource is not None:
    logical_max_mib = _get_attribute_value_of(
        smart_resource, 'logical_drives_maximum_size_mib', default=0)
if storage_resource is not None:
    volume_max_bytes = _get_attribute_value_of(
        storage_resource, 'volumes_maximum_size_bytes', default=0)
# Get the largest volume from the system.
local_max_bytes = utils.max_safe([(logical_max_mib * 1024 * 1024),
                                  volume_max_bytes])
# if volume is not found, then traverse through the possible disk drives
# and get the biggest disk.
if local_max_bytes == 0:
    if smart_resource is not None:
        physical_max_mib = _get_attribute_value_of(
            smart_resource, 'physical_drives_maximum_size_mib', default=0)
    if storage_resource is not None:
        drives_max_bytes = _get_attribute_value_of(
            storage_resource, 'drives_maximum_size_bytes', default=0)
    # Check if the SimpleStorage resource exist at the system.
    simple_resource = _get_attribute_value_of(system_obj,
                                              'simple_storages')
    if simple_resource is not None:
        simple_max_bytes = _get_attribute_value_of(
            simple_resource, 'maximum_size_bytes', default=0)
    local_max_bytes = utils.max_safe([(physical_max_mib * 1024 * 1024),
                                      drives_max_bytes, simple_max_bytes])
# Convert the received size to GB and reduce the value by 1 Gb as
# ironic requires the local_gb to be returned 1 less than actual size.
local_gb = 0
if local_max_bytes > 0:
    local_gb = int(local_max_bytes / (1024 * 1024 * 1024)) - 1
else:
    msg = ('The maximum size for the hard disk or logical '
           'volume could not be determined.')
    LOG.debug(msg)
return local_gb | def get_local_gb(system_obj) | Gets the largest volume or the largest disk
:param system_obj: The HPESystem object.
:returns the size in GB | 2.895013 | 2.83986 | 1.019421 |
# Report whether the system has any SSD drive, consulting the
# SmartStorage resource first and the generic Storage resource second.
smart_value = False
storage_value = False
smart_resource = _get_attribute_value_of(system_obj, 'smart_storage')
if smart_resource is not None:
    smart_value = _get_attribute_value_of(
        smart_resource, 'has_ssd', default=False)
if smart_value:
    return smart_value
# Its returned before just to avoid hitting BMC if we have
# already got the SSD device above.
storage_resource = _get_attribute_value_of(system_obj, 'storages')
if storage_resource is not None:
    storage_value = _get_attribute_value_of(
        storage_resource, 'has_ssd', default=False)
return storage_value | def has_ssd(system_obj) | Gets if the system has any drive as SSD drive
:param system_obj: The HPESystem object.
:returns True if system has SSD drives. | 3.994038 | 4.211201 | 0.948432 |
# Report whether the Storage resource advertises an NVMe SSD drive;
# defaults to False when the resource or attribute is absent.
storage_value = False
storage_resource = _get_attribute_value_of(system_obj, 'storages')
if storage_resource is not None:
    storage_value = _get_attribute_value_of(
        storage_resource, 'has_nvme_ssd', default=False)
return storage_value | def has_nvme_ssd(system_obj) | Gets if the system has any drive as NVMe SSD drive
:param system_obj: The HPESystem object.
:returns True if system has SSD drives and protocol is NVMe. | 3.883762 | 4.706912 | 0.825119 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.