sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def read_raid(self, raid_config=None):
    """Read the logical drives from the system.

    :param raid_config: None or a dictionary containing target raid
        configuration data. This data structure should be as follows:
        raid_config = {'logical_disks': [{'raid_level': 1, 'size_gb': 100,
                       'physical_disks': ['6I:1:5'],
                       'controller': 'HPE Smart Array P408i-a SR Gen10'},
                       <info-for-logical-disk-2>]}
    :returns: A dictionary containing list of logical disks
    """
    self.check_smart_storage_config_ids()
    # A caller that just created a RAID passes the target configuration
    # so the read can be matched against it; after a delete there is no
    # configuration to pass, so fall back to the post-delete read path.
    if not raid_config:
        return self._post_delete_read_raid()
    return self._post_create_read_raid(raid_config=raid_config)
Read the logical drives from the system :param raid_config: None or a dictionary containing target raid configuration data. This data structure should be as follows: raid_config = {'logical_disks': [{'raid_level': 1, 'size_gb': 100, 'physical_disks': ['6I:1:5'], 'controller': 'HPE Smart Array P408i-a SR Gen10'}, <info-for-logical-disk-2>]} :returns: A dictionary containing list of logical disks
entailment
def _perform_request(self, request, parser=None, parser_args=None, operation_context=None):
    '''
    Sends the request and returns the response, retrying as governed by
    self.retry. Catches HTTPError and hands it to the error handler.

    :param request: The HTTPRequest to execute.
    :param parser: Optional callable applied to the successful response;
        its result is returned. If None, the method returns None on success.
    :param parser_args: Optional extra positional arguments appended after
        the response when invoking parser.
    :param operation_context: Optional _OperationContext shared across the
        requests of one logical operation (used for location locking).
    :returns: parser's result, or None when no parser is given.
    '''
    operation_context = operation_context or _OperationContext()
    retry_context = RetryContext()

    # Apply the appropriate host based on the location mode
    self._apply_host(request, operation_context, retry_context)

    # Apply common settings to the request
    _update_request(request)

    while(True):
        try:
            try:
                # Execute the request callback
                if self.request_callback:
                    self.request_callback(request)

                # Add date and auth after the callback so date doesn't get too old and
                # authentication is still correct if signed headers are added in the
                # request callback. This also ensures retry policies with long back
                # offs will work as it resets the time sensitive headers.
                _add_date_header(request)
                self.authentication.sign_request(request)

                # Set the request context
                retry_context.request = request

                # Perform the request
                response = self._httpclient.perform_request(request)

                # Execute the response callback
                if self.response_callback:
                    self.response_callback(response)

                # Set the response context
                retry_context.response = response

                # Parse and wrap HTTP errors in AzureHttpError which inherits
                # from AzureException
                if response.status >= 300:
                    # This exception will be caught by the general error handler
                    # and raised as an azure http exception
                    _http_error_handler(HTTPError(response.status, response.message, response.headers, response.body))

                # Parse the response
                if parser:
                    if parser_args:
                        args = [response]
                        args.extend(parser_args)
                        return parser(*args)
                    else:
                        return parser(response)
                else:
                    return
            except AzureException as ex:
                # Already an Azure error: re-raise so the outer handler decides
                # whether to retry it.
                raise ex
            except Exception as ex:
                if sys.version_info >= (3,):
                    # Automatic chaining in Python 3 means we keep the trace
                    # NOTE(review): assumes ex.args is non-empty — an exception
                    # with no args would raise IndexError here; confirm.
                    raise AzureException(ex.args[0])
                else:
                    # There isn't a good solution in 2 for keeping the stack trace
                    # in general, or that will not result in an error in 3
                    # However, we can keep the previous error type and message
                    # TODO: In the future we will log the trace
                    msg = ""
                    if len(ex.args) > 0:
                        msg = ex.args[0]
                    raise AzureException('{}: {}'.format(ex.__class__.__name__, msg))
        except AzureException as ex:
            # Decryption failures (invalid objects, invalid algorithms, data
            # unencrypted in strict mode, etc) will not be resolved with retries.
            if str(ex) == _ERROR_DECRYPTION_FAILURE:
                raise ex
            # Determine whether a retry should be performed and if so, how
            # long to wait before performing retry.
            retry_interval = self.retry(retry_context)
            if retry_interval is not None:
                # Execute the callback
                if self.retry_callback:
                    self.retry_callback(retry_context)

                # Sleep for the desired retry interval
                sleep(retry_interval)
            else:
                raise ex
        finally:
            # If this is a location locked operation and the location is not set,
            # this is the first request of that operation. Set the location to
            # be used for subsequent requests in the operation.
            if operation_context.location_lock and not operation_context.host_location:
                operation_context.host_location = {retry_context.location_mode: request.host}
Sends the request and return response. Catches HTTPError and hands it to error handler
entailment
def _convert_json_to_entity(entry_element, property_resolver):
    '''
    Convert a parsed json response to an Entity.

    The entity format is:
    {
       "Address":"Mountain View",
       "Age":23,
       "AmountDue":200.23,
       "CustomerCode@odata.type":"Edm.Guid",
       "CustomerCode":"c9da6455-213d-42c9-9a79-3e9149a57833",
       "CustomerSince@odata.type":"Edm.DateTime",
       "CustomerSince":"2008-07-10T00:00:00",
       "IsActive":true,
       "NumberOfOrders@odata.type":"Edm.Int64",
       "NumberOfOrders":"255",
       "PartitionKey":"mypartitionkey",
       "RowKey":"myrowkey"
    }

    :param entry_element: dict parsed from the json response body.
    :param property_resolver: optional callable
        (pk, rk, name, value, service_edm_type) -> EdmType used to type
        properties when the payload carries no @odata.type annotation.
    :returns: the populated Entity.
    :raises AzureException: if the resolved type is not a valid edm type,
        or if the resolver returned a type the value cannot convert to.
    '''
    entity = Entity()

    # Split the raw payload into plain properties, per-property edm type
    # annotations ("Name@odata.type"), and odata metadata ("odata.*").
    properties = {}
    edmtypes = {}
    odata = {}
    for name, value in entry_element.items():
        if name.startswith('odata.'):
            odata[name[6:]] = value
        elif name.endswith('@odata.type'):
            edmtypes[name[:-11]] = value
        else:
            properties[name] = value

    # Partition key is a known property
    partition_key = properties.pop('PartitionKey', None)
    if partition_key:
        entity['PartitionKey'] = partition_key

    # Row key is a known property
    row_key = properties.pop('RowKey', None)
    if row_key:
        entity['RowKey'] = row_key

    # Timestamp is a known property
    timestamp = properties.pop('Timestamp', None)
    if timestamp:
        entity['Timestamp'] = _from_entity_datetime(timestamp)

    for name, value in properties.items():
        mtype = edmtypes.get(name)  # fixed: removed stray trailing semicolon

        # use the property resolver if present
        if property_resolver:
            mtype = property_resolver(partition_key, row_key, name, value, mtype)

            # throw if the type returned is not a valid edm type
            if mtype and mtype not in _EDM_TYPES:
                raise AzureException(_ERROR_TYPE_NOT_SUPPORTED.format(mtype))

        # Add type for Int32. Deliberately an exact type check so that bool
        # (a subclass of int) is not treated as Int32.
        if type(value) is int:
            mtype = EdmType.INT32

        # no type info, property should parse automatically
        if not mtype:
            entity[name] = value
        else:
            # need an object to hold the property
            # (renamed from `property` to avoid shadowing the builtin)
            conv = _ENTITY_TO_PYTHON_CONVERSIONS.get(mtype)
            if conv is not None:
                try:
                    prop = conv(value)
                except Exception as e:
                    # throw if the type returned by the property resolver
                    # cannot be used in the conversion
                    if property_resolver:
                        raise AzureException(
                            _ERROR_INVALID_PROPERTY_RESOLVER.format(name, value, mtype))
                    else:
                        raise e
            else:
                prop = EntityProperty(mtype, value)
            entity[name] = prop

    # extract etag from entry; a Timestamp-derived weak etag takes precedence
    etag = odata.get('etag')
    if timestamp:
        etag = 'W/"datetime\'' + url_quote(timestamp) + '\'"'
    entity['etag'] = etag

    return entity
Convert json response to entity. The entity format is: { "Address":"Mountain View", "Age":23, "AmountDue":200.23, "CustomerCode@odata.type":"Edm.Guid", "CustomerCode":"c9da6455-213d-42c9-9a79-3e9149a57833", "CustomerSince@odata.type":"Edm.DateTime", "CustomerSince":"2008-07-10T00:00:00", "IsActive":true, "NumberOfOrders@odata.type":"Edm.Int64", "NumberOfOrders":"255", "PartitionKey":"mypartitionkey", "RowKey":"myrowkey" }
entailment
def _convert_json_response_to_tables(response):
    '''
    Deserialize an HTTP response into a list of Table objects, attaching
    any continuation marker found in the response headers as next_marker.
    '''
    if response is None:
        return response

    result = _list()
    result.next_marker = _get_continuation_from_response_headers(response).get('NextTableName')

    payload = loads(response.body.decode('utf-8'))
    if 'TableName' in payload:
        # Single-table payload: one table described at the top level.
        names = [payload['TableName']]
    else:
        # List payload: each element under 'value' describes one table.
        names = [element['TableName'] for element in payload['value']]

    for table_name in names:
        entry = Table()
        entry.name = table_name
        result.append(entry)

    return result
Converts the response to tables class.
entailment
def _convert_json_response_to_entities(response, property_resolver):
    '''
    Converts the response to a list of entities, attaching any
    continuation marker from the response headers as next_marker.

    :param response: the HTTP response, or None.
    :param property_resolver: optional resolver passed through to
        _convert_json_to_entity for typing untyped properties.
    :returns: a _list of Entity objects, or None if response is None.
    '''
    if response is None:
        return response

    entities = _list()
    entities.next_marker = _get_continuation_from_response_headers(response)

    root = loads(response.body.decode('utf-8'))
    if 'value' in root:
        for entity in root['value']:
            entities.append(_convert_json_to_entity(entity, property_resolver))
    else:
        # Single-entity payload: the root object itself is the entity.
        # Fixed: previously passed the undefined name `entity` here,
        # raising NameError on any single-entity response.
        entities.append(_convert_json_to_entity(root, property_resolver))

    return entities
Converts the response to entities.
entailment
def _extract_etag(response): ''' Extracts the etag from the response headers. ''' if response and response.headers: for name, value in response.headers: if name.lower() == 'etag': return value return None
Extracts the etag from the response headers.
entailment
def generate_table(self, table_name, permission=None, expiry=None, start=None,
                   id=None, ip=None, protocol=None, start_pk=None, start_rk=None,
                   end_pk=None, end_rk=None):
    '''
    Generates a shared access signature for the table.
    Use the returned signature with the sas_token parameter of TableService.

    :param str table_name: Name of table.
    :param TablePermissions permission:
        The permissions associated with the shared access signature. The
        user is restricted to operations allowed by the permissions.
        Required unless an id is given referencing a stored access policy
        which contains this field; must be omitted if specified there.
    :param expiry:
        The time at which the shared access signature becomes invalid.
        Required unless an id references a stored access policy containing
        this field; must be omitted if specified there. Azure will always
        convert values to UTC; naive datetimes are assumed to be UTC.
    :type expiry: date or str
    :param start:
        The time at which the shared access signature becomes valid. If
        omitted, assumed to be the time the storage service receives the
        request. Azure will always convert values to UTC; naive datetimes
        are assumed to be UTC.
    :type start: date or str
    :param str id:
        A unique value up to 64 characters in length that correlates to a
        stored access policy.
    :param str ip:
        An IP address or range of addresses from which to accept requests;
        requests from other addresses are not authenticated.
    :param str protocol:
        The protocol permitted for a request. Default is https,http.
        See :class:`~azure.storage.models.Protocol` for possible values.
    :param str start_pk:
        The minimum partition key accessible with this signature.
        startpk must accompany startrk. Key values are inclusive.
    :param str start_rk:
        The minimum row key accessible with this signature.
        startpk must accompany startrk. Key values are inclusive.
    :param str end_pk:
        The maximum partition key accessible with this signature.
        endpk must accompany endrk. Key values are inclusive.
    :param str end_rk:
        The maximum row key accessible with this signature.
        endpk must accompany endrk. Key values are inclusive.
    '''
    helper = _SharedAccessHelper()
    helper.add_base(permission, expiry, start, ip, protocol)
    helper.add_id(id)
    helper.add_table_access_ranges(table_name, start_pk, start_rk, end_pk, end_rk)
    helper.add_resource_signature(self.account_name, self.account_key,
                                  'table', table_name)
    return helper.get_token()
Generates a shared access signature for the table. Use the returned signature with the sas_token parameter of TableService. :param str table_name: Name of table. :param TablePermissions permission: The permissions associated with the shared access signature. The user is restricted to operations allowed by the permissions. Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. :param expiry: The time at which the shared access signature becomes invalid. Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type expiry: date or str :param start: The time at which the shared access signature becomes valid. If omitted, start time for this call is assumed to be the time when the storage service receives the request. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type start: date or str :param str id: A unique value up to 64 characters in length that correlates to a stored access policy. To create a stored access policy, use set_blob_service_properties. :param str ip: Specifies an IP address or a range of IP addresses from which to accept requests. If the IP address from which the request originates does not match the IP address or address range specified on the SAS token, the request is not authenticated. For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS restricts the request to those IP addresses. :param str protocol: Specifies the protocol permitted for a request made. The default value is https,http. See :class:`~azure.storage.models.Protocol` for possible values. 
:param str start_pk: The minimum partition key accessible with this shared access signature. startpk must accompany startrk. Key values are inclusive. If omitted, there is no lower bound on the table entities that can be accessed. :param str start_rk: The minimum row key accessible with this shared access signature. startpk must accompany startrk. Key values are inclusive. If omitted, there is no lower bound on the table entities that can be accessed. :param str end_pk: The maximum partition key accessible with this shared access signature. endpk must accompany endrk. Key values are inclusive. If omitted, there is no upper bound on the table entities that can be accessed. :param str end_rk: The maximum row key accessible with this shared access signature. endpk must accompany endrk. Key values are inclusive. If omitted, there is no upper bound on the table entities that can be accessed.
entailment
def generate_blob(self, container_name, blob_name, permission=None, expiry=None,
                  start=None, id=None, ip=None, protocol=None, cache_control=None,
                  content_disposition=None, content_encoding=None,
                  content_language=None, content_type=None):
    '''
    Generates a shared access signature for the blob.
    Use the returned signature with the sas_token parameter of any BlobService.

    :param str container_name: Name of container.
    :param str blob_name: Name of blob.
    :param BlobPermissions permission:
        The permissions associated with the shared access signature. The
        user is restricted to operations allowed by the permissions.
        Permissions must be ordered read, write, delete, list. Required
        unless an id is given referencing a stored access policy which
        contains this field; must be omitted if specified there.
    :param expiry:
        The time at which the shared access signature becomes invalid.
        Required unless an id references a stored access policy containing
        this field; must be omitted if specified there. Azure will always
        convert values to UTC; naive datetimes are assumed to be UTC.
    :type expiry: date or str
    :param start:
        The time at which the shared access signature becomes valid. If
        omitted, assumed to be the time the storage service receives the
        request. Azure will always convert values to UTC; naive datetimes
        are assumed to be UTC.
    :type start: date or str
    :param str id:
        A unique value up to 64 characters in length that correlates to a
        stored access policy.
    :param str ip:
        An IP address or range of addresses from which to accept requests;
        requests from other addresses are not authenticated.
    :param str protocol:
        The protocol permitted for a request. Default is https,http.
        See :class:`~azure.storage.models.Protocol` for possible values.
    :param str cache_control:
        Response header value for Cache-Control when the resource is
        accessed using this shared access signature.
    :param str content_disposition:
        Response header value for Content-Disposition when the resource is
        accessed using this shared access signature.
    :param str content_encoding:
        Response header value for Content-Encoding when the resource is
        accessed using this shared access signature.
    :param str content_language:
        Response header value for Content-Language when the resource is
        accessed using this shared access signature.
    :param str content_type:
        Response header value for Content-Type when the resource is
        accessed using this shared access signature.
    '''
    builder = _SharedAccessHelper()
    builder.add_base(permission, expiry, start, ip, protocol)
    builder.add_id(id)
    builder.add_resource('b')
    builder.add_override_response_headers(cache_control, content_disposition,
                                          content_encoding, content_language,
                                          content_type)
    builder.add_resource_signature(self.account_name, self.account_key, 'blob',
                                   container_name + '/' + blob_name)
    return builder.get_token()
Generates a shared access signature for the blob. Use the returned signature with the sas_token parameter of any BlobService. :param str container_name: Name of container. :param str blob_name: Name of blob. :param BlobPermissions permission: The permissions associated with the shared access signature. The user is restricted to operations allowed by the permissions. Permissions must be ordered read, write, delete, list. Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. :param expiry: The time at which the shared access signature becomes invalid. Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type expiry: date or str :param start: The time at which the shared access signature becomes valid. If omitted, start time for this call is assumed to be the time when the storage service receives the request. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type start: date or str :param str id: A unique value up to 64 characters in length that correlates to a stored access policy. To create a stored access policy, use set_blob_service_properties. :param str ip: Specifies an IP address or a range of IP addresses from which to accept requests. If the IP address from which the request originates does not match the IP address or address range specified on the SAS token, the request is not authenticated. For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS restricts the request to those IP addresses. :param str protocol: Specifies the protocol permitted for a request made. The default value is https,http. 
See :class:`~azure.storage.models.Protocol` for possible values. :param str cache_control: Response header value for Cache-Control when resource is accessed using this shared access signature. :param str content_disposition: Response header value for Content-Disposition when resource is accessed using this shared access signature. :param str content_encoding: Response header value for Content-Encoding when resource is accessed using this shared access signature. :param str content_language: Response header value for Content-Language when resource is accessed using this shared access signature. :param str content_type: Response header value for Content-Type when resource is accessed using this shared access signature.
entailment
def list_tables(self, num_results=None, marker=None, timeout=None):
    '''
    Returns a generator to list the tables. The generator lazily follows
    continuation tokens returned by the service and stops when all tables
    have been returned or num_results is reached.

    If num_results is specified and the account has more than that number
    of tables, the finished generator has a populated next_marker field
    that can be used to create a new generator for further results.

    :param int num_results:
        The maximum number of tables to return.
    :param marker:
        An opaque continuation object, retrievable from the next_marker
        field of a previous, finished generator. If specified, results
        begin where the previous generator stopped.
    :type marker: obj
    :param int timeout:
        The server timeout, expressed in seconds, applied to each
        individual service call this function makes.
    :return: A generator which produces
        :class:`~azure.storage.models.table.Table` objects.
    :rtype: :class:`~azure.storage.models.ListGenerator`:
    '''
    operation = {
        'max_results': num_results,
        'marker': marker,
        'timeout': timeout,
    }
    first_page = self._list_tables(**operation)
    return ListGenerator(first_page, self._list_tables, (), operation)
Returns a generator to list the tables. The generator will lazily follow the continuation tokens returned by the service and stop when all tables have been returned or num_results is reached. If num_results is specified and the account has more than that number of tables, the generator will have a populated next_marker field once it finishes. This marker can be used to create a new generator if more results are desired. :param int num_results: The maximum number of tables to return. :param marker: An opaque continuation object. This value can be retrieved from the next_marker field of a previous generator object if num_results was specified and that generator has finished enumerating results. If specified, this generator will begin returning results from the point where the previous generator stopped. :type marker: obj :param int timeout: The server timeout, expressed in seconds. This function may make multiple calls to the service in which case the timeout value specified will be applied to each individual call. :return: A generator which produces :class:`~azure.storage.models.table.Table` objects. :rtype: :class:`~azure.storage.models.ListGenerator`:
entailment
def _list_tables(self, max_results=None, marker=None, timeout=None):
    '''
    Makes a single list request to the service and returns the resulting
    page of tables. Used internally by the list_tables method, which
    handles following continuation tokens.

    :param int max_results:
        The maximum number of tables to return. A single list request may
        return up to 1000 tables and potentially a continuation token
        which should be followed to get additional results.
    :param marker:
        A dictionary identifying the portion of the query to be returned
        with the next query operation. The service returns a next_marker
        element in the response body when the list is incomplete; that
        value may be passed here to request the next portion. The marker
        value is opaque to the client.
    :type marker: obj
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: A list of tables, potentially with a next_marker property.
    :rtype: list of :class:`~azure.storage.models.table.Table`:
    '''
    req = HTTPRequest()
    req.method = 'GET'
    req.host = self._get_host()
    req.path = '/Tables'
    req.headers = [('Accept', TablePayloadFormat.JSON_NO_METADATA)]
    req.query = [('$top', _int_to_str(max_results)),
                 ('NextTableName', _to_str(marker)),
                 ('timeout', _int_to_str(timeout))]
    return _convert_json_response_to_tables(self._perform_request(req))
Returns a list of tables under the specified account. Makes a single list request to the service. Used internally by the list_tables method. :param int max_results: The maximum number of tables to return. A single list request may return up to 1000 tables and potentially a continuation token which should be followed to get additional results. :param marker: A dictionary which identifies the portion of the query to be returned with the next query operation. The operation returns a next_marker element within the response body if the list returned was not complete. This value may then be used as a query parameter in a subsequent call to request the next portion of the list of queues. The marker value is opaque to the client. :type marker: obj :param int timeout: The server timeout, expressed in seconds. :return: A list of tables, potentially with a next_marker property. :rtype: list of :class:`~azure.storage.models.table.Table`:
entailment
def create_table(self, table_name, fail_on_exist=False, timeout=None):
    '''
    Creates a new table in the storage account.

    :param str table_name:
        The name of the table to create. The table name may contain only
        alphanumeric characters and cannot begin with a numeric character.
        It is case-insensitive and must be from 3 to 63 characters long.
    :param bool fail_on_exist:
        Specifies whether to throw an exception if the table already exists.
    :param int timeout:
        The server timeout, expressed in seconds.
    :return:
        A boolean indicating whether the table was created. If
        fail_on_exist was set to True, this will throw instead of
        returning false.
    :rtype: bool
    '''
    _validate_not_none('table', table_name)
    req = HTTPRequest()
    req.method = 'POST'
    req.host = self._get_host()
    req.path = '/Tables'
    req.query = [('timeout', _int_to_str(timeout))]
    req.headers = [_DEFAULT_CONTENT_TYPE_HEADER,
                   _DEFAULT_PREFER_HEADER,
                   _DEFAULT_ACCEPT_HEADER]
    req.body = _get_request_body(_convert_table_to_json(table_name))

    if fail_on_exist:
        # Let any conflict error propagate to the caller.
        self._perform_request(req)
        return True

    try:
        self._perform_request(req)
    except AzureHttpError as ex:
        # Swallow only the "already exists" error; re-raise anything else.
        _dont_fail_on_exist(ex)
        return False
    return True
Creates a new table in the storage account. :param str table_name: The name of the table to create. The table name may contain only alphanumeric characters and cannot begin with a numeric character. It is case-insensitive and must be from 3 to 63 characters long. :param bool fail_on_exist: Specifies whether to throw an exception if the table already exists. :param int timeout: The server timeout, expressed in seconds. :return: A boolean indicating whether the table was created. If fail_on_exist was set to True, this will throw instead of returning false. :rtype: bool
entailment
def exists(self, table_name, timeout=None):
    '''
    Returns a boolean indicating whether the table exists.

    :param str table_name:
        The name of table to check for existence.
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: A boolean indicating whether the table exists.
    :rtype: bool
    '''
    _validate_not_none('table_name', table_name)
    req = HTTPRequest()
    req.method = 'GET'
    req.host = self._get_host()
    req.path = "/Tables('{}')".format(table_name)
    req.headers = [('Accept', TablePayloadFormat.JSON_NO_METADATA)]
    req.query = [('timeout', _int_to_str(timeout))]

    try:
        self._perform_request(req)
    except AzureHttpError as ex:
        # Swallow only the "not found" error; re-raise anything else.
        _dont_fail_not_exist(ex)
        return False
    return True
Returns a boolean indicating whether the table exists. :param str table_name: The name of table to check for existence. :param int timeout: The server timeout, expressed in seconds. :return: A boolean indicating whether the table exists. :rtype: bool
entailment
def get_table_acl(self, table_name, timeout=None):
    '''
    Returns details about any stored access policies specified on the
    table that may be used with Shared Access Signatures.

    :param str table_name:
        The name of an existing table.
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: A dictionary of access policies associated with the table.
    :rtype: dict of str to :class:`~azure.storage.models.AccessPolicy`:
    '''
    _validate_not_none('table_name', table_name)
    req = HTTPRequest()
    req.method = 'GET'
    req.host = self._get_host()
    req.path = '/' + _to_str(table_name)
    req.query = [('comp', 'acl'), ('timeout', _int_to_str(timeout))]
    resp = self._perform_request(req)
    return _convert_xml_to_signed_identifiers(resp.body)
Returns details about any stored access policies specified on the table that may be used with Shared Access Signatures. :param str table_name: The name of an existing table. :param int timeout: The server timeout, expressed in seconds. :return: A dictionary of access policies associated with the table. :rtype: dict of str to :class:`~azure.storage.models.AccessPolicy`:
entailment
def set_table_acl(self, table_name, signed_identifiers=None, timeout=None):
    '''
    Sets stored access policies for the table that may be used with
    Shared Access Signatures.

    When you set permissions for a table, the existing permissions are
    replaced. To update the table's permissions, call
    :func:`~get_table_acl` to fetch all access policies associated with
    the table, modify the access policy that you wish to change, and then
    call this function with the complete set of data to perform the update.

    When you establish a stored access policy on a table, it may take up
    to 30 seconds to take effect. During this interval, a shared access
    signature that is associated with the stored access policy will throw
    an :class:`AzureHttpError` until the access policy becomes active.

    :param str table_name:
        The name of an existing table.
    :param signed_identifiers:
        A dictionary of access policies to associate with the table. The
        dictionary may contain up to 5 elements. An empty dictionary will
        clear the access policies set on the service.
    :type signed_identifiers: dict of str to
        :class:`~azure.storage.models.AccessPolicy`
    :param int timeout:
        The server timeout, expressed in seconds.
    '''
    _validate_not_none('table_name', table_name)
    req = HTTPRequest()
    req.method = 'PUT'
    req.host = self._get_host()
    req.path = '/' + _to_str(table_name)
    req.query = [('comp', 'acl'), ('timeout', _int_to_str(timeout))]
    req.body = _get_request_body(
        _convert_signed_identifiers_to_xml(signed_identifiers))
    self._perform_request(req)
Sets stored access policies for the table that may be used with Shared Access Signatures. When you set permissions for a table, the existing permissions are replaced. To update the table’s permissions, call :func:`~get_table_acl` to fetch all access policies associated with the table, modify the access policy that you wish to change, and then call this function with the complete set of data to perform the update. When you establish a stored access policy on a table, it may take up to 30 seconds to take effect. During this interval, a shared access signature that is associated with the stored access policy will throw an :class:`AzureHttpError` until the access policy becomes active. :param str table_name: The name of an existing table. :param signed_identifiers: A dictionary of access policies to associate with the table. The dictionary may contain up to 5 elements. An empty dictionary will clear the access policies set on the service. :type signed_identifiers: dict of str to :class:`~azure.storage.models.AccessPolicy` :param int timeout: The server timeout, expressed in seconds.
entailment
def query_entities(self, table_name, filter=None, select=None, num_results=None, marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA, property_resolver=None, timeout=None): ''' Returns a generator to list the entities in the table specified. The generator will lazily follow the continuation tokens returned by the service and stop when all entities have been returned or max_results is reached. If max_results is specified and the account has more than that number of entities, the generator will have a populated next_marker field once it finishes. This marker can be used to create a new generator if more results are desired. :param str table_name: The name of the table to query. :param str filter: Returns only entities that satisfy the specified filter. Note that no more than 15 discrete comparisons are permitted within a $filter string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx for more information on constructing filters. :param str select: Returns only the desired properties of an entity from the set. :param int num_results: The maximum number of entities to return. :param marker: An opaque continuation object. This value can be retrieved from the next_marker field of a previous generator object if max_results was specified and that generator has finished enumerating results. If specified, this generator will begin returning results from the point where the previous generator stopped. :type marker: obj :param str accept: Specifies the accepted content type of the response payload. See :class:`~azure.storage.table.models.TablePayloadFormat` for possible values. :param property_resolver: A function which given the partition key, row key, property name, property value, and the property EdmType if returned by the service, returns the EdmType of the property. Generally used if accept is set to JSON_NO_METADATA. 
:type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type) :param int timeout: The server timeout, expressed in seconds. This function may make multiple calls to the service in which case the timeout value specified will be applied to each individual call. :return: A generator which produces :class:`~azure.storage.table.models.Entity` objects. :rtype: :class:`~azure.storage.models.ListGenerator` ''' args = (table_name,) kwargs = {'filter': filter, 'select': select, 'max_results': num_results, 'marker': marker, 'accept': accept, 'property_resolver': property_resolver, 'timeout': timeout} resp = self._query_entities(*args, **kwargs) return ListGenerator(resp, self._query_entities, args, kwargs)
Returns a generator to list the entities in the table specified. The generator will lazily follow the continuation tokens returned by the service and stop when all entities have been returned or max_results is reached. If max_results is specified and the account has more than that number of entities, the generator will have a populated next_marker field once it finishes. This marker can be used to create a new generator if more results are desired. :param str table_name: The name of the table to query. :param str filter: Returns only entities that satisfy the specified filter. Note that no more than 15 discrete comparisons are permitted within a $filter string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx for more information on constructing filters. :param str select: Returns only the desired properties of an entity from the set. :param int num_results: The maximum number of entities to return. :param marker: An opaque continuation object. This value can be retrieved from the next_marker field of a previous generator object if max_results was specified and that generator has finished enumerating results. If specified, this generator will begin returning results from the point where the previous generator stopped. :type marker: obj :param str accept: Specifies the accepted content type of the response payload. See :class:`~azure.storage.table.models.TablePayloadFormat` for possible values. :param property_resolver: A function which given the partition key, row key, property name, property value, and the property EdmType if returned by the service, returns the EdmType of the property. Generally used if accept is set to JSON_NO_METADATA. :type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type) :param int timeout: The server timeout, expressed in seconds. This function may make multiple calls to the service in which case the timeout value specified will be applied to each individual call. 
:return: A generator which produces :class:`~azure.storage.table.models.Entity` objects. :rtype: :class:`~azure.storage.models.ListGenerator`
entailment
def _query_entities(self, table_name, filter=None, select=None, max_results=None, marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA, property_resolver=None, timeout=None): ''' Returns a list of entities under the specified table. Makes a single list request to the service. Used internally by the query_entities method. :param str table_name: The name of the table to query. :param str filter: Returns only entities that satisfy the specified filter. Note that no more than 15 discrete comparisons are permitted within a $filter string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx for more information on constructing filters. :param str select: Returns only the desired properties of an entity from the set. :param int top: The maximum number of entities to return. :param marker: A dictionary which identifies the portion of the query to be returned with the next query operation. The operation returns a next_marker element within the response body if the list returned was not complete. This value may then be used as a query parameter in a subsequent call to request the next portion of the list of queues. The marker value is opaque to the client. :type marker: obj :param str accept: Specifies the accepted content type of the response payload. See :class:`~azure.storage.table.models.TablePayloadFormat` for possible values. :param property_resolver: A function which given the partition key, row key, property name, property value, and the property EdmType if returned by the service, returns the EdmType of the property. Generally used if accept is set to JSON_NO_METADATA. :type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type) :param int timeout: The server timeout, expressed in seconds. :return: A list of entities, potentially with a next_marker property. 
:rtype: list of :class:`~azure.storage.table.models.Entity` ''' _validate_not_none('table_name', table_name) _validate_not_none('accept', accept) next_partition_key = None if marker is None else marker.get('nextpartitionkey') next_row_key = None if marker is None else marker.get('nextrowkey') request = HTTPRequest() request.method = 'GET' request.host = self._get_host() request.path = '/' + _to_str(table_name) + '()' request.headers = [('Accept', _to_str(accept))] request.query = [ ('$filter', _to_str(filter)), ('$select', _to_str(select)), ('$top', _int_to_str(max_results)), ('NextPartitionKey', _to_str(next_partition_key)), ('NextRowKey', _to_str(next_row_key)), ('timeout', _int_to_str(timeout)), ] response = self._perform_request(request) return _convert_json_response_to_entities(response, property_resolver)
Returns a list of entities under the specified table. Makes a single list request to the service. Used internally by the query_entities method. :param str table_name: The name of the table to query. :param str filter: Returns only entities that satisfy the specified filter. Note that no more than 15 discrete comparisons are permitted within a $filter string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx for more information on constructing filters. :param str select: Returns only the desired properties of an entity from the set. :param int top: The maximum number of entities to return. :param marker: A dictionary which identifies the portion of the query to be returned with the next query operation. The operation returns a next_marker element within the response body if the list returned was not complete. This value may then be used as a query parameter in a subsequent call to request the next portion of the list of queues. The marker value is opaque to the client. :type marker: obj :param str accept: Specifies the accepted content type of the response payload. See :class:`~azure.storage.table.models.TablePayloadFormat` for possible values. :param property_resolver: A function which given the partition key, row key, property name, property value, and the property EdmType if returned by the service, returns the EdmType of the property. Generally used if accept is set to JSON_NO_METADATA. :type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type) :param int timeout: The server timeout, expressed in seconds. :return: A list of entities, potentially with a next_marker property. :rtype: list of :class:`~azure.storage.table.models.Entity`
entailment
def commit_batch(self, table_name, batch, timeout=None): ''' Commits a :class:`~azure.storage.table.TableBatch` request. :param str table_name: The name of the table to commit the batch to. :param TableBatch batch: The batch to commit. :param int timeout: The server timeout, expressed in seconds. :return: A list of the batch responses corresponding to the requests in the batch. :rtype: list of response objects ''' _validate_not_none('table_name', table_name) # Construct the batch request request = HTTPRequest() request.method = 'POST' request.host = self._get_host() request.path = '/' + '$batch' request.query = [('timeout', _int_to_str(timeout))] # Update the batch operation requests with table and client specific info for row_key, batch_request in batch._requests: batch_request.host = self._get_host() if batch_request.method == 'POST': batch_request.path = '/' + _to_str(table_name) else: batch_request.path = _get_entity_path(table_name, batch._partition_key, row_key) _update_request(batch_request) # Construct the batch body request.body, boundary = _convert_batch_to_json(batch._requests) request.headers = [('Content-Type', boundary)] # Perform the batch request and return the response response = self._perform_request(request) responses = _parse_batch_response(response.body) return responses
Commits a :class:`~azure.storage.table.TableBatch` request. :param str table_name: The name of the table to commit the batch to. :param TableBatch batch: The batch to commit. :param int timeout: The server timeout, expressed in seconds. :return: A list of the batch responses corresponding to the requests in the batch. :rtype: list of response objects
entailment
def batch(self, table_name, timeout=None): ''' Creates a batch object which can be used as a context manager. Commits the batch on exit. :param str table_name: The name of the table to commit the batch to. :param int timeout: The server timeout, expressed in seconds. ''' batch = TableBatch() yield batch self.commit_batch(table_name, batch, timeout=timeout)
Creates a batch object which can be used as a context manager. Commits the batch on exit. :param str table_name: The name of the table to commit the batch to. :param int timeout: The server timeout, expressed in seconds.
entailment
def get_entity(self, table_name, partition_key, row_key, select=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA, property_resolver=None, timeout=None): ''' Get an entity from the specified table. Throws if the entity does not exist. :param str table_name: The name of the table to get the entity from. :param str partition_key: The PartitionKey of the entity. :param str row_key: The RowKey of the entity. :param str select: Returns only the desired properties of an entity from the set. :param str accept: Specifies the accepted content type of the response payload. See :class:`~azure.storage.table.models.TablePayloadFormat` for possible values. :param property_resolver: A function which given the partition key, row key, property name, property value, and the property EdmType if returned by the service, returns the EdmType of the property. Generally used if accept is set to JSON_NO_METADATA. :type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type) :param int timeout: The server timeout, expressed in seconds. :return: The retrieved entity. :rtype: :class:`~azure.storage.table.models.Entity` ''' _validate_not_none('table_name', table_name) request = _get_entity(partition_key, row_key, select, accept) request.host = self._get_host() request.path = _get_entity_path(table_name, partition_key, row_key) request.query += [('timeout', _int_to_str(timeout))] response = self._perform_request(request) return _convert_json_response_to_entity(response, property_resolver)
Get an entity from the specified table. Throws if the entity does not exist. :param str table_name: The name of the table to get the entity from. :param str partition_key: The PartitionKey of the entity. :param str row_key: The RowKey of the entity. :param str select: Returns only the desired properties of an entity from the set. :param str accept: Specifies the accepted content type of the response payload. See :class:`~azure.storage.table.models.TablePayloadFormat` for possible values. :param property_resolver: A function which given the partition key, row key, property name, property value, and the property EdmType if returned by the service, returns the EdmType of the property. Generally used if accept is set to JSON_NO_METADATA. :type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type) :param int timeout: The server timeout, expressed in seconds. :return: The retrieved entity. :rtype: :class:`~azure.storage.table.models.Entity`
entailment
def insert_entity(self, table_name, entity, timeout=None): ''' Inserts a new entity into the table. Throws if an entity with the same PartitionKey and RowKey already exists. When inserting an entity into a table, you must specify values for the PartitionKey and RowKey system properties. Together, these properties form the primary key and must be unique within the table. Both the PartitionKey and RowKey values must be string values; each key value may be up to 64 KB in size. If you are using an integer value for the key value, you should convert the integer to a fixed-width string, because they are canonically sorted. For example, you should convert the value 1 to 0000001 to ensure proper sorting. :param str table_name: The name of the table to insert the entity into. :param entity: The entity to insert. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: a dict or :class:`~azure.storage.table.models.Entity` :param int timeout: The server timeout, expressed in seconds. :return: The etag of the inserted entity. :rtype: str ''' _validate_not_none('table_name', table_name) request = _insert_entity(entity) request.host = self._get_host() request.path = '/' + _to_str(table_name) request.query += [('timeout', _int_to_str(timeout))] response = self._perform_request(request) return _extract_etag(response)
Inserts a new entity into the table. Throws if an entity with the same PartitionKey and RowKey already exists. When inserting an entity into a table, you must specify values for the PartitionKey and RowKey system properties. Together, these properties form the primary key and must be unique within the table. Both the PartitionKey and RowKey values must be string values; each key value may be up to 64 KB in size. If you are using an integer value for the key value, you should convert the integer to a fixed-width string, because they are canonically sorted. For example, you should convert the value 1 to 0000001 to ensure proper sorting. :param str table_name: The name of the table to insert the entity into. :param entity: The entity to insert. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: a dict or :class:`~azure.storage.table.models.Entity` :param int timeout: The server timeout, expressed in seconds. :return: The etag of the inserted entity. :rtype: str
entailment
def update_entity(self, table_name, entity, if_match='*', timeout=None): ''' Updates an existing entity in a table. Throws if the entity does not exist. The update_entity operation replaces the entire entity and can be used to remove properties. :param str table_name: The name of the table containing the entity to update. :param entity: The entity to update. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: a dict or :class:`~azure.storage.table.models.Entity` :param str if_match: The client may specify the ETag for the entity on the request in order to compare to the ETag maintained by the service for the purpose of optimistic concurrency. The update operation will be performed only if the ETag sent by the client matches the value maintained by the server, indicating that the entity has not been modified since it was retrieved by the client. To force an unconditional update, set If-Match to the wildcard character (*). :param int timeout: The server timeout, expressed in seconds. :return: The etag of the entity. :rtype: str ''' _validate_not_none('table_name', table_name) request = _update_entity(entity, if_match) request.host = self._get_host() request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey']) request.query += [('timeout', _int_to_str(timeout))] response = self._perform_request(request) return _extract_etag(response)
Updates an existing entity in a table. Throws if the entity does not exist. The update_entity operation replaces the entire entity and can be used to remove properties. :param str table_name: The name of the table containing the entity to update. :param entity: The entity to update. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: a dict or :class:`~azure.storage.table.models.Entity` :param str if_match: The client may specify the ETag for the entity on the request in order to compare to the ETag maintained by the service for the purpose of optimistic concurrency. The update operation will be performed only if the ETag sent by the client matches the value maintained by the server, indicating that the entity has not been modified since it was retrieved by the client. To force an unconditional update, set If-Match to the wildcard character (*). :param int timeout: The server timeout, expressed in seconds. :return: The etag of the entity. :rtype: str
entailment
def delete_entity(self, table_name, partition_key, row_key, if_match='*', timeout=None): ''' Deletes an existing entity in a table. Throws if the entity does not exist. When an entity is successfully deleted, the entity is immediately marked for deletion and is no longer accessible to clients. The entity is later removed from the Table service during garbage collection. :param str table_name: The name of the table containing the entity to delete. :param str partition_key: The PartitionKey of the entity. :param str row_key: The RowKey of the entity. :param str if_match: The client may specify the ETag for the entity on the request in order to compare to the ETag maintained by the service for the purpose of optimistic concurrency. The delete operation will be performed only if the ETag sent by the client matches the value maintained by the server, indicating that the entity has not been modified since it was retrieved by the client. To force an unconditional delete, set If-Match to the wildcard character (*). :param int timeout: The server timeout, expressed in seconds. ''' _validate_not_none('table_name', table_name) request = _delete_entity(partition_key, row_key, if_match) request.host = self._get_host() request.query += [('timeout', _int_to_str(timeout))] request.path = _get_entity_path(table_name, partition_key, row_key) self._perform_request(request)
Deletes an existing entity in a table. Throws if the entity does not exist. When an entity is successfully deleted, the entity is immediately marked for deletion and is no longer accessible to clients. The entity is later removed from the Table service during garbage collection. :param str table_name: The name of the table containing the entity to delete. :param str partition_key: The PartitionKey of the entity. :param str row_key: The RowKey of the entity. :param str if_match: The client may specify the ETag for the entity on the request in order to compare to the ETag maintained by the service for the purpose of optimistic concurrency. The delete operation will be performed only if the ETag sent by the client matches the value maintained by the server, indicating that the entity has not been modified since it was retrieved by the client. To force an unconditional delete, set If-Match to the wildcard character (*). :param int timeout: The server timeout, expressed in seconds.
entailment
def insert_or_replace_entity(self, table_name, entity, timeout=None): ''' Replaces an existing entity or inserts a new entity if it does not exist in the table. Because this operation can insert or update an entity, it is also known as an "upsert" operation. If insert_or_replace_entity is used to replace an entity, any properties from the previous entity will be removed if the new entity does not define them. :param str table_name: The name of the table in which to insert or replace the entity. :param entity: The entity to insert or replace. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: a dict or :class:`~azure.storage.table.models.Entity` :param int timeout: The server timeout, expressed in seconds. :return: The etag of the entity. :rtype: str ''' _validate_not_none('table_name', table_name) request = _insert_or_replace_entity(entity) request.host = self._get_host() request.query += [('timeout', _int_to_str(timeout))] request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey']) response = self._perform_request(request) return _extract_etag(response)
Replaces an existing entity or inserts a new entity if it does not exist in the table. Because this operation can insert or update an entity, it is also known as an "upsert" operation. If insert_or_replace_entity is used to replace an entity, any properties from the previous entity will be removed if the new entity does not define them. :param str table_name: The name of the table in which to insert or replace the entity. :param entity: The entity to insert or replace. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: a dict or :class:`~azure.storage.table.models.Entity` :param int timeout: The server timeout, expressed in seconds. :return: The etag of the entity. :rtype: str
entailment
def insert_or_merge_entity(self, table_name, entity, timeout=None): ''' Merges an existing entity or inserts a new entity if it does not exist in the table. If insert_or_merge_entity is used to merge an entity, any properties from the previous entity will be retained if the request does not define or include them. :param str table_name: The name of the table in which to insert or merge the entity. :param entity: The entity to insert or merge. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: a dict or :class:`~azure.storage.table.models.Entity` :param int timeout: The server timeout, expressed in seconds. :return: The etag of the entity. :rtype: str ''' _validate_not_none('table_name', table_name) request = _insert_or_merge_entity(entity) request.host = self._get_host() request.query += [('timeout', _int_to_str(timeout))] request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey']) response = self._perform_request(request) return _extract_etag(response)
Merges an existing entity or inserts a new entity if it does not exist in the table. If insert_or_merge_entity is used to merge an entity, any properties from the previous entity will be retained if the request does not define or include them. :param str table_name: The name of the table in which to insert or merge the entity. :param entity: The entity to insert or merge. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: a dict or :class:`~azure.storage.table.models.Entity` :param int timeout: The server timeout, expressed in seconds. :return: The etag of the entity. :rtype: str
entailment
def get_request_table(self, request): ''' Extracts table name from request.uri. The request.uri has either "/mytable(...)" or "/mytable" format. request: the request to insert, update or delete entity ''' if '(' in request.path: pos = request.path.find('(') return request.path[1:pos] else: return request.path[1:]
Extracts table name from request.uri. The request.uri has either "/mytable(...)" or "/mytable" format. request: the request to insert, update or delete entity
entailment
def get_request_partition_key(self, request): ''' Extracts PartitionKey from request.body if it is a POST request or from request.path if it is not a POST request. Only insert operation request is a POST request and the PartitionKey is in the request body. request: the request to insert, update or delete entity ''' if request.method == 'POST': doc = ETree.fromstring(request.body) part_key = doc.find('./atom:content/m:properties/d:PartitionKey', _etree_entity_feed_namespaces) if part_key is None: raise AzureBatchValidationError(_ERROR_CANNOT_FIND_PARTITION_KEY) return _get_etree_text(part_key) else: uri = url_unquote(request.path) pos1 = uri.find('PartitionKey=\'') pos2 = uri.find('\',', pos1) if pos1 == -1 or pos2 == -1: raise AzureBatchValidationError(_ERROR_CANNOT_FIND_PARTITION_KEY) return uri[pos1 + len('PartitionKey=\''):pos2]
Extracts PartitionKey from request.body if it is a POST request or from request.path if it is not a POST request. Only insert operation request is a POST request and the PartitionKey is in the request body. request: the request to insert, update or delete entity
entailment
def get_request_row_key(self, request): ''' Extracts RowKey from request.body if it is a POST request or from request.path if it is not a POST request. Only insert operation request is a POST request and the Rowkey is in the request body. request: the request to insert, update or delete entity ''' if request.method == 'POST': doc = ETree.fromstring(request.body) row_key = doc.find('./atom:content/m:properties/d:RowKey', _etree_entity_feed_namespaces) if row_key is None: raise AzureBatchValidationError(_ERROR_CANNOT_FIND_ROW_KEY) return _get_etree_text(row_key) else: uri = url_unquote(request.path) pos1 = uri.find('RowKey=\'') pos2 = uri.find('\')', pos1) if pos1 == -1 or pos2 == -1: raise AzureBatchValidationError(_ERROR_CANNOT_FIND_ROW_KEY) row_key = uri[pos1 + len('RowKey=\''):pos2] return row_key
Extracts RowKey from request.body if it is a POST request or from request.path if it is not a POST request. Only insert operation request is a POST request and the Rowkey is in the request body. request: the request to insert, update or delete entity
entailment
def validate_request_table(self, request): ''' Validates that all requests have the same table name. Set the table name if it is the first request for the batch operation. request: the request to insert, update or delete entity ''' if self.batch_table: if self.get_request_table(request) != self.batch_table: raise AzureBatchValidationError(_ERROR_INCORRECT_TABLE_IN_BATCH) else: self.batch_table = self.get_request_table(request)
Validates that all requests have the same table name. Set the table name if it is the first request for the batch operation. request: the request to insert, update or delete entity
entailment
def validate_request_partition_key(self, request): ''' Validates that all requests have the same PartitiionKey. Set the PartitionKey if it is the first request for the batch operation. request: the request to insert, update or delete entity ''' if self.batch_partition_key: if self.get_request_partition_key(request) != \ self.batch_partition_key: raise AzureBatchValidationError(_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH) else: self.batch_partition_key = self.get_request_partition_key(request)
Validates that all requests have the same PartitiionKey. Set the PartitionKey if it is the first request for the batch operation. request: the request to insert, update or delete entity
entailment
def validate_request_row_key(self, request): ''' Validates that all requests have the different RowKey and adds RowKey to existing RowKey list. request: the request to insert, update or delete entity ''' if self.batch_row_keys: if self.get_request_row_key(request) in self.batch_row_keys: raise AzureBatchValidationError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH) else: self.batch_row_keys.append(self.get_request_row_key(request))
Validates that all requests have the different RowKey and adds RowKey to existing RowKey list. request: the request to insert, update or delete entity
entailment
def begin_batch(self): ''' Starts the batch operation. Intializes the batch variables is_batch: batch operation flag. batch_table: the table name of the batch operation batch_partition_key: the PartitionKey of the batch requests. batch_row_keys: the RowKey list of adding requests. batch_requests: the list of the requests. ''' self.is_batch = True self.batch_table = '' self.batch_partition_key = '' self.batch_row_keys = [] self.batch_requests = []
Starts the batch operation. Intializes the batch variables is_batch: batch operation flag. batch_table: the table name of the batch operation batch_partition_key: the PartitionKey of the batch requests. batch_row_keys: the RowKey list of adding requests. batch_requests: the list of the requests.
entailment
def insert_request_to_batch(self, request): ''' Adds request to batch operation. request: the request to insert, update or delete entity ''' self.validate_request_table(request) self.validate_request_partition_key(request) self.validate_request_row_key(request) self.batch_requests.append(request)
Adds request to batch operation. request: the request to insert, update or delete entity
entailment
def commit_batch_requests(self): ''' Commits the batch requests. ''' batch_boundary = b'batch_' + _new_boundary() changeset_boundary = b'changeset_' + _new_boundary() # Commits batch only the requests list is not empty. if self.batch_requests: request = HTTPRequest() request.method = 'POST' request.host = self.batch_requests[0].host request.path = '/$batch' request.headers = [ ('Content-Type', 'multipart/mixed; boundary=' + \ batch_boundary.decode('utf-8')), ('Accept', 'application/atom+xml,application/xml'), ('Accept-Charset', 'UTF-8')] request.body = b'--' + batch_boundary + b'\n' request.body += b'Content-Type: multipart/mixed; boundary=' request.body += changeset_boundary + b'\n\n' content_id = 1 # Adds each request body to the POST data. for batch_request in self.batch_requests: request.body += b'--' + changeset_boundary + b'\n' request.body += b'Content-Type: application/http\n' request.body += b'Content-Transfer-Encoding: binary\n\n' request.body += batch_request.method.encode('utf-8') request.body += b' http://' request.body += batch_request.host.encode('utf-8') request.body += batch_request.path.encode('utf-8') request.body += b' HTTP/1.1\n' request.body += b'Content-ID: ' request.body += str(content_id).encode('utf-8') + b'\n' content_id += 1 # Add different headers for different type requests. if not batch_request.method == 'DELETE': request.body += \ b'Content-Type: application/atom+xml;type=entry\n' for name, value in batch_request.headers: if name == 'If-Match': request.body += name.encode('utf-8') + b': ' request.body += value.encode('utf-8') + b'\n' break request.body += b'Content-Length: ' request.body += str(len(batch_request.body)).encode('utf-8') request.body += b'\n\n' request.body += batch_request.body + b'\n' else: for name, value in batch_request.headers: # If-Match should be already included in # batch_request.headers, but in case it is missing, # just add it. 
if name == 'If-Match': request.body += name.encode('utf-8') + b': ' request.body += value.encode('utf-8') + b'\n\n' break else: request.body += b'If-Match: *\n\n' request.body += b'--' + changeset_boundary + b'--' + b'\n' request.body += b'--' + batch_boundary + b'--' request.path, request.query = _update_request_uri_query(request) request.headers = _update_storage_table_header(request) self.authentication.sign_request(request) # Submit the whole request as batch request. response = self.perform_request(request) if response.status >= 300: # This exception will be caught by the general error handler # and raised as an azure http exception raise HTTPError(response.status, _ERROR_BATCH_COMMIT_FAIL, self.respheader, response.body) # http://www.odata.org/documentation/odata-version-2-0/batch-processing/ # The body of a ChangeSet response is either a response for all the # successfully processed change request within the ChangeSet, # formatted exactly as it would have appeared outside of a batch, # or a single response indicating a failure of the entire ChangeSet. responses = self._parse_batch_response(response.body) if responses and responses[0].status >= 300: self._report_batch_error(responses[0])
Commits the batch requests.
entailment
def set_license(self, key): """Set the license on a redfish system :param key: license key """ data = {'LicenseKey': key} license_service_uri = (utils.get_subresource_path_by(self, ['Oem', 'Hpe', 'Links', 'LicenseService'])) self._conn.post(license_service_uri, data=data)
Set the license on a redfish system :param key: license key
entailment
def virtual_media(self): """Property to provide reference to `VirtualMediaCollection` instance. It is calculated once when the first time it is queried. On refresh, this property gets reset. """ return virtual_media.VirtualMediaCollection( self._conn, utils.get_subresource_path_by(self, 'VirtualMedia'), redfish_version=self.redfish_version)
Property to provide reference to `VirtualMediaCollection` instance. It is calculated once when the first time it is queried. On refresh, this property gets reset.
entailment
def set_iscsi_info(self, target_name, lun, ip_address, port='3260', auth_method=None, username=None, password=None): """Set iscsi details of the system in uefi boot mode. The initiator system is set with the target details like IQN, LUN, IP, Port etc. :param target_name: Target Name for iscsi. :param lun: logical unit number. :param ip_address: IP address of the target. :param port: port of the target. :param auth_method : either None or CHAP. :param username: CHAP Username for authentication. :param password: CHAP secret. :raises: IloError, on an error from iLO. :raises: IloCommandNotSupportedInBiosError, if the system is in the bios boot mode. """ raise exception.IloCommandNotSupportedError(ERRMSG)
Set iscsi details of the system in uefi boot mode. The initiator system is set with the target details like IQN, LUN, IP, Port etc. :param target_name: Target Name for iscsi. :param lun: logical unit number. :param ip_address: IP address of the target. :param port: port of the target. :param auth_method : either None or CHAP. :param username: CHAP Username for authentication. :param password: CHAP secret. :raises: IloError, on an error from iLO. :raises: IloCommandNotSupportedInBiosError, if the system is in the bios boot mode.
entailment
def writeTraj(filename='trajectory.input', data=[], ageunit=0, tunit=0, rhounit=0, idNum=0): ''' Method for writeing Trajectory type ascii files files. Parameters ---------- filename : string The file where this data will be written. data : list A list of 1D data vectors with time, T and rho. ageunit : integer, optional If 1 ageunit = SEC, If 0 ageunit = YRS. If 2 agunit = logtimerev in yrs. The default is 0. logtimerev is log of time until end tunit : integer, optional If 1 TUNIT = T9K, if 0 TUNIT = T8K. The default is 0. rhounit : integer, optional If 1 RHOUNIT = LOG, if 0 RHOUNIT = CGS. The default is 0. idNum : optional An optional id argument ''' if data==[]: print('Please input correct data') print('returning None') return None headers=[] if ageunit ==1: headers.append('AGEUNIT = SEC') elif ageunit==0: headers.append('AGEUNIT = YRS') elif ageunit==2: headers.append('AGEUNIT = logtimerev/yrs') if tunit ==1: headers.append('TUNIT = T9K') elif tunit==0: headers.append('TUNIT = T8K') if rhounit ==1: headers.append('RHOUNIT = LOG') elif rhounit==0: headers.append('RHOUNIT = CGS') headers.append('ID = '+str(idNum)) write(filename,headers,['time','T','rho'],data,['YRS/SEC; T8K/T9K; CGS/LOG',"FORMAT: '(10x,A3)'"],trajectory=True)
Method for writeing Trajectory type ascii files files. Parameters ---------- filename : string The file where this data will be written. data : list A list of 1D data vectors with time, T and rho. ageunit : integer, optional If 1 ageunit = SEC, If 0 ageunit = YRS. If 2 agunit = logtimerev in yrs. The default is 0. logtimerev is log of time until end tunit : integer, optional If 1 TUNIT = T9K, if 0 TUNIT = T8K. The default is 0. rhounit : integer, optional If 1 RHOUNIT = LOG, if 0 RHOUNIT = CGS. The default is 0. idNum : optional An optional id argument
entailment
def write(filename, headers, dcols, data, headerlines=[], header_char='H', sldir='.', sep=' ', trajectory=False, download=False): ''' Method for writeing Ascii files. Note the attribute name at position i in dcols will be associated with the column data at index i in data. Also the number of data columns(in data) must equal the number of data attributes (in dcols) Also all the lengths of that columns must all be the same. Parameters ---------- filename : string The file where this data will be written. Headers : list A list of Header strings or if the file being written is of type trajectory, this is a List of strings that contain header attributes and their associated values which are seperated by a '='. dcols : list A list of data attributes. data : list A list of lists (or of numpy arrays). headerlines : list, optional Additional list of strings of header data, only used in trajectory data Types. The default is []. header_char : character, optional The character that indicates a header lines. The default is 'H'. sldir : string, optional Where this fill will be written. The default is '.'. sep : string, optional What seperates the data column attributes. The default is ' '. trajectory : boolean, optional Boolean of if we are writeing a trajectory type file. The default is False. download : boolean, optional If using iPython notebook, do you want a download link for the file you write? The default is False. ''' if sldir.endswith(os.sep): filename = str(sldir)+str(filename) else: filename = str(sldir)+os.sep+str(filename) tmp=[] #temp variable lines=[]#list of the data lines lengthList=[]# list of the longest element (data or column name) # in each column if os.path.exists(filename): print('Warning this method will overwrite '+ filename) print('Would you like to continue? 
(y)es or (n)no?') s = input('--> ') if s=='Y' or s=='y' or s=='Yes' or s=='yes': print('Yes selected') print('Continuing as normal') else: print('No Selected') print('Returning None') return None if len(data)!=len(dcols): print('The number of data columns does not equal the number of Data attributes') print('returning none') return None if trajectory: sep=' ' for i in range(len(headers)): if not trajectory: tmp.append(header_char+' '+headers[i]+'\n') else: tmp.append(headers[i]+'\n') headers=tmp tmp='' for i in range(len(data)): #Line length stuff length=len(dcols[i]) for j in range(len(data[dcols[i]])): #len(data[i]) throws error as type(data)=dict, not list if len(str(data[dcols[i]][j]))>length: #data[i][j] throws error as type(data)=dict, not list length=len(str(data[dcols[i]][j])) lengthList.append(length) print(lengthList) tmp='' tmp1='9' if trajectory: tmp='#' for i in range(len(dcols)): tmp1=dcols[i] if not trajectory: if len(dcols[i]) < lengthList[i]: j=lengthList[i]-len(dcols[i]) for k in range(j): tmp1+=' ' tmp+=sep+tmp1 else: tmp+=' '+dcols[i] tmp+='\n' dcols=tmp tmp='' for i in range(len(data[0])): for j in range(len(data)): tmp1=str(data[j][i]) if len(str(data[j][i])) < lengthList[j]: l=lengthList[j]-len(str(data[j][i])) for k in range(l): tmp1+=' ' tmp+=sep+tmp1 lines.append(tmp+'\n') tmp='' f=open(filename,'w') if not trajectory: for i in range(len(headers)): f.write(headers[i]) f.write(dcols) else: f.write(dcols) for i in range(len(headerlines)): f.write('# '+headerlines[i]+'\n') for i in range(len(headers)): f.write(headers[i]) for i in range(len(lines)): f.write(lines[i]) f.close() if download: from IPython.display import FileLink, FileLinks return FileLink(filename) else: return None
Method for writeing Ascii files. Note the attribute name at position i in dcols will be associated with the column data at index i in data. Also the number of data columns(in data) must equal the number of data attributes (in dcols) Also all the lengths of that columns must all be the same. Parameters ---------- filename : string The file where this data will be written. Headers : list A list of Header strings or if the file being written is of type trajectory, this is a List of strings that contain header attributes and their associated values which are seperated by a '='. dcols : list A list of data attributes. data : list A list of lists (or of numpy arrays). headerlines : list, optional Additional list of strings of header data, only used in trajectory data Types. The default is []. header_char : character, optional The character that indicates a header lines. The default is 'H'. sldir : string, optional Where this fill will be written. The default is '.'. sep : string, optional What seperates the data column attributes. The default is ' '. trajectory : boolean, optional Boolean of if we are writeing a trajectory type file. The default is False. download : boolean, optional If using iPython notebook, do you want a download link for the file you write? The default is False.
entailment
def writeGCE_table(filename,headers,data,dcols=['Isotopes','Yields','Z','A'],header_char='H',sldir='.',sep='&'): ''' Method for writeing data in GCE format in Ascii files. Reads either elements or isotopes dcols[0] needs to contain either isotopes or elements Note the attribute name at position i in dcols will be associated with the column data at index i in data. Also the number of data columns(in data) must equal the number of data attributes (in dcols) Also all the lengths of that columns must all be the same. Input: filename: The file where this data will be written. Headers: A list of Header strings or if the file being written is of type trajectory, this is a List of strings that contain header attributes and their associated values which are seperated by a '='. dcols: A list of data attributes data: A list of lists (or of numpy arrays). header_char the character that indicates a header lines sldir: Where this fill will be written. sep: What seperatesa the data column attributes trajectory: Boolean of if we are writeing a trajectory type file ''' import re from . 
import utils as u #check if input are elements or isotopes if not '-' in data[0][0]: iso_inp=False dcols=dcols+['Z'] else: iso_inp=True dcols=dcols+['Z','A'] #Attach Z and A if iso_inp==True: data.append([]) data.append([]) u.convert_specie_naming_from_h5_to_ppn(data[0]) Z=u.znum_int A=u.amass_int for i in range(len(data[0])): zz=str(int(Z[i])) aa=str(int(A[i])) data[1][i]='{:.3E}'.format(data[1][i])+' ' data[-2].append(zz) data[-1].append(aa) else: #in order to get Z , create fake isotope from element fake_iso=[] for k in range(len(data[0])): fake_iso.append(data[0][k]+'-99') #print fake_iso data.append([]) u.convert_specie_naming_from_h5_to_ppn(fake_iso) Z=u.znum_int for i in range(len(data[0])): zz=str(int(Z[i])) data[1][i]='{:.3E}'.format(data[1][i])+' ' data[-1].append(zz) if sldir.endswith(os.sep): filename = str(sldir)+str(filename) else: filename = str(sldir)+os.sep+str(filename) tmp=[] #temp variable lines=[]#list of the data lines lengthList=[]# list of the longest element (data or column name) # in each column if os.path.exists(filename): print('This method will add table to existing file '+ filename) if len(data)!=len(dcols): print('The number of data columns does not equal the number of Data attributes') print('returning none') return None for i in range(len(headers)): tmp.append(header_char+' '+headers[i]+'\n') headers=tmp tmp='' for i in range(len(data)): #Line length stuff length=len(dcols[i])+1 for j in range(len(data[i])): tmp2=data[i][j] if isinstance(data[i][j],float): tmp2='{:.3E}'.format(data[i][j])+' ' data[i][j] = tmp2 if len(str(tmp2))>length: length=len(str(tmp2)) lengthList.append(length) tmp='' tmp1='' for i in range(len(dcols)): tmp1=dcols[i] if len(dcols[i]) < lengthList[i]: j=lengthList[i]-len(dcols[i]) for k in range(j): tmp1+=' ' tmp+=sep+tmp1 tmp+='\n' dcols=tmp tmp='' for i in range(len(data[0])): for j in range(len(data)): if type(data[j][i]) == str: #match = re.match(r"([a-z]+)([0-9]+)",data[j][i], re.I) #items = match.groups() 
tmp1=data[j][i]#items[0].capitalize()+'-'+items[1] if len(str(data[j][i])) < lengthList[j]: l=lengthList[j]-len(tmp1) for k in range(l): tmp1+=' ' extra='' #else: # tmp1=data[j][i] # if len(data[j][i]) < lengthList[j]: # l=lengthList[j]-len(data[j][i])) # for k in xrange(l): # tmp1+=' ' tmp+=sep+tmp1 lines.append(tmp+'\n') tmp='' f=open(filename,'a') for i in range(len(headers)): f.write(headers[i]) f.write(dcols) for i in range(len(lines)): f.write(lines[i]) f.close() return None
Method for writeing data in GCE format in Ascii files. Reads either elements or isotopes dcols[0] needs to contain either isotopes or elements Note the attribute name at position i in dcols will be associated with the column data at index i in data. Also the number of data columns(in data) must equal the number of data attributes (in dcols) Also all the lengths of that columns must all be the same. Input: filename: The file where this data will be written. Headers: A list of Header strings or if the file being written is of type trajectory, this is a List of strings that contain header attributes and their associated values which are seperated by a '='. dcols: A list of data attributes data: A list of lists (or of numpy arrays). header_char the character that indicates a header lines sldir: Where this fill will be written. sep: What seperatesa the data column attributes trajectory: Boolean of if we are writeing a trajectory type file
entailment
def get(self, attri): ''' Method that dynamically determines the type of attribute that is passed into this method. Also it then returns that attribute's associated data. Parameters ---------- attri : string The attribute we are looking for. ''' isCol=False isHead=False if attri in self.dcols: isCol=True elif attri in self.hattrs: isHead=True else: print("That attribute does not exist in this File") print('Returning None') if isCol: return self.getColData(attri) elif isHead: return hattrs
Method that dynamically determines the type of attribute that is passed into this method. Also it then returns that attribute's associated data. Parameters ---------- attri : string The attribute we are looking for.
entailment
def _readFile(self, sldir, fileName, sep): ''' Private method that reads in the header and column data. ''' if sldir.endswith(os.sep): fileName = str(sldir)+str(fileName) else: fileName = str(sldir)+os.sep+str(fileName) fileLines=[] #list of lines in the file header=[] #list of Header lines dataCols=[] #Dictionary of data column names data=[] #List of Data lists cols=[] #List of column names f=open(fileName,'r') fileLines=f.readlines() i=0 if self.datatype != 'trajectory': while i<len(fileLines): if fileLines[i].startswith(self.header_char): tmp=fileLines[i].lstrip(self.header_char) header.append(tmp.strip()) else: break i+=1 cols=fileLines[i].split(sep) tmp=[] tmp1=[] for j in range(len(cols)): tmp1=cols[j].strip() if tmp1 !='': tmp.append(tmp1) cols=tmp i+=1 else: header={} while fileLines[i].startswith('#') or '=' in fileLines[i]: if fileLines[i].startswith('#') and cols==[]: cols=fileLines[i].strip('#') cols=cols.strip() cols=cols.split() elif fileLines[i].startswith('#'): tmp1=fileLines[i].strip('#') tmp1=tmp1.strip() self.headerLines.append(tmp1) elif not fileLines[i].startswith('#'): tmp=fileLines[i].split('=') tmp[0]=tmp[0].strip() tmp[1]=tmp[1].strip() if header=={}: header={str(tmp[0]):str(tmp[1])} else: header[str(tmp[0])]=str(tmp[1]) i+=1 while i<len(fileLines): if fileLines[i].startswith('#'): i=i+1 else: tmp=fileLines[i].split() for j in range(len(tmp)): tmp[j]=tmp[j].strip() data.append(tmp) i+=1 tmp=[] tmp1=[] for j in range(len(data)): for k in range(len(data[j])): tmp1=data[j][k].strip() if tmp1 !='': tmp.append(tmp1) data[j]=tmp tmp=[] tmp=[] for j in range(len(cols)): for k in range(len(data)): try: a=float(data[k][j]) tmp.append(a) except ValueError: tmp.append(data[k][j]) #else: # tmp.append(float(data[k][j])) # previously tmp.append(float(data[k][j])) tmp=array(tmp) if j == 0: dataCols={cols[j]:tmp} else: dataCols[cols[j]]=tmp tmp=[] return header,dataCols
Private method that reads in the header and column data.
entailment
def initial_finall_mass_relation(self,marker='o',linestyle='--'): ''' INtiial to final mass relation ''' final_m=[] ini_m=[] for i in range(len(self.runs_H5_surf)): sefiles=se(self.runs_H5_out[i]) ini_m.append(sefiles.get("mini")) h1=sefiles.get(int(sefiles.se.cycles[-2]),'H-1') mass=sefiles.get(int(sefiles.se.cycles[-2]),'mass') idx=-1 for k in range(len(h1)): if h1[k]>0.1: idx=k break final_m.append(mass[idx]) label='Z='+str(sefiles.get('zini')) plt.plot(ini_m,final_m,label=label,marker=marker,linestyle=linestyle) plt.xlabel('$M_{Initial} [M_{\odot}]$',size=23) plt.ylabel('$M_{Final} [M_{\odot}]$',size=23)
INtiial to final mass relation
entailment
def final_bottom_envelope_set1(self): ''' For paper1 marco routine: Numbers of remnant mass shell masses, exists also in mesa_set! ''' inim=[] remnm=[] for i in range(len(self.runs_H5_surf)): m1p65_last=se(self.runs_H5_out[i]) mass_dummy=m1p65_last.se.get(m1p65_last.se.cycles[len(m1p65_last.se.cycles)-1],'mass') top_of_envelope=mass_dummy[len(mass_dummy)-1] h_dummy=m1p65_last.se.get(m1p65_last.se.cycles[len(m1p65_last.se.cycles)-1],'iso_massf','H-1') for j in range(len(mass_dummy)): if h_dummy[j] > 0.05: bottom_of_envelope = mass_dummy[j] break inim.append(m1p65_last.get("mini")) remnm.append(bottom_of_envelope) print "M_initial | M_remn/bottom of envelope" for i in range(len(inim)): print inim[i],"|",remnm[i]
For paper1 marco routine: Numbers of remnant mass shell masses, exists also in mesa_set!
entailment
def remnant_lifetime_agb(self): ''' For paper1 extension: bottom_envelope Numbers of remnant mass shell masses, exists also in mesa_set + star age! ''' inim=[] remnm=[] time11=[] tottime=[] c_core=[] o_core=[] small_co_core=[] c_core_center=[] for i in range(len(self.runs_H5_surf)): m1p65_last=se(self.runs_H5_out[i]) mass_dummy=m1p65_last.se.get(m1p65_last.se.cycles[len(m1p65_last.se.cycles)-2],'mass') top_of_envelope=mass_dummy[len(mass_dummy)-1] h_dummy=m1p65_last.se.get(m1p65_last.se.cycles[len(m1p65_last.se.cycles)-2],'iso_massf','H-1') c_dummy=m1p65_last.se.get(m1p65_last.se.cycles[len(m1p65_last.se.cycles)-2],'iso_massf','C-12') o_dummy=m1p65_last.se.get(m1p65_last.se.cycles[len(m1p65_last.se.cycles)-2],'iso_massf','O-16') for j in range(len(mass_dummy)): if h_dummy[j] > 1e-1: bottom_of_envelope = mass_dummy[j] break inim.append(m1p65_last.get("mini")) remnm.append(bottom_of_envelope) ###Calculate the lifetime (MS) sefiles=m1p65_last cycs=[] for k in range(5,len(sefiles.se.cycles),5): cycs.append(int(sefiles.se.cycles[k])) w=0 for cyc in cycs: c12_center=sefiles.get(cyc,'C-12')[0] #c12_center=c12[w][0] w+=1 if c12_center>1e-1: time1=(sefiles.get(cyc,'age')*sefiles.get('age_unit'))/31557600. time11.append(time1) break tottime.append(sefiles.get(int(sefiles.se.cycles[-1]),'age')/31557600.) print "M_initial | M_remn/bottom of envelope | total lifetime" for i in range(len(inim)): print inim[i],"|",'{:.3E}'.format(remnm[i]),"|",'{:.3E}'.format(tottime[i])
For paper1 extension: bottom_envelope Numbers of remnant mass shell masses, exists also in mesa_set + star age!
entailment
def plot_surf_composition(self,hsfe_hsls = True,lsfe_hsls = False,rbsr_hsls = False,rbzr_hsls = False,sry_srzr = False,rbfe_sfe = False,iso_ratio= False,isotopes_x = ['Mg-25','Mg-24'],isotopes_y = ['Mg-26','Mg-24'],sparsity = 5000,marker1=['o'],symbols1=['k-'],color=['k']): ''' Set only one of the vriables True to plot quantitie4s. In case you set iso_ratio = True: isotooes_x and isototopes_y are used. ''' import nugridse as mp import matplotlib.pyplot as pl import sys plotting=True ### SET THIS IF PLOTTING = TRUE #################### ###>>>>!!!! ONLY ONE OPTION CAN BE TRUE !!!!<<<< ### #test=[hsfe_hsls,lsfe_hsls, #hsfe_hsls = False #lsfe_hsls = False #rbsr_hsls = False #rbzr_hsls = False #sry_srzr = False #rbfe_sfe = False #iso_ratio= True ################################### for i in range(len(self.runs_H5_surf)): m3z2m2=se(self.runs_H5_surf[i]) run_mass=[m3z2m2] markers=[marker1[i]]#['ks']#,'r^','gh','bd','mv','cH','kD'] symbol=[symbols1[i]]#['k-']#,'r--','g-','b:','m-','c--','k-'] color1=color[i] ls_element = ['Sr-84','Sr-86','Sr-87','Sr-88','Y-89','Zr-90','Zr-91','Zr-92','Zr-94','Zr-96'] hs_element = ['Ba-130','Ba-132','Ba-134','Ba-135','Ba-136','Ba-137','Ba-138','La-138','La-139','Nd-142','Nd-143','Nd-144','Nd-145','Nd-146','Nd-148','Nd-150','Sm-144','Sm-147','Sm-148','Sm-149','Sm-150','Sm-152','Sm-154'] # notice that Pb (3rd s-process index) is not included here. 
s_element = ['Sr-84','Sr-86','Sr-87','Sr-88','Y-89','Zr-90','Zr-91','Zr-92','Zr-94','Zr-96','Ba-130','Ba-132','Ba-134','Ba-135','Ba-136','Ba-137','Ba-138','La-138','La-139','Nd-142','Nd-143','Nd-144','Nd-145','Nd-146','Nd-148','Nd-150','Sm-144','Sm-147','Sm-148','Sm-149','Sm-150','Sm-152','Sm-154'] ls_element = ['Sr-84','Sr-86','Sr-87','Sr-88','Y-89','Zr-90','Zr-91','Zr-92','Zr-94','Zr-96'] hs_element = ['Ba-130','Ba-132','Ba-134','Ba-135','Ba-136','Ba-137','Ba-138','La-138','La-139','Nd-142','Nd-143','Nd-144','Nd-145','Nd-146','Nd-148','Nd-150','Sm-144','Sm-147','Sm-148','Sm-149','Sm-150','Sm-152','Sm-154'] # notice that Pb (3rd s-process index) is not included here. s_element = ['Sr-84','Sr-86','Sr-87','Sr-88','Y-89','Zr-90','Zr-91','Zr-92','Zr-94','Zr-96','Ba-130','Ba-132','Ba-134','Ba-135','Ba-136','Ba-137','Ba-138','La-138','La-139','Nd-142','Nd-143','Nd-144','Nd-145','Nd-146','Nd-148','Nd-150','Sm-144','Sm-147','Sm-148','Sm-149','Sm-150','Sm-152','Sm-154'] s_ini = [] ls_ini = [] hs_ini = [] pb_ini = [] fe_ini = [] rb_ini = [] sr_ini = [] zr_ini = [] y_ini = [] zra_ini = [] zrb_ini = [] gda_ini = [] gdb_ini = [] if plotting: if iso_ratio: isotopes_x = ['Mg-25','Mg-24'] isotopes_y = ['Mg-26','Mg-24'] #isotopes_x = ['Zr-96','Zr-94'] #isotopes_y = ['Gd-152','Gd-154'] #isotopes_y = ['Zr-90','Zr-94'] #isotopes_y = ['Zr-91','Zr-94'] #isotopes_y = ['Zr-92','Zr-94'] #isotopes_x = ['Ba-135','Ba-136'] #isotopes_y = ['Ba-138','Ba-136'] #isotopes_y = ['Ba-134','Ba-136'] #isotopes_y = ['Ba-137','Ba-136'] #isotopes_x = ['Sr-84','Sr-86'] #isotopes_y = ['Sr-87','Sr-86'] #isotopes_y = ['Sr-88','Sr-86'] # get initial value initial_isotopic_ratio_x = float(run_mass[0].se.get(min(run_mass[0].se.cycles),'iso_massf',isotopes_x[0]))/float(run_mass[0].se.get(min(run_mass[0].se.cycles),'iso_massf',isotopes_x[1])) initial_isotopic_ratio_y = 
float(run_mass[0].se.get(min(run_mass[0].se.cycles),'iso_massf',isotopes_y[0]))/float(run_mass[0].se.get(min(run_mass[0].se.cycles),'iso_massf',isotopes_y[1])) # sparcity for cycles I am looking at. #sparsity = 5000 I_want_iso_ratio = False I_want_delta = True isotopic_ratio_x = [] isotopic_ratio_y = [] isotopic_ratio_x_tps = [] isotopic_ratio_y_tps = [] isotopic_ratio_x_tps_co = [] isotopic_ratio_y_tps_co = [] k = 0 for i in run_mass: dum_isotopic_ratio_x = [] dum_isotopic_ratio_y = [] dumdum_isotopic_ratio_x = [] dumdum_isotopic_ratio_y = [] dum_isotopic_ratio_x_tps = [] dum_isotopic_ratio_y_tps = [] dum_isotopic_ratio_x_tps_co = [] dum_isotopic_ratio_y_tps_co = [] co_ratio=[] for j in i.se.cycles[0::sparsity]: print 'j= ', j dum_isotopic_ratio_x.append(float(i.se.get(j,'iso_massf',isotopes_x[0]))/float(i.se.get(j,'iso_massf',isotopes_x[1]))) dum_isotopic_ratio_y.append(float(i.se.get(j,'iso_massf',isotopes_y[0]))/float(i.se.get(j,'iso_massf',isotopes_y[1]))) co_ratio.append((float(i.se.get(j,'iso_massf','C-12')*4.))/(float(i.se.get(j,'iso_massf','O-16'))*3)) if (len(co_ratio)>1): if (co_ratio[len(co_ratio)-1]>(co_ratio[len(co_ratio)-2]+0.02)): dum_isotopic_ratio_x_tps.append(float(i.se.get(j,'iso_massf',isotopes_x[0]))/float(i.se.get(j,'iso_massf',isotopes_x[1]))) dum_isotopic_ratio_y_tps.append(float(i.se.get(j,'iso_massf',isotopes_y[0]))/float(i.se.get(j,'iso_massf',isotopes_y[1]))) if (co_ratio[len(co_ratio)-1]>1.): dum_isotopic_ratio_x_tps_co.append(float(i.se.get(j,'iso_massf',isotopes_x[0]))/float(i.se.get(j,'iso_massf',isotopes_x[1]))) dum_isotopic_ratio_y_tps_co.append(float(i.se.get(j,'iso_massf',isotopes_y[0]))/float(i.se.get(j,'iso_massf',isotopes_y[1]))) if I_want_delta: dumdum_isotopic_ratio_x=(np.array(dum_isotopic_ratio_x)/initial_isotopic_ratio_x-1.)*1000. dumdum_isotopic_ratio_y=(np.array(dum_isotopic_ratio_y)/initial_isotopic_ratio_y-1.)*1000. dumdum_isotopic_ratio_x_tps=(np.array(dum_isotopic_ratio_x_tps)/initial_isotopic_ratio_x-1.)*1000. 
dumdum_isotopic_ratio_y_tps=(np.array(dum_isotopic_ratio_y_tps)/initial_isotopic_ratio_y-1.)*1000. dumdum_isotopic_ratio_x_tps_co=(np.array(dum_isotopic_ratio_x_tps_co)/initial_isotopic_ratio_x-1.)*1000. dumdum_isotopic_ratio_y_tps_co=(np.array(dum_isotopic_ratio_y_tps_co)/initial_isotopic_ratio_y-1.)*1000. if I_want_iso_ratio: isotopic_ratio_x.append(dum_isotopic_ratio_x) isotopic_ratio_y.append(dum_isotopic_ratio_y) else: isotopic_ratio_x.append(dumdum_isotopic_ratio_x) isotopic_ratio_y.append(dumdum_isotopic_ratio_y) isotopic_ratio_x_tps.append(dumdum_isotopic_ratio_x_tps) isotopic_ratio_y_tps.append(dumdum_isotopic_ratio_y_tps) isotopic_ratio_x_tps_co.append(dumdum_isotopic_ratio_x_tps_co) isotopic_ratio_y_tps_co.append(dumdum_isotopic_ratio_y_tps_co) k = k+1 markersss=['ko','b^','rh','gd','mv','cH','kD'] symbol=['k-','b--','r-.','g:','m-','c--','k-'] mass_label =[] for i in run_mass: mini=float(i.se.get('mini')) zini=float(i.se.get('zini')) label=str(mini)+'$M_{\odot}$, Z='+str(zini) mass_label.append(label) metallicity_label=['M3z2m2','M3z2m2_nomol','M3z1m2','3 Msun, Z=0.02, PI13'] metallicity_label=[label] array_to_plot_x = isotopic_ratio_x array_to_plot_y = isotopic_ratio_y array_to_plot_x_tps = isotopic_ratio_x_tps array_to_plot_y_tps = isotopic_ratio_y_tps array_to_plot_x_tps_co = isotopic_ratio_x_tps_co array_to_plot_y_tps_co = isotopic_ratio_y_tps_co if I_want_delta: pl.axhline(y=0.,linewidth=2, color='k') pl.axvline(x=0.,linewidth=2, color='k') else: # pl.axhline(y=initial_isotopic_ratio_y/initial_isotopic_ratio_y,linewidth=2, color='k') pl.axhline(y=initial_isotopic_ratio_y,linewidth=2, color='k') # pl.axvline(x=initial_isotopic_ratio_x/initial_isotopic_ratio_x,linewidth=2, color='k') pl.axvline(x=initial_isotopic_ratio_x,linewidth=2, color='k') for k in range(0,len(array_to_plot_x)): if I_want_delta: pl.plot(array_to_plot_x[k],array_to_plot_y[k],symbol[k],markersize=10.,linewidth=4.,label=str(metallicity_label[k])) 
pl.plot(array_to_plot_x_tps[k],array_to_plot_y_tps[k],markersss[k],markersize=10.,linewidth=4.) pl.plot(array_to_plot_x_tps_co[k],array_to_plot_y_tps_co[k],markersss[k],markersize=20.,linewidth=4.) else: # pl.loglog(array_to_plot_x[k],array_to_plot_y[k],symbol[k],markersize=10.,linewidth=2.,label=str(mass_label[k])+'Msun ') pl.loglog(array_to_plot_x[k],array_to_plot_y[k],symbol[k],markersize=10.,linewidth=2.,label=str(mass_label[k])+'Msun ') pl.legend(numpoints=1,loc=2,prop={'size':20}) size=20 pl.xticks(size=size) pl.yticks(size=size) #pl.axis([0.08,0.5,0.1,1.]) #pl.grid() if I_want_delta: #pl.xlabel(r'$\delta$($^{25}$Mg/$^{24}$Mg)', fontsize=30) #pl.ylabel(r'$\delta$($^{26}$Mg/$^{24}$Mg)', fontsize=30) pl.xlabel(r'$\delta$($^{96}$Zr/$^{94}$Zr)', fontsize=20) pl.ylabel(r'$\delta$($^{152}$Gd/$^{154}$Gd)', fontsize=20) #pl.ylabel(r'$\delta$($^{90}$Zr/$^{94}$Zr)', fontsize=30) #pl.ylabel(r'$\delta$($^{91}$Zr/$^{94}$Zr)', fontsize=30) #pl.ylabel(r'$\delta$($^{92}$Zr/$^{94}$Zr)', fontsize=30) #pl.xlabel('d($^{135}$Ba/$^{136}$Ba)', fontsize=20) #pl.ylabel('d($^{138}$Ba/$^{136}$Ba)', fontsize=20) #pl.ylabel('d($^{134}$Ba/$^{136}$Ba)', fontsize=20) #pl.ylabel('d($^{137}$Ba/$^{136}$Ba)', fontsize=20) #pl.xlabel(r'$\delta$($^{84}$Sr/$^{86}$Sr)', fontsize=20) #pl.ylabel(r'$\delta$($^{87}$Sr/$^{86}$Sr)', fontsize=20) #pl.ylabel(r'$\delta$($^{88}$Sr/$^{86}$Sr)', fontsize=20) else: pl.xlabel('$^{25}$Mg/$^{24}$Mg', fontsize=20) pl.ylabel('$^{26}$Mg/$^{24}$Mg', fontsize=20) #pl.xlabel('$^{96}$Zr/$^{94}$Zr', fontsize=30) #pl.ylabel('$^{152}$Gd/$^{154}$Gd', fontsize=30) #pl.ylabel('$^{90}$Zr/$^{94}$Zr', fontsize=30) #pl.ylabel('$^{91}$Zr/$^{94}$Zr', fontsize=30) #pl.ylabel('$^{92}$Zr/$^{94}$Zr', fontsize=30) #pl.xlabel('d($^{135}$Ba/$^{136}$Ba)', fontsize=20) #pl.ylabel('d($^{138}$Ba/$^{136}$Ba)', fontsize=20) #pl.ylabel('d($^{134}$Ba/$^{136}$Ba)', fontsize=20) #pl.ylabel('d($^{137}$Ba/$^{136}$Ba)', fontsize=20) #pl.xlabel('$^{84}$Sr/$^{86}$Sr', fontsize=20) 
#pl.ylabel('$^{87}$Sr/$^{86}$Sr', fontsize=20) #pl.ylabel('$^{88}$Sr/$^{86}$Sr', fontsize=20) sys.exit() for i in run_mass: dum_s_ini = 0. dum_ls_ini = 0. dum_hs_ini = 0. dum_pb_ini = 0. dum_fe_ini = 0. dum_rb_ini = 0. dum_sr_ini = 0. dum_zr_ini = 0. dum_pb_ini = float(i.se.get(min(i.se.cycles),'iso_massf','Pb-204'))+float(i.se.get(min(i.se.cycles),'iso_massf','Pb-206'))+float(i.se.get(min(i.se.cycles),'iso_massf','Pb-207'))+float(i.se.get(min(i.se.cycles),'iso_massf','Pb-208')) # dum_fe_ini = float(i.se.get(min(i.se.cycles),'iso_massf','Fe-54'))+float(i.se.get(min(i.se.cycles),'iso_massf','Fe-56'))+float(i.se.get(min(i.se.cycles),'iso_massf','Fe-57'))+float(i.se.get(min(i.se.cycles),'iso_massf','Fe-58')) dum_rb_ini = float(i.se.get(min(i.se.cycles),'iso_massf','Rb-85'))+float(i.se.get(min(i.se.cycles),'iso_massf','Rb-87')) dum_sr_ini = float(i.se.get(min(i.se.cycles),'iso_massf','Sr-84'))+float(i.se.get(min(i.se.cycles),'iso_massf','Sr-86'))+float(i.se.get(min(i.se.cycles),'iso_massf','Sr-87'))+float(i.se.get(min(i.se.cycles),'iso_massf','Sr-88')) dum_zr_ini = float(i.se.get(min(i.se.cycles),'iso_massf','Zr-90'))+float(i.se.get(min(i.se.cycles),'iso_massf','Zr-91'))+float(i.se.get(min(i.se.cycles),'iso_massf','Zr-92'))+float(i.se.get(min(i.se.cycles),'iso_massf','Zr-94'))+float(i.se.get(min(i.se.cycles),'iso_massf','Zr-96')) dum_y_ini = float(i.se.get(min(i.se.cycles),'iso_massf','Y-89')) dum_zra_ini = float(i.se.get(min(i.se.cycles),'iso_massf','Zr-96')) dum_gda_ini = float(i.se.get(min(i.se.cycles),'iso_massf','Gd-152')) dum_zrb_ini = float(i.se.get(min(i.se.cycles),'iso_massf','Zr-94')) dum_gdb_ini = float(i.se.get(min(i.se.cycles),'iso_massf','Gd-154')) for j in ls_element: dum_ls_ini = dum_ls_ini + float(i.se.get(min(i.se.cycles),'iso_massf',j)) for j in hs_element: dum_hs_ini = dum_hs_ini + float(i.se.get(min(i.se.cycles),'iso_massf',j)) for j in s_element: dum_s_ini = dum_s_ini + float(i.se.get(min(i.se.cycles),'iso_massf',j)) fe_ini.append(dum_fe_ini) 
pb_ini.append(dum_pb_ini) ls_ini.append(dum_ls_ini) # /float(len(ls_element))) hs_ini.append(dum_hs_ini) #/float(len(hs_element))) s_ini.append(dum_s_ini) #/float(len(s_element))) rb_ini.append(dum_rb_ini) sr_ini.append(dum_sr_ini) zr_ini.append(dum_zr_ini) y_ini.append(dum_y_ini) zra_ini.append(dum_zra_ini) gda_ini.append(dum_gda_ini) zrb_ini.append(dum_zrb_ini) gdb_ini.append(dum_gdb_ini) # sparcity for cycles I am looking at. sparsity = 5000 s_fe = [] ls_fe = [] hs_fe = [] pb_fe = [] hs_ls = [] rb_sr = [] rb_zr = [] rb_fe = [] sr_y = [] sr_zr = [] sr_zr_tps=[] sr_y_tps=[] sr_zr_tps_co=[] sr_y_tps_co=[] s_fe_tps = [] ls_fe_tps = [] hs_fe_tps = [] pb_fe_tps = [] hs_ls_tps = [] rb_sr_tps = [] rb_zr_tps = [] rb_fe_tps = [] s_fe_tps_co = [] ls_fe_tps_co = [] hs_fe_tps_co = [] pb_fe_tps_co = [] hs_ls_tps_co = [] rb_sr_tps_co = [] rb_zr_tps_co = [] rb_fe_tps_co = [] gd=[] zr=[] gd_tps=[] zr_tps=[] gd_tps_co=[] zr_tps_co=[] co=[] k = 0 for i in run_mass: jjjj=0 dum_s_fe = [] dum_ls_fe = [] dum_hs_fe = [] dum_pb_fe = [] dum_hs_ls = [] dum_rb_sr = [] dum_rb_zr = [] dum_rb_fe = [] dum_sr_y = [] dum_sr_zr = [] dum_sr_zr_tps=[] dum_sr_y_tps=[] dum_sr_zr_tps_co=[] dum_sr_y_tps_co=[] dum_s_fe_tps = [] dum_ls_fe_tps = [] dum_hs_fe_tps = [] dum_pb_fe_tps = [] dum_hs_ls_tps = [] dum_rb_sr_tps = [] dum_rb_fe_tps = [] dum_rb_zr_tps = [] dum_sr_y_tps = [] dum_sr_zr_tps = [] dum_s_fe_tps_co = [] dum_ls_fe_tps_co = [] dum_hs_fe_tps_co = [] dum_pb_fe_tps_co = [] dum_hs_ls_tps_co = [] dum_rb_sr_tps_co = [] dum_rb_zr_tps_co = [] dum_rb_fe_tps_co = [] dum_rb_sr_tps_co = [] dum_sr_y_tps_co = [] dum_sr_zr_tps_co = [] dum_gd=[] dum_zrr=[] dum_gd_tps=[] dum_zr_tps=[] dum_gd_tps_co=[] dum_zr_tps_co=[] dum_co=[] for j in i.se.cycles[0::sparsity]: dum_s = 0. dum_ls = 0. dum_hs = 0. dum_fe = 0. dum_pb = 0. dum_rb = 0. dum_sr = 0. dum_zr = 0. dum_y = 0. dum_c = 0. dum_o = 0. dum_gda = 0. dum_gdb = 0. dum_zra = 0. dum_zrb = 0. 
print j dum_fe = float(i.se.get(j,'iso_massf','Fe-54'))+float(i.se.get(j,'iso_massf','Fe-56'))+float(i.se.get(j,'iso_massf','Fe-57'))+float(i.se.get(j,'iso_massf','Fe-58')) dum_pb = float(i.se.get(j,'iso_massf','Pb-204'))+float(i.se.get(j,'iso_massf','Pb-206'))+float(i.se.get(j,'iso_massf','Pb-207'))+float(i.se.get(j,'iso_massf','Pb-208')) dum_rb = float(i.se.get(j,'iso_massf','Rb-85'))+float(i.se.get(j,'iso_massf','Rb-87')) dum_sr = float(i.se.get(j,'iso_massf','Sr-84'))+float(i.se.get(j,'iso_massf','Sr-86'))+float(i.se.get(j,'iso_massf','Sr-87'))+float(i.se.get(j,'iso_massf','Sr-88')) dum_zr = float(i.se.get(j,'iso_massf','Zr-90'))+float(i.se.get(j,'iso_massf','Zr-91'))+float(i.se.get(j,'iso_massf','Zr-92'))+float(i.se.get(j,'iso_massf','Zr-94'))+float(i.se.get(j,'iso_massf','Zr-96')) dum_y = float(i.se.get(j,'iso_massf','Y-89')) dum_c = float(i.se.get(j,'iso_massf','C-12')) dum_o = float(i.se.get(j,'iso_massf','O-16')) dum_gda = float(i.se.get(j,'iso_massf','Gd-152')) dum_gdb = float(i.se.get(j,'iso_massf','Gd-154')) dum_zra = float(i.se.get(j,'iso_massf','Zr-96')) dum_zrb = float(i.se.get(j,'iso_massf','Zr-94')) dum_c=(float((i.se.get((int(j)),'iso_massf','C-12')))+float((i.se.get(j,'iso_massf','C-13')))) dum_o=(float((i.se.get((int(j)),'iso_massf','O-16')))+float((i.se.get(j,'iso_massf','O-17')))) dum_c12=(float((i.se.get((int(j)),'iso_massf','C-12')))) dum_c13=(float((i.se.get((int(j)),'iso_massf','C-13')))) dum_co.append(((dum_c/dum_o)*(16./12.))) for jj in ls_element: dum_ls = dum_ls + float(i.se.get(j,'iso_massf',jj)) #/float(len(ls_element))) for jj in hs_element: dum_hs = dum_hs + float(i.se.get(j,'iso_massf',jj)) #/float(len(hs_element))) for jj in s_element: dum_s = dum_s + float(i.se.get(j,'iso_massf',jj)) #/float(len(s_element))) dum_s_fe.append(log10((dum_s/dum_fe)/(s_ini[k]/fe_ini[k]))) dum_ls_fe.append(log10((dum_ls/dum_fe)/(ls_ini[k]/fe_ini[k]))) dum_hs_fe.append(log10((dum_hs/dum_fe)/(hs_ini[k]/fe_ini[k]))) 
dum_pb_fe.append(log10((dum_pb/dum_fe)/(pb_ini[k]/fe_ini[k]))) dum_hs_ls.append(log10((dum_hs/dum_ls)/(hs_ini[k]/ls_ini[k]))) dum_rb_sr.append(log10((dum_rb/dum_sr)/(rb_ini[k]/sr_ini[k]))) dum_rb_zr.append(log10((dum_rb/dum_zr)/(rb_ini[k]/zr_ini[k]))) dum_sr_y.append(log10((dum_sr/dum_y)/(sr_ini[k]/y_ini[k]))) dum_sr_zr.append(log10((dum_sr/dum_zr)/(sr_ini[k]/zr_ini[k]))) dum_gd.append((dum_gda/dum_gdb)/(gda_ini[k]/gdb_ini[k])) dum_zrr.append((dum_zra/dum_zrb)/(zra_ini[k]/zrb_ini[k])) # dum_rb_fe.append(log10((dum_rb/dum_fe)/(rb_ini[k]/fe_ini[k]))-0.2) ## correction for Lambert 1995 data dum_rb_fe.append(log10((dum_rb/dum_fe)/(rb_ini[k]/fe_ini[k]))-0.068) ## correction for Zamora 2009 data if (len(dum_co)>1): if (dum_co[len(dum_co)-1]>(dum_co[len(dum_co)-2]+0.02)): dum_sr_zr_tps.append(log10((dum_sr/dum_zr)/(sr_ini[k]/zr_ini[k]))) dum_sr_y_tps.append(log10((dum_sr/dum_y)/(sr_ini[k]/y_ini[k]))) dum_s_fe_tps.append(log10((dum_s/dum_fe)/(s_ini[k]/fe_ini[k]))) dum_ls_fe_tps.append(log10((dum_ls/dum_fe)/(ls_ini[k]/fe_ini[k]))) dum_hs_fe_tps.append(log10((dum_hs/dum_fe)/(hs_ini[k]/fe_ini[k]))) dum_pb_fe_tps.append(log10((dum_pb/dum_fe)/(pb_ini[k]/fe_ini[k]))) dum_hs_ls_tps.append(log10((dum_hs/dum_ls)/(hs_ini[k]/ls_ini[k]))) dum_rb_sr_tps.append(log10((dum_rb/dum_sr)/(rb_ini[k]/sr_ini[k]))) dum_rb_zr_tps.append(log10((dum_rb/dum_zr)/(rb_ini[k]/zr_ini[k]))) # dum_rb_fe_tps.append(log10((dum_rb/dum_fe)/(rb_ini[k]/fe_ini[k]))-0.2) ## correction for Lambert 1995 data dum_rb_fe_tps.append(log10((dum_rb/dum_fe)/(rb_ini[k]/fe_ini[k]))-0.068) ## correction for Zamora 2009 data dum_gd_tps.append((((dum_gda/dum_gdb)/(gda_ini[k]/gdb_ini[k]))-1)*1000.) dum_zr_tps.append((((dum_zra/dum_zrb)/(zra_ini[k]/zrb_ini[k]))-1)*1000.) 
if (dum_co[len(dum_co)-1]>1.): dum_sr_zr_tps_co.append(log10((dum_sr/dum_zr)/(sr_ini[k]/zr_ini[k]))) dum_sr_y_tps_co.append(log10((dum_sr/dum_y)/(sr_ini[k]/y_ini[k]))) dum_s_fe_tps_co.append(log10((dum_s/dum_fe)/(s_ini[k]/fe_ini[k]))) dum_ls_fe_tps_co.append(log10((dum_ls/dum_fe)/(ls_ini[k]/fe_ini[k]))) dum_hs_fe_tps_co.append(log10((dum_hs/dum_fe)/(hs_ini[k]/fe_ini[k]))) dum_pb_fe_tps_co.append(log10((dum_pb/dum_fe)/(pb_ini[k]/fe_ini[k]))) dum_hs_ls_tps_co.append(log10((dum_hs/dum_ls)/(hs_ini[k]/ls_ini[k]))) dum_rb_sr_tps_co.append(log10((dum_rb/dum_sr)/(rb_ini[k]/sr_ini[k]))) dum_rb_zr_tps_co.append(log10((dum_rb/dum_zr)/(rb_ini[k]/zr_ini[k]))) ### dum_rb_fe_tps_co.append(log10((dum_rb/dum_fe)/(rb_ini[k]/fe_ini[k]))-0.2) dum_rb_zr_tps_co.append(log10((dum_rb/dum_zr)/(rb_ini[k]/zr_ini[k]))) # dum_rb_fe_tps_co.append(log10((dum_rb/dum_fe)/(rb_ini[k]/fe_ini[k]))-0.2) ## correction for Lambert 1995 data dum_rb_fe_tps_co.append(log10((dum_rb/dum_fe)/(rb_ini[k]/fe_ini[k]))-0.068) ## correction for Zamora 2009 data dum_gd_tps_co.append((((dum_gda/dum_gdb)/(gda_ini[k]/gdb_ini[k]))-1)*1000.) dum_zr_tps_co.append((((dum_zra/dum_zrb)/(zra_ini[k]/zrb_ini[k]))-1)*1000.) 
jjjj=jjjj+1 # s_fe.append(dum_s_fe) ls_fe.append(dum_ls_fe) hs_fe.append(dum_hs_fe) pb_fe.append(dum_pb_fe) hs_ls.append(dum_hs_ls) rb_sr.append(dum_rb_sr) rb_zr.append(dum_rb_zr) rb_fe.append(dum_rb_fe) sr_y.append(dum_sr_y) sr_zr.append(dum_sr_zr) sr_zr_tps.append(dum_sr_zr_tps) sr_y_tps.append(dum_sr_y_tps) sr_zr_tps_co.append(dum_sr_zr_tps_co) sr_y_tps_co.append(dum_sr_y_tps_co) s_fe_tps.append(dum_s_fe_tps) ls_fe_tps.append(dum_ls_fe_tps) hs_fe_tps.append(dum_hs_fe_tps) pb_fe_tps.append(dum_pb_fe_tps) hs_ls_tps.append(dum_hs_ls_tps) rb_sr_tps.append(dum_rb_sr_tps) rb_zr_tps.append(dum_rb_zr_tps) rb_fe_tps.append(dum_rb_fe_tps) s_fe_tps_co.append(dum_s_fe_tps_co) ls_fe_tps_co.append(dum_ls_fe_tps_co) hs_fe_tps_co.append(dum_hs_fe_tps_co) pb_fe_tps_co.append(dum_pb_fe_tps_co) hs_ls_tps_co.append(dum_hs_ls_tps_co) rb_sr_tps_co.append(dum_rb_sr_tps_co) rb_zr_tps_co.append(dum_rb_zr_tps_co) rb_fe_tps_co.append(dum_rb_fe_tps_co) gd.append(dum_gd) zr.append(dum_zrr) gd_tps.append(dum_gd_tps) zr_tps.append(dum_zr_tps) gd_tps_co.append(dum_gd_tps_co) zr_tps_co.append(dum_zr_tps_co) co.append(dum_co) k = k+1 if plotting: mass_label =[] for i in run_mass: mini=float(i.se.get('mini')) zini=float(i.se.get('zini')) label=str(mini)+'$M_{\odot}$, Z='+str(zini) mass_label.append(label) metallicity_label=['3 Msun, Z=0.02','3 Msun, Z=0.02, no mol diff','3 Msun, Z=0.01','M3 set1.2'] fig = plt.figure(0) # Figure object ax = fig.add_subplot(1,1,1) # Axes object: one row, one column, first plot (one plot!) 
mpl.rcParams['xtick.major.size'] = 20 #mpl.rcParams['xtick.major.width'] = 4 mpl.rcParams['xtick.minor.size'] = 10 #mpl.rcParams['xtick.minor.width'] = 2 mpl.rcParams['ytick.major.size'] = 20 #mpl.rcParams['ytick.major.width'] = 4 mpl.rcParams['ytick.minor.size'] = 10 #mpl.rcParams['ytick.minor.width'] = 2 if hsfe_hsls: array_to_plot_x = hs_ls array_to_plot_x_tps = hs_ls_tps array_to_plot_x_co = hs_ls_tps_co array_to_plot_y = hs_fe array_to_plot_y_tps = hs_fe_tps array_to_plot_y_co = hs_fe_tps_co pl.axhline(y=0,linewidth=2, color='k') pl.axvline(x=0,linewidth=2, color='k') for k in range(0,len(array_to_plot_x)): #if k > 0: pl.plot(array_to_plot_x[k],array_to_plot_y[k],symbol[k],markersize=10.,linewidth=3.,color=color1) #pl.plot(array_to_plot_x[k],array_to_plot_y[k],symbol[k],markersize=10.,linewidth=3.,label=str(metallicity_label[k])) #if k >= 1: pl.plot(array_to_plot_x_tps[k],array_to_plot_y_tps[k],markers[k],markersize=12.,color=color1) pl.plot(array_to_plot_x_co[k],array_to_plot_y_co[k],markers[k],markersize=20.,color=color1,label=str(mass_label[k])) pl.legend(numpoints=1,loc=2,prop={'size':20}) pl.xlabel('[hs/ls]', fontsize=20) pl.ylabel('[hs/Fe]', fontsize=20) y_min = -0.1 y_max = 1.0 pl.ylim(y_min,y_max) pl.xlim(-0.1,0.5) size=20 pl.xticks(size=size) pl.yticks(size=size) ax = pl.gca() for line in ax.xaxis.get_ticklines(): line.set_markersize(20) line.set_markeredgewidth(3) for line in ax.yaxis.get_ticklines(): line.set_markersize(20) line.set_markeredgewidth(3) if lsfe_hsls: array_to_plot_x = hs_ls array_to_plot_x_tps = hs_ls_tps array_to_plot_x_co = hs_ls_tps_co array_to_plot_y = ls_fe array_to_plot_y_tps = ls_fe_tps array_to_plot_y_co = ls_fe_tps_co pl.axhline(y=0,linewidth=2, color='k') pl.axvline(x=0,linewidth=2, color='k') for k in range(0,len(array_to_plot_x)): # if k > 0: pl.plot(array_to_plot_x[k],array_to_plot_y[k],symbol[k],markersize=10.,linewidth=4.,label=str(mass_label[k])) 
pl.plot(array_to_plot_x_tps[k],array_to_plot_y_tps[k],markers[k],markersize=12.) pl.plot(array_to_plot_x_co[k],array_to_plot_y_co[k],markers[k],markersize=20.) pl.legend(numpoints=1,loc='upper right',prop={'size':20}) pl.xlabel('[hs/ls]', fontsize=20) pl.ylabel('[ls/Fe]', fontsize=20) y_min = -0.1 y_max = 0.8 pl.ylim(y_min,y_max) pl.xlim(-0.2,0.5) size=20 pl.xticks(size=size) pl.yticks(size=size) ax = pl.gca() for line in ax.xaxis.get_ticklines(): line.set_markersize(20) line.set_markeredgewidth(3) for line in ax.yaxis.get_ticklines(): line.set_markersize(20) line.set_markeredgewidth(3) if rbsr_hsls: array_to_plot_x = hs_ls array_to_plot_y = rb_sr pl.axhline(y=0,linewidth=2, color='k') pl.axvline(x=0,linewidth=2, color='k') for k in range(0,len(array_to_plot_x)): # if k > 0: if k >= 1: pl.plot(array_to_plot_x[k],array_to_plot_y[k],symbol[k],markersize=10.,linewidth=4.,label=str(mass_label[k])) else: pl.plot(array_to_plot_x[k],array_to_plot_y[k],symbol[k],markersize=10.,linewidth=4.,label=str(mass_label[k])) pl.legend(numpoints=1,loc='upper right',prop={'size':20}) pl.xlabel('[hs/ls]', fontsize=20) pl.ylabel('[Rb/Sr]', fontsize=20) y_min = -0.4 y_max = 1.5 x = ax.get_position() pl.ylim(y_min,y_max) pl.xlim(-0.7,0.4) size=30 pl.xticks(size=size) pl.yticks(size=size) ax = pl.gca() for line in ax.xaxis.get_ticklines(): line.set_markersize(20) line.set_markeredgewidth(3) for line in ax.yaxis.get_ticklines(): line.set_markersize(20) line.set_markeredgewidth(3) if rbzr_hsls: array_to_plot_x = hs_ls array_to_plot_y = rb_zr pl.axhline(y=0,linewidth=2, color='k') pl.axvline(x=0,linewidth=2, color='k') for k in range(0,len(array_to_plot_x)): # if k > 0: if k >= 1: pl.plot(array_to_plot_x[k],array_to_plot_y[k],symbol[k],markersize=10.,linewidth=2.,label=str(mass_label[k])) pl.plot(hs_ls_tps[k],rb_zr_tps[k],markers[k],markersize=12.) pl.plot(hs_ls_tps_co[k],rb_zr_tps_co[k],markers[k],markersize=20.) 
else: pl.plot(array_to_plot_x[k],array_to_plot_y[k],symbol[k],markersize=10.,linewidth=2.,label=str(mass_label[k])) pl.plot(hs_ls_tps[k],rb_zr_tps[k],markers[k],markersize=12.) pl.plot(hs_ls_tps_co[k],rb_zr_tps_co[k],markers[k],markersize=20.) pl.legend(numpoints=1,loc='upper right',prop={'size':20}) pl.xlabel('[hs/ls]', fontsize=20) pl.ylabel('[Rb/Zr]', fontsize=20) y_min = -0.4 y_max = 0.4 pl.ylim(y_min,y_max) pl.xlim(-0.5,0.5) size=20 pl.xticks(size=size) pl.yticks(size=size) ax = pl.gca() for line in ax.xaxis.get_ticklines(): line.set_markersize(20) line.set_markeredgewidth(3) for line in ax.yaxis.get_ticklines(): line.set_markersize(20) line.set_markeredgewidth(3) if rbfe_sfe: array_to_plot_x = s_fe array_to_plot_y = rb_fe pl.axhline(y=0,linewidth=3, color='k') pl.axvline(x=0,linewidth=3, color='k') for k in range(0,len(array_to_plot_x)): # if k > 0: if k >= 1: # pl.plot(array_to_plot_x[k],array_to_plot_y[k],symbol[k],markersize=10.,linewidth=2.,label=str(mass_label[k])+'Msun '+str(metallicity_label[k])) pl.plot(array_to_plot_x[k],array_to_plot_y[k],symbol[k],markersize=10.,linewidth=2.,label=str(metallicity_label[k])) pl.plot(s_fe_tps[k],rb_fe_tps[k],markers[k],markersize=12.) pl.plot(s_fe_tps_co[k],rb_fe_tps_co[k],markers[k],markersize=20.) else: # pl.plot(array_to_plot_x[k],array_to_plot_y[k],symbol[k],markersize=10.,linewidth=2.,label=str(mass_label[k])+'Msun '+str(metallicity_label[k])) pl.plot(array_to_plot_x[k],array_to_plot_y[k],symbol[k],markersize=10.,linewidth=2.,label=str(metallicity_label[k])) pl.plot(s_fe_tps[k],rb_fe_tps[k],markers[k],markersize=12.) pl.plot(s_fe_tps_co[k],rb_fe_tps_co[k],markers[k],markersize=20.) 
pl.legend(numpoints=1,loc=4,prop={'size':20}) pl.xlabel('[s/Fe]', fontsize=20) pl.ylabel('[Rb/Fe]', fontsize=20) y_min = -0.4 y_max = 0.4 pl.ylim(y_min,y_max) pl.xlim(-0.1,2.0) size=20 pl.xticks(size=size) pl.yticks(size=size) ax = pl.gca() for line in ax.xaxis.get_ticklines(): line.set_markersize(25) line.set_markeredgewidth(3) for line in ax.yaxis.get_ticklines(): line.set_markersize(25) line.set_markeredgewidth(3) if sry_srzr: array_to_plot_x = sr_zr array_to_plot_y = sr_y pl.axhline(y=0,linewidth=2, color='k') pl.axvline(x=0,linewidth=2, color='k') for k in range(0,len(array_to_plot_x)): if k >= 1: # if k > 0: pl.plot(array_to_plot_x[k],array_to_plot_y[k],symbol[k],markersize=10.,linewidth=3.5,label=str(metallicity_label[k])) pl.plot(sr_zr_tps[k],sr_y_tps[k],markers[k],markersize=12.) pl.plot(sr_zr_tps_co[k],sr_y_tps_co[k],markers[k],markersize=20.) else: pl.plot(array_to_plot_x[k],array_to_plot_y[k],symbol[k],markersize=10.,linewidth=3.5,label=str(metallicity_label[k])) pl.plot(sr_zr_tps[k],sr_y_tps[k],markers[k],markersize=12.,label='C/O < 1') pl.plot(sr_zr_tps_co[k],sr_y_tps_co[k],markers[k],markersize=20.,label='C/O > 1') pl.xlabel('[Sr/Zr]', fontsize=20) pl.ylabel('[Sr/Y]', fontsize=20) y_min = -0.1 y_max = 0.1 pl.ylim(y_min,y_max) pl.xlim(-0.3,0.3) size=20 pl.xticks(size=size) pl.yticks(size=size) ax = pl.gca() for line in ax.xaxis.get_ticklines(): line.set_markersize(25) line.set_markeredgewidth(3) for line in ax.yaxis.get_ticklines(): line.set_markersize(25) line.set_markeredgewidth(3) box = ax.get_position() ax.set_position([box.x0, box.y0, box.width * 0.9, box.height]) pl.legend(loc=2, borderaxespad=1.0,prop={'size':25}) box = ax.get_position()
Set only one of the variables True to plot those quantities. In case you set iso_ratio = True: isotopes_x and isotopes_y are used.
entailment
def set_plot_CO_mass(self,fig=3123,xaxis='mass',linestyle=['-'],marker=['o'],color=['r'],age_years=True,sparsity=500,markersparsity=200,withoutZlabel=False,t0_model=[]):
    '''
    Plot the surface C/O number ratio (4/3 * X(C-12)/X(O-16)) for every
    run in self.runs_H5_surf versus model number, age or total mass.

    xaxis        : 'cycles', 'age' or 'mass' on the abscissa.
    age_years    : convert the age axis to years (uses 'age_unit').
    sparsity     : take every sparsity-th cycle.
    t0_model     : per-run index into the sampled cycles used as origin
                   (time zero / starting point); empty list means 0 for all.
    withoutZlabel: drop the ", Z=..." part of the legend label.
    '''
    if len(t0_model)==0:
        t0_model = len(self.runs_H5_surf)*[0]
    plt.figure(fig)
    for run_idx in range(len(self.runs_H5_surf)):
        sefiles = se(self.runs_H5_surf[run_idx])
        cycles = range(int(sefiles.se.cycles[0]),int(sefiles.se.cycles[-1]),sparsity)
        mini = sefiles.get("mini")
        zini = sefiles.get("zini")
        run_label = str(mini)+'$M_{\odot}$, Z='+str(zini)
        start = t0_model[run_idx]
        if xaxis=='cycles':
            x = cycles
        if xaxis=='age':
            x = sefiles.get(cycles,'age')
            if age_years==True:
                x = np.array(x)*sefiles.get('age_unit')/(365*24*3600)
            # shift so the chosen start model defines t = 0
            x = x - x[start]
        if xaxis=='mass':
            x = sefiles.get(cycles,'mass')[start:]
        # NOTE(review): for xaxis='age' the abundances are sliced but x is
        # only shifted, so lengths differ when start > 0 — TODO confirm.
        c12 = sefiles.get(cycles,'C-12')[start:]
        o16 = sefiles.get(cycles,'O-16')[start:]
        # 16/12 mass-to-number conversion for the C/O pair
        co_number_ratio = 4./3.*np.array(c12)/np.array(o16)
        if withoutZlabel==True:
            shown_label = run_label.split(',')[0]
        else:
            shown_label = run_label
        plt.plot(x,co_number_ratio,label=shown_label,marker=marker[run_idx],linestyle=linestyle[run_idx],markevery=markersparsity,color=color[run_idx])
    if xaxis=='mass':
        # decreasing total mass: plot from 7 Msun down to 0.5 Msun
        plt.xlim(7,0.5)
    plt.xlabel('$M/M_{\odot}$',fontsize=18)
    plt.ylabel('C/O Ratio', fontsize=18)
    plt.legend(loc=1)
Plots C/O surface number fraction
entailment
def set_plot_surface_abu(self,fig=2,species=['Sr-88','Ba-136'],decay=False,number_frac=False,xaxis='cycles',age_years=False,ratio=False,sumiso=False,eps=False,samefigure=False,samefigureall=False,withkip=False,sparsity=200,linestyle=['--'],marker=['o'],color=['r'],label=[],markevery=100,t0_model=-1,savefig=''): ''' Simply plots surface abundance versus model number or time ''' extralabel=False if len(label)>0: extralabel=True import nugridse as mp import utils as u idx=0 if eps==True: species= species + ['H-1'] if samefigureall==True and ratio==False: plt.figure(fig) for i in range(len(self.runs_H5_surf)): idx=0 sefiles=mp.se(self.runs_H5_surf[i]) if samefigure==True: plt.figure(i) cycles=range(int(sefiles.se.cycles[0]),int(sefiles.se.cycles[-1]),sparsity) mini=sefiles.get("mini") zini=sefiles.get("zini") if not extralabel: label1=str(mini)+'$M_{\odot}$, Z='+str(zini) if xaxis=='cycles': x=cycles if xaxis=='age': x=sefiles.get(cycles,'age') if age_years==True: x=np.array(x)*sefiles.get('age_unit')/(365*24*3600) print 'x is age' if t0_model>0: #print cycles idxt0=0 for kk in range(len(cycles)): print int(cycles[kk]),t0_model if int(cycles[kk]) == t0_model: idxt0=kk break print 'found t0_model idx',idxt0 #idxt0=cycles.index(t0_model) cycles=cycles[idxt0:] if idxt0==0: print 'Warning, t0modle not found' x=x[idxt0:] - x[idxt0] else: idxt0=0 if xaxis=='mass': x=sefiles.get(cycles,'mass') if decay==False: species_abu1=sefiles.get(cycles,species) else: species_abu11=sefiles.get(cycles,'iso_massf_decay') species_abu1=[] for jj in range(len(cycles)): species_abu1.append([]) for j in range(len(species)): species_abu1[-1].append(species_abu11[jj][sefiles.se.isotopes.index(species[j])]) if len(species)==1: species_abu11=[] for kk in range(len(species_abu1)): species_abu11.append([species_abu1[kk]]) species_abu1=species_abu11 species_abu=[] for k in range(len(species)): print 'species ',k species_abu.append([]) for k in range(len(species)): for h in range(len(cycles)): 
species_abu[k].append(species_abu1[h][k]) #print species_abu #if t0_model>0: # species_abu=species_abu[t0_model:] for k in range(len(species)): if samefigure==False and ratio==False: fig=plt.figure(species[k]) if xaxis=='cycles': plt.xlabel('model number') if xaxis=='age': plt.xlabel('Age [yrs]') if xaxis=='mass': plt.xlabel('$M/M_{\odot}$') plt.ylabel('X$_i$') if ratio==True: continue if extralabel: label=label[k] else: label=label1 if samefigure==True: if sumiso == True: sumiso_massfrac=np.array(species_abu[0]) for hh in range(1,len(species_abu)): sumiso_massfrac = sumiso_massfrac + np.array(species_abu[hh]) plt.plot(x,sumiso_massfrac,linestyle=linestyle[idx],marker=marker[idx],label=species[k]+', '+label,color=color[idx],markevery=markevery) break #leave iso looop else: if eps==True: species_abu[0]=np.log10(np.array(species_abu[0])/(np.array(species_abu[1])*7)) + 12. plt.plot(x,species_abu[k],linestyle=linestyle[idx],marker=marker[idx],label=species[k]+', '+label,color=color[idx],markevery=markevery) idx+=1 if eps==True: break else: if withkip==True: print 'test' else: plt.ylabel('X('+species[k]+')') if eps==True: species_abu[0]=np.log10(np.array(species_abu[0])/(np.array(species_abu[1])*7)) + 12. plt.plot(x,species_abu[k],linestyle=linestyle[i],marker=marker[i],label=label,color=color[i],markevery=markevery) if eps==True: break plt.legend(loc=2) plt.yscale('log') if ratio==True: if number_frac==True: print 'plot number frac' plt.plot(x,4./3.*np.array(species_abu[1])/np.array(species_abu[0]),linestyle=linestyle[i],marker=marker[i],label=label,color=color[i],markevery=markevery) else: plt.plot(x,np.array(species_abu[1])/np.array(species_abu[0]),linestyle=linestyle[i],marker=marker[i],label=label,color=color[i],markevery=markevery) plt.legend(loc=2) plt.yscale('log') name='M'+str(mini)+'Z'+str(zini) plt.legend(loc=4) plt.savefig(savefig+'/surf_'+name+'.png')
Simply plots surface abundance versus model number or time
entailment
def set_plot_profile_decay(self,cycles=20*[-1],mass_range=20*[[0,0]],ylim=20*[[0,0]],isotopes=[],linestyle=[],save_dir=''):
    '''
    Plot decayed isotopic abundance profiles versus mass coordinate for
    every run in self.runs_H5_restart (one figure per run).

    Parameters
    ----------
    cycles : list of int
        Cycle to plot for each run; -1 selects the last available cycle.
    mass_range : list of [float, float]
        Mass range per run; [0, 0] means "use the whole star".
    ylim : list of [float, float]
        y-axis limits per run; applied only when at least one entry is > 0.
    isotopes : list of str
        Isotope names in 'El-A' notation, e.g. 'Ni-56'.
    linestyle : list of str
        Line style per isotope; defaults to solid lines.
    save_dir : str
        If non-empty, each figure is saved to this directory.
    '''
    # BUGFIX: the default 20*[[0,0]] aliases ONE inner list 20 times and, as a
    # function default, persists across calls; the code below mutates
    # mass_range[i][1], which previously corrupted every entry and leaked the
    # first call's value into later calls.  Work on a per-call copy instead.
    mass_range=[list(m) for m in mass_range]
    if len(linestyle)==0:
        linestyle=200*['-']
    import nugridse as mp
    import utils as u
    for i in range(len(self.runs_H5_restart)):
        sefiles=mp.se(self.runs_H5_restart[i])
        cycle=cycles[i]
        if cycle==-1:
            cycle=int(sefiles.se.cycles[-1])   # fall back to the last cycle
        if mass_range[i][0] ==0 and mass_range[i][1]==0:
            mass_range[i][1]=sefiles.get(cycle,'mass')[-1]   # whole star
        sefiles.read_iso_abund_marco(mass_range[i],cycle)
        u.stable_specie()
        sefiles.decay(sefiles.mass_frac)
        # translate 'El-A' names into the internal 'EL   A' scheme used as
        # keys of utils.back_ind to find each isotope's index
        idx_species=[]
        for k in range(len(isotopes)):
            other_name_scheme=isotopes[k].split("-")[0].upper()+(5-len(isotopes[k])+1)*" "+isotopes[k].split("-")[1]
            idx_specie=u.back_ind[other_name_scheme]
            idx_species.append(idx_specie)
        # collect the decayed mass fraction of each requested isotope at every
        # mass zone (mp.decayed_multi_d is filled by sefiles.decay above)
        mass_abu_array=[]
        for idx_specie in idx_species:
            mass_abu_array.append([])
            for idx_mass in range(len(mp.decayed_multi_d)):
                mass_abu_array[-1].append(mp.decayed_multi_d[idx_mass][idx_specie])
        #plotting
        plt.figure(self.run_dirs_name[i])
        for k in range(len(isotopes)):
            plt.plot(mp.used_masses,mass_abu_array[k],linestyle=linestyle[k],label=isotopes[k])
        plt.legend()
        plt.yscale('log')
        plt.xlabel('M/Msun')
        plt.ylabel('$X_i$')
        plt.xlim(mass_range[i][0],mass_range[i][1])
        # simplified: (A or B) or (A and B) is logically just A or B
        if ylim[i][0]>0 or ylim[i][1]>0:
            plt.ylim(ylim[i][0],ylim[i][1])
        if len(save_dir)>0:
            star_mass=sefiles.get("mini")
            star_z=sefiles.get("zini")
            plt.savefig(save_dir+'/'+self.run_dirs_name[i]+'_decay_profiles.png')
Plots decayed abundance profiles. end_model - array, controls how far in models a run is plotted, if -1 till end; symbs_1 - set symbols of runs
entailment
def set_get_abu_distr_decay_old(self,cycles=20*[-1],mass_range=20*[[0,0]],ylim=20*[[0,0]],isotopes=['all'],linestyle=[],save_dir=''): ''' Plots HRDs end_model - array, control how far in models a run is plottet, if -1 till end symbs_1 - set symbols of runs ''' if len(linestyle)==0: linestyle=200*['-'] import nugridse as mp import utils as u print self.runs_H5_restart massfrac_all=[] iso_all=[] for i in range(len(self.runs_H5_restart)): sefiles=mp.se(self.runs_H5_restart[i]) cycle=cycles[i] if cycle==-1: cycle=int(sefiles.se.cycles[-1]) if mass_range[i][0] ==0 and mass_range[i][1]==0: mass_range[i][1]=sefiles.get(cycle,'mass')[-1] sefiles.read_iso_abund_marco(mass_range[i],cycle) u.stable_specie() sefiles.decay(sefiles.mass_frac) idx_species=[] massfrac=[] iso=[] if not isotopes[0]=='all': for k in range(len(isotopes)): other_name_scheme=isotopes[k].split("-")[0].upper()+(5-len(isotopes[k])+1)*" "+isotopes[k].split("-")[1] #other_name_scheme=other_name_scheme.capitalize() idx_specie=u.back_ind[other_name_scheme] idx_species.append(idx_specie) massfrac.append(average_massfrac_decay[idx_specie]) iso=isotopes else: massfrac=mp.average_mass_frac_decay other_name_scheme=u.back_ind iso=[] import re for kk in range(len(other_name_scheme)): list1=re.split('(\d+)',other_name_scheme[kk]) newname=list1[0].capitalize()+'-'+list1[1] iso.append(newname) massfrac_all.append(massfrac) iso_all.append(iso) return iso_all,massfrac_all
Returns decayed abundance distributions. end_model - array, controls how far in models a run is plotted, if -1 till end; symbs_1 - set symbols of runs
entailment
def set_cores_massive(self,filename='core_masses_massive.txt'): ''' Uesse function cores in nugridse.py ''' core_info=[] minis=[] for i in range(len(self.runs_H5_surf)): sefiles=se(self.runs_H5_out[i]) mini=sefiles.get('mini') minis.append(mini) incycle=int(sefiles.se.cycles[-1]) core_info.append(sefiles.cores(incycle=incycle)) print_info='' for i in range(len(self.runs_H5_surf)): if i ==0: print 'Following returned for each initial mass' print core_info[i][1] #print '----Mini: ',minis[i],'------' print_info+=(str(minis[i])+' & ') info=core_info[i][0] for k in range(len(info)): print_info+=('{:.3E}'.format(float(core_info[i][0][k]))+' & ') print_info=(print_info+'\n') #print core_info[i][2] f1=open(filename,'a') f1.write(print_info) f1.close()
Uses the function cores in nugridse.py
entailment
def get_fallback_coord(self,isotope='Ni-56',masslimit=0.1,masscutlim=False,delay=True):
    '''
    Return the fallback mass coordinate chosen so that `masslimit` solar
    masses of Ni-56 are ejected.  The explosion type (delayed vs. rapid)
    is chosen with the `delay` option.

    masscutlim: If True, the new fallback coordinate can only be as small as
    the original fallback prescription by C. Fryer.  Useful for more massive
    stars which would not eject any metals with Fryer's prescription.

    Returns (minis, fallback_coords): initial masses and the new fallback
    mass coordinate per run.
    '''
    def getmasscut(m_ini,z_ini,delay):
        # Remnant (mass-cut) prescription as a function of initial mass and
        # metallicity; the code comments reference Fryer (2012) ("Fryer12").
        # NOTE(review): 12 Msun models are remapped to the 15 Msun formula —
        # presumably a model-specific workaround; verify.
        if int(m_ini)==12:
            m_ini=15
        z_metal=z_ini/0.02   # metallicity in solar units (Zsun = 0.02)
        print 'MINI',m_ini,z_metal
        if ((m_ini>=11.) and (m_ini<30.)):
            if delay==True:
                mass_cut = 1.1 + 0.2*np.exp((m_ini-11.0)/4.) - (2.0 + z_metal)*np.exp(0.4*(m_ini -26.0))
            ####rapid cc
            else:
                if m_ini<22.:
                    mass_cut= 1.1 +0.2*np.exp((m_ini-11.0)/7.5) + 10*(1.0+z_metal)*np.exp(-(m_ini-23.5)**2/(1.0+z_metal)**2)
                elif m_ini<30 :
                    mass_cut= 1.1 + 0.2*np.exp((m_ini-11.0)/4.) - (2.0 + z_metal)*np.exp(0.4*(m_ini -26.0)) - 1.85 + 0.25*z_metal +10.0*(1.0+z_metal)*np.exp(-(m_ini-23.5)**2/(1.0+z_metal)**2)
        ##at higher mass difference
        elif ((m_ini>30) and (m_ini<50)):
            #delay
            if delay==True:
                mass_cut= min( 33.35 + (4.75 + 1.25*z_metal)*(m_ini-34.),m_ini-z_metal**0.5 *(1.3*m_ini - 18.35))
            else:
                mass_cut = min( 33.35 + (4.75 + 1.25*z_metal)*(m_ini-34.),m_ini-z_metal**0.5 *(1.3*m_ini - 18.35)) - 1.85 + z_metal*(75. -m_ini)/20.
        elif m_ini>50:
            #Page 7, Fryer12, only at solar Z valid
            if z_metal==1:
                if m_ini<90.:
                    mass_cut = 1.8 + 0.04*(90. - m_ini)
                else:
                    mass_cut = 1.8 + np.log10(m_ini - 89.)
            #The part below will probably never be used
            if z_metal <1:
                if m_ini<90.:
                    mass_cut = max(min( 33.35 + (4.75 + 1.25*z_metal)*(m_ini-34.),m_ini-z_metal**0.5 *(1.3*m_ini - 18.35)),1.8 + 0.04*(90. - m_ini))
                else:
                    mass_cut = max(min( 33.35 + (4.75 + 1.25*z_metal)*(m_ini-34.),m_ini-z_metal**0.5 *(1.3*m_ini - 18.35)),1.8 + np.log10(m_ini - 89.))
        # NOTE(review): masses outside the covered intervals (m_ini < 11, or
        # exactly 30 or 50) leave mass_cut undefined -> UnboundLocalError here.
        mass_cut=round(mass_cut,2)
        return mass_cut
    fallback_coords=[]
    orig_fallback=[]
    minis=[]
    ni56_mass_all=[]
    o16_mass_all=[]
    for i in range(len(self.runs_H5_surf)):
        sefiles=se(self.runs_H5_restart[i])
        m_ini=sefiles.get("mini")
        z_ini=sefiles.get("zini")
        minis.append(m_ini)
        mass_cut=getmasscut(m_ini,z_ini,delay)
        print 'mass cut',mass_cut
        cycle=int(sefiles.se.cycles[-1])
        mass_cycle=sefiles.get(cycle,"mass")
        mass_limit=mass_cycle[-1] #maximum mass_test can be
        # NOTE(review): idx_start is computed but never used below.
        idx_start=min(range(len(mass_cycle)), key=lambda i: abs(mass_cycle[i]-mass_cut))
        ni56_frac=sefiles.get(cycle,'Ni-56')
        o16_frac=sefiles.get(cycle,'O-16')
        # cell masses; assumes mass_cycle is a numpy array — TODO confirm
        deltam=mass_cycle[1:]-mass_cycle[:-1]
        ni56_mass=0
        o16_mass=0
        ni56_mass_orig=0
        newremnant=mass_cut
        #go fromm outside to the inside
        for k in range(len(mass_cycle)-1)[::-1]:
            cellm=deltam[k]*ni56_frac[k]
            #in case fallback coordinate should not be smaller then mass_cut (fryer)
            if masscutlim==True:
                if mass_cycle[k]<mass_cut:
                    break
            # stop once the accumulated Ni-56 exceeds the requested ejecta
            if ni56_mass>masslimit:
                newremnant=mass_cycle[k]
                print 'found new remnant',newremnant,'ni56:',ni56_mass
                break
            ni56_mass+=cellm
            o16_mass+= (deltam[k]*o16_frac[k])
        if newremnant == mass_limit:
            print 'Ni-56 does not reach 0.1Msun, take old remnant',newremnant
        fallback_coords.append(newremnant)
        orig_fallback.append(mass_cut)
        ni56_mass_all.append(ni56_mass)
        o16_mass_all.append(o16_mass)
    print '########Results:######'
    for k in range(len(minis)):
        print 'Initial mass: '+str(minis[k])+'Original fallback coord (fryer): '+str(orig_fallback[k])+',New fallback coord: '+str(fallback_coords[k])+'Ni-56 ejected: '+str(ni56_mass_all[k])+'O16: '+str(o16_mass_all[k])
    return minis, fallback_coords
Returns fallback mass coordinate so that the amount of masslimit of the isotope isotope is ejected. Explosion type chosen with delay option. masscutlim: If true, new fallback coordinate can only as small as the original fallback prescription by C. Fryer. Useful for more massive stars which would not eject any metals with Freyer's prescription.
entailment
def set_burnstages_upgrade_massive(self): ''' Outputs burnign stages as done in burningstages_upgrade (nugridse) ''' burn_info=[] burn_mini=[] for i in range(len(self.runs_H5_surf)): sefiles=se(self.runs_H5_out[i]) burn_info.append(sefiles.burnstage_upgrade()) mini=sefiles.get('mini') #zini=sefiles.get('zini') burn_mini.append(mini) for i in range(len(self.runs_H5_surf)): print 'Following returned for each initial mass' print '[burn_cycles,burn_ages, burn_abun, burn_type,burn_lifetime]' print '----Mini: ',burn_mini[i],'------' print burn_info[i]
Outputs burning stages as done in burnstage_upgrade (nugridse)
entailment
def set_plot_CC_T_rho_new(self,fig='CC evol',linestyle=['-'],burn_limit=0.997,color=['r'],marker=['o'],markevery=500): ''' Plots end_model - array, control how far in models a run is plottet, if -1 till end symbs_1 - set symbols of runs ''' if len(linestyle)==0: linestyle=200*['-'] plt.figure(fig) for i in range(len(self.runs_H5_surf)): sefiles=se(self.runs_H5_out[i]) t1_model=-1 mini=sefiles.get('mini') zini=sefiles.get('zini') label=str(mini)+'$M_{\odot}$, Z='+str(zini) model=sefiles.se.cycles model_list=[] for k in range(0,len(model),1): model_list.append(model[k]) print 'REad Rho,T, this might take a while...' rho1=sefiles.get(model_list,'rho') #[:(t1_model-t0_model)] T1=sefiles.get(model_list,'temperature')#[:(t1_model-t0_model)] print 'finished' rho=[] T=[] T_unit=sefiles.get('temperature_unit') labeldone=False for k in range(len(model_list)): #print 'test model ',model_list[k] t9max=max(np.array(T1[k])*T_unit/1.e9) #T.append(max(t9)) rho1max=max(rho1[k]) print 'model',model_list[k] print 'maxT, maxrho' print t9max,rho1max if k==0: t9_prev=t9max rho1_prev=rho1max idx_T=0 idx_rho=0 continue if t9max>t9_prev: idx_T=k t9_prev=t9max if rho1max>rho1_prev: idx_rho=k rho1_prev=rho1max print 'found highest rho',idx_rho,max(np.array(T1[idx_rho])*T_unit/1.0e9),max(rho1[idx_rho]),model_list[idx_rho] print 'found highest T',idx_T,max(np.array(T1[idx_T])*T_unit/1.0e9),max(rho1[idx_T]),model_list[idx_T] if idx_T==idx_rho: x=np.array(T1[idx_T])*T_unit/1e9 y=rho1[idx_T] rho1=[] T1=[] for k in range(len(x)): if not y[k]==1.0: rho1.append(y[k]) T1.append(x[k]) x=T1 y=rho1 plt.plot(x,y,label=label,color=color[i],marker=marker[i],linestyle=linestyle[i],markevery=markevery) #rhoi.append(max(rho1[k])) else: #for max T x=np.array(T1[idx_T])*T_unit/1e9 y=rho1[idx_T] rho_temp=[] T_temp=[] for k in range(len(x)): if not y[k]==1.0: rho_temp.append(y[k]) T_temp.append(x[k]) x=T_temp y=rho_temp 
plt.plot(x,y,label=label,color=color[i],marker=marker[i],linestyle=linestyle[i],markevery=markevery) #for max rho x=np.array(T1[idx_rho])*T_unit/1e9 y=rho1[idx_rho] rho_temp=[] T_temp=[] for k in range(len(x)): if not y[k]==1.0: rho_temp.append(y[k]) T_temp.append(x[k]) x=T_temp y=rho_temp plt.plot(x,y,color=color[i],marker=marker[i],linestyle=linestyle[i],markevery=markevery)
Plots end_model - array, controls how far in models a run is plotted, if -1 till end symbs_1 - set symbols of runs
entailment
def set_plot_CC_T_rho_max(self,linestyle=[],burn_limit=0.997,color=['r'],marker=['o'],markevery=500): ''' Plots end_model - array, control how far in models a run is plottet, if -1 till end symbs_1 - set symbols of runs ''' if len(linestyle)==0: linestyle=200*['-'] plt.figure('CC evol') for i in range(len(self.runs_H5_surf)): sefiles=se(self.runs_H5_out[i]) t1_model=-1 sefiles.get('temperature') sefiles.get('density') mini=sefiles.get('mini') zini=sefiles.get('zini') model=sefiles.se.cycles model_list=[] for k in range(0,len(model),1): model_list.append(model[k]) rho1=sefiles.get(model_list,'rho') #[:(t1_model-t0_model)] T1=sefiles.get(model_list,'temperature')#[:(t1_model-t0_model)] rho=[] T=[] T_unit=sefiles.get('temperature_unit') labeldone=False for k in range(len(model_list)): t9=np.array(T1[k])*T_unit/1e9 T.append(max(t9)) rho.append(max(rho1[k])) label=str(mini)+'$M_{\odot}$, Z='+str(zini) plt.plot(T,rho,label=label,color=color[i],marker=marker[i],markevery=markevery) plt.xlabel('$T_{9,max} (GK)$') plt.ylabel(r'$\rho [cm^{-3}]$') plt.yscale('log') plt.xscale('log') plt.legend(loc=2)
Plots end_model - array, controls how far in models a run is plotted, if -1 till end symbs_1 - set symbols of runs
entailment
def set_plot_CC_T_rho(self,linestyle=[],burn_limit=0.997,color=['r'],marker=['o'],nolabelZ=False,markevery=500): ''' Plots HRDs end_model - array, control how far in models a run is plottet, if -1 till end symbs_1 - set symbols of runs ''' if len(linestyle)==0: linestyle=200*['-'] plt.figure('CC evol') for i in range(len(self.runs_H5_surf)): sefiles=se(self.runs_H5_out[i]) t1_model=-1 sefiles.get('temperature') sefiles.get('density') mini=sefiles.get('mini') zini=sefiles.get('zini') model=sefiles.se.cycles model_list=[] for k in range(0,len(model),1): model_list.append(model[k]) rho1=sefiles.get(model_list,'rho') #[:(t1_model-t0_model)] T1=sefiles.get(model_list,'temperature')#[:(t1_model-t0_model)] rho=[] T=[] T_unit=sefiles.get('temperature_unit') labeldone=False for k in range(len(model_list)): t9=np.array(T1[k])*T_unit/1e9 T_delrem=[] rho_delrem=[] if k ==0: T.append(max(t9)) rho.append(max(rho1[k])) for h in range(len(rho1[k])): if rho1[k][h] < 1e3: T_delrem.append(t9[h]) rho_delrem.append(rho1[k][h]) if nolabelZ==True: plt.plot(T_delrem,rho_delrem,label=self.extra_label[i].split(',')[0],color=color[i],marker=marker[i],markevery=markevery) else: plt.plot(T_delrem,rho_delrem,label=self.extra_label[i],color=color[i],marker=marker[i],markevery=markevery) else: if (max(rho)<max(rho1[k]) or max(T)<max(t9)): for h in range(len(rho1[k])): if rho1[k][h] > 1e3: T_delrem.append(t9[h]) rho_delrem.append(rho1[k][h]) if labeldone==True: plt.plot(T_delrem,rho_delrem,color=color[i],marker=marker[i],markevery=markevery) else: label=str(mini)+'$M_{\odot}$, Z='+str(zini) if nolabelZ==True: plt.plot(T_delrem,rho_delrem,label=label.split(',')[0],color=color[i],marker=marker[i],markevery=markevery) else: plt.plot(T_delrem,rho_delrem,label=label,color=color[i],marker=marker[i],markevery=markevery) labeldone=True T.append(max(t9)) rho.append(max(rho1[k])) else: break #else: plt.xlabel('$T_9 [GK]$',size=22) plt.ylabel(r'$\rho [cm^{-3}]$',size=22) plt.yscale('log') plt.xscale('log') 
plt.legend(loc=2)
Plots HRDs end_model - array, controls how far in models a run is plotted, if -1 till end symbs_1 - set symbols of runs
entailment
def set_plot_tcrhoc(self,linestyle=['-'],burn_limit=0.997,marker=['o'],markevery=500,end_model=[-1],deg_line=True): ''' Plots HRDs end_model - array, control how far in models a run is plottet, if -1 till end symbs_1 - set symbols of runs ''' if len(linestyle)==0: linestyle=200*['-'] for i in range(len(self.runs_H5_surf)): m1p65_last=se(self.runs_H5_out[i]) t1_model=-1 if end_model[i] != -1: t1_model=end_model[i] model=m1p65_last.se.cycles model_list=[] for k in range(0,len(model),5): model_list.append(model[k]) rho1=m1p65_last.get(model_list,'rho') #[:(t1_model-t0_model)] T1=m1p65_last.get(model_list,'temperature')#[:(t1_model-t0_model)] T_unit=m1p65_last.get('temperature_unit') mass=m1p65_last.get(model_list,'mass') mini=m1p65_last.get('mini') zini=m1p65_last.get('zini') #info=m1p65_last.burnstage_upgrade() #burn_info=[] #burn_cycle=[] #for k in range(len(info[0])): # if 'start' in info[3][k]: # burn_info.append( info[3][k]) # burn_cycle.append(info[0][k]) #print burn_info #print burn_cycle ''' #H #Maybe use get_elemental_abunds(), too slow if individually taken h=m1p65_last.get(model_list,'H-1') h_ini=mini*h[0][-1] h_limit=h_ini*burn_limit print h_limit #He he=m1p65_last.get(model_list,'He-4') he+=m1p65_last.get(model_list,'He-3') he_ini=mini*he[0][-1] he_limit=he_ini*burn_limit print he_limit if mini>5: c=m1p65_last.get(model_list,'C-12') c_ini=mini*c[0][-1] c_limit=c_ini*burn_limit if mini>10: ne=m1p65_last.get(model_list,'Ne-20') ne_ini= mini*ne[0][-1] ne_limit=ne_ini*burn_limit o=m1p65_last.get(model_list,'O-16') o_ini=mini*o[0][-1] o_limit=o_ini*burn_limit si=m1p65_last.get(model_list,'Si-28') si_ini=mini*si[0][-1] si_limit=si_ini*burn_limit print '#############################3' print h_ini,he_ini rho=[] T=[] he_depl_idx=-1 h_depl_idx=-1 c_depl_idx=-1 ne_depl_idx=-1 o_depl_idx=-1 si_depl_idx=-1 ''' rho=[] T=[] for k in range(len(model_list)): rho_center=rho1[k][0] T_center=T1[k][0] rho.append(rho_center) T.append(T_center) ''' 
mass11=np.array([0]+list(mass[k])) delta_mass=mass11[1:]-mass11[:-1] #for the low-mass + massive AGB if (sum(np.array(he[k])*np.array(delta_mass))<he_limit) and (he_depl_idx==-1): he_depl_idx=k if (sum(np.array(h[k])*np.array(delta_mass))<h_limit) and (h_depl_idx==-1): h_depl_idx=k #for the SAGB + massive if mini>5: if (sum(np.array(c[k])*np.array(delta_mass))<c_limit) and (c_depl_idx==-1): c_depl_idx=k #for the massive stars if mini>10: if (sum(np.array(ne[k])*np.array(delta_mass))<ne_limit) and (ne_depl_idx==-1): ne_depl_idx=k if (sum(np.array(o[k])*np.array(delta_mass))<o_limit) and (o_depl_idx==-1): o_depl_idx=k if (sum(np.array(si[k])*np.array(delta_mass))<si_limit) and (si_depl_idx==-1): si_depl_idx=k ''' T=np.log10(np.array(T)*T_unit) #T_degeneracy=np.log10(np.array(rho)**(2./3.)) rho=np.log10(np.array(rho)) figure(1) #plot(logTeff,logL,marker=symbs[i],label=self.run_label[i],linestyle=linestyle[i],markevery=markevery) #pl.plot(rho,T,marker=symbs[i],label=self.run_label[i],linestyle=linestyle[i],markevery=markevery) label=str(mini)+'$M_{\odot}$, Z='+str(zini) plt.plot(rho,T,label=label,linestyle=linestyle[i],marker=marker[i],markevery=markevery) ''' #plt.plot(rho,T_degeneracy,color='b',linestyle='-',label='$P_e = P_{e,deg}$') print burn_info print burn_cycle burn_cycle=np.array(burn_cycle)/100. 
print 'changed cycles:', burn_cycle print len(rho) #For different burning stages: if 'H_start' in burn_info: plt.plot(rho[burn_cycle[0]],T[burn_cycle[0]],marker='o',color='b') if 'He_start' in burn_info: plt.plot(rho[burn_cycle[1]],T[burn_cycle[1]],marker='o',color='r') if 'C_start' in burn_info: plt.plot(rho[burn_cycle[2]],T[burn_cycle[2]],marker='o',color='g') if 'Ne_start' in burn_info: plt.plot(rho[burn_cycle[3]],T[burn_cycle[3]],marker='D',color='b') if 'O_start' in burn_info: plt.plot(rho[burn_cycle[4]],T[burn_cycle[4]],marker='D',color='r') if 'Si_start' in burn_info: plt.plot(rho[burn_cycle[5]],T[burn_cycle[5]],marker='D',color='g') ''' #ax = plt.gca() #ax.invert_xaxis() plt.rcParams.update({'font.size': 16}) plt.rc('xtick', labelsize=16) plt.rc('ytick', labelsize=16) legend(loc=4) plt.xlabel('log $\\rho_{\\rm c}$',fontsize=18) plt.ylabel('log $T_{\\rm c}$',fontsize=18) ''' #plt.gca().invert_xaxis() figure(0) #pl.plot(rho,T,marker=symbs[i],label=self.run_label[i],linestyle=linestyle[i],markevery=markevery) plt.plot(rho,T,label=self.extra_label[i],linestyle=linestyle[i]) plt.plot(rho,T_degeneracy,color='b',linestyle='-',label='$P_e = P_{e,deg}$') #For different burning stages: if not h_depl_idx ==-1: plt.plot(rho[h_depl_idx],T[h_depl_idx],marker='o',color='b') if not he_depl_idx ==-1: plt.plot(rho[he_depl_idx],T[he_depl_idx],marker='o',color='r') if not c_depl_idx ==-1: plt.plot(rho[c_depl_idx],T[c_depl_idx],marker='o',color='g') if not ne_depl_idx ==-1: plt.plot(rho[ne_depl_idx],T[ne_depl_idx],marker='D',color='b') if not o_depl_idx ==-1: plt.plot(rho[o_depl_idx],T[o_depl_idx],marker='D',color='r') if not si_depl_idx ==-1: plt.plot(rho[si_depl_idx],T[si_depl_idx],marker='D',color='g') #ax = plt.gca() #ax.invert_xaxis() plt.rcParams.update({'font.size': 16}) plt.rc('xtick', labelsize=16) plt.rc('ytick', labelsize=16) legend(loc=4) plt.xlabel('log $\\rho_{\\rm c}$',fontsize=18) plt.ylabel('log $T_{\\rm c}$',fontsize=18) #plt.gca().invert_xaxis() ''' i+=1 if 
deg_line==True: rho=np.arange(0,9,0.01) T_degeneracy=2./3. *rho +np.log10(1.207e5 * 1.8/(2.**(5./3.))) #T_degeneracy=np.log10( (10**np.array(rho))**(2./3.)) plt.plot(rho,T_degeneracy,color='b',linestyle='-',label='$P_e = P_{e,deg}$') plt.legend(loc=2)
Plots HRDs end_model - array, controls how far in models a run is plotted, if -1 till end symbs_1 - set symbols of runs
entailment
def prod_fac_massgrid_A_1(self,weighted=True,log=True,runs=[],isotopes=[],elements=[],cycles=[],plot_set_diagram=True,color=['r','b','g','k'],marker_type=['o','p','s','D'],line_style=['--','-','-.',':'],markersize=[6,6,6,6],line_width=[14,14,14,14],title='',withlabel=True,label='',plot_lines=True,exp_only=False,pre_exp=False,delay=True,exp_dir='',fontsizelabel='x-small',iniabupath='/astro/critter/critter/PPN/forum.astro.keele.ac.uk/frames/mppnp/USEEPP/iniab2.0E-02GN93.ppn',xlim=[0,0]): ''' Plots behaviour star mass dependent yields of different isotopes - specify dirs at the beginning Beware of different z, runs : If array is empty function uses all available directories. !! If len of elements longer than zero, than isotopes will be ignored and elements used!! ''' runs=[] HDF5_surf=[] HDF5_out=[] HDF5_restart=[] for i in range(len(self.run_dirs_name)): HDF5_surf.append(self.runs_H5_surf[i]) HDF5_out.append(self.runs_H5_out[i]) HDF5_restart.append(self.runs_H5_restart[i]) legend_k=0 sefiles=[] sefiles_hout=[] sefiles_restart=[] for i in range(len(HDF5_surf)): sefiles.append(se(HDF5_surf[i])) sefiles_hout.append(se(HDF5_out[i])) sefiles_restart.append(se(HDF5_restart[i])) import utils as u iniabu=u.iniabu(iniabupath) x_iniabu=[] isotopes11=sefiles[0].se.isotopes if len(isotopes)>0: if isotopes[0]=='allstable': isotopes=get_stable(isotopes11,get_elements=False) color=len(isotopes)*[color[0]] marker_type=len(isotopes)*[marker_type[0]] line_style=len(isotopes)*[line_style[0]] markersize=len(isotopes)*[markersize[0]] line_width=len(isotopes)*[line_width[0]] print isotopes x_iniabu=iniabu.iso_abundance(isotopes) mass_numbers=iniabu.a charge=iniabu.z if len(elements)>0: if elements[0]=='allstable': elements=get_stable(isotopes11,get_elements=True) color=len(elements)*[color[0]] marker_type=len(elements)*[marker_type[0]] line_style=len(elements)*[line_style[0]] markersize=len(elements)*[markersize[0]] line_width=len(elements)*[line_width[0]] print elements 
isotopes=get_stable(isotopes11,get_elements=False) import re x_iniabu=[0]*len(elements) x_iniabu_names=[] for k in range(len(iniabu.names)): iso=iniabu.names[k].replace(' ','') charge1=iniabu.z mass_numbers1=iniabu.a match = re.match(r"([a-z]+)([0-9]+)",iso, re.I) ele1=match.groups()[0].upper() iso=ele1+'-'+match.groups()[1] for r in range(len(elements)): if elements[r] == ele1: x_iniabu_names.append(iso) x_iniabu[r]+=iniabu.iso_abundance(iso) print 'test ouptput',x_iniabu_names #import utils as u #iniabu=u.iniabu(iniabupath) #iniabu.iso_abundance(isotopes) ####dealign with explosion pinput #test to take right dir: # if delay: exp_type='delay' else: exp_type='rapid' slist = os.listdir(exp_dir) expr = re.compile(exp_type) slist=(filter(expr.search,slist)) exp_runs_H5_restart=[] sefiles_exp=[] #to dinstiungish between pre exp and exp sources if pre_exp==False: slist = os.listdir(exp_dir) expr = re.compile(exp_type) slist=(filter(expr.search,slist)) for element in slist: run_path=exp_dir+'/'+element if not os.path.isdir(run_path): continue if os.path.isdir(run_path+"/H5_restart"): sefiles1 = os.listdir(run_path+"/H5_restart") if (filter(expr.search,sefiles1)) <1: print "Warning: No hdf5 restart files found in "+run_path+"/H5_restart" else: exp_runs_H5_restart.append(run_path+"/H5_restart") sefiles_exp.append(se(run_path+"/H5_restart")) else: exp_runs_H5_restart=HDF5_restart for k in range(len(HDF5_restart)): sefiles_exp.append(se(HDF5_restart[k])) z_index_files=[] z_values=[] j=-1 for i in range(len(HDF5_surf)): j+=1 star_z=sefiles[i].get("zini") if star_z not in z_values: z_values.append(star_z) z_index_files.append([]) z_index_files[ z_values.index(star_z)].append(i) max_yield=[] color_iso=-1 yields_1=[] t=0 if len(elements)>0: isotopes=elements legend_k=0 for w in range(len(z_index_files)): star_mass_array=[] yields=[] legend_k+=1 #iso_yields=[] #iniabu_yields_folded=[] production_factor=[] for i in range(len(isotopes)): #iso_yields.append(np.zeros(len( 
z_index_files[w] ))) #iniabu_yields_folded.append(np.zeros(len( z_index_files[w] ))) if exp_only==False: production_factor.append(np.zeros(len( z_index_files[w] ))) else: production_factor.append([]) ttt=0 for k in z_index_files[w]: if type(sefiles[k].get("mini")) == np.ndarray: star_mass=sefiles[k].get("mini")[0] star_z=sefiles[k].get("zini")[0] else: star_mass=sefiles[k].get("mini") star_z=sefiles[k].get("zini") #star_mass=sefiles[k].get("mini")[0] #star_z=sefiles[k].get("zini")[0] if cycles[k][1]==-1: endcycle=int(sefiles[k].se.cycles[-1]) #+ cycles[k][2] #1000 endcycle= int( round( endcycle,-3) -2000 ) else: endcycle=cycles[k][1] if exp_only==False: star_mass_array.append(star_mass) prod_factor,isotopes_prod_fac,yields,iniabu,mass_frac_ini,remn_mass =self.weighted_yields(sefiles[k],sefiles_hout[k],sefiles_restart[k],isotopes,elements,cycles[k][0],endcycle,cycles[k][2]) #print 'yield output###################################:' #print 'wind: ',yields,prod_factor,iniabu for tt in range(len(prod_factor)): prod_factor[tt]=yields[tt]/(star_mass-remn_mass) #iniabu[tt] else: prod_factor=[] if star_mass<=8: continue for pp in range(len(isotopes)): production_factor[pp].append([0]) star_mass_array.append(star_mass) mass=star_mass metallicity=star_z if mass>8: #in case no ppn_exp run dir is available, skip explosion contribution #this is because mass cut is then larger then actual mass of star...no mass lost for t in range(len(exp_runs_H5_restart)): if type(sefiles_exp[t].get("mini")) == np.ndarray: mass1=sefiles_exp[t].get("mini")[0] metallicity1=sefiles_exp[t].get("zini")[0] else: mass1=sefiles_exp[t].get("mini") metallicity1=sefiles_exp[t].get("zini") ###for currupted files corrupt=False if type(mass1) == list: corrupt=True if type(metallicity1) ==list: corrupt=True if ((float(mass1) < 1) or (float(mass1) >70 )) or type(mass1) == type(None): corrupt=True if ((float(metallicity1) < 0.00001) or (float(metallicity1) >1. 
)) or type(metallicity1) == type(None): corrupt=True if corrupt == True: metallicity1=0.02 rundir11=exp_runs_H5_restart[t].split('/')[-2] mass1=float(rundir11.split('.')[0][1:]) #print 'corruption, assinge new values',mass1,metallicity1 if mass1 == mass and metallicity1 == metallicity: #sefiles_re2=sefiles_exp[t] #calculate exp part import nugridse as mp reload(mp) #Have to find the right cycle in restart file #.se.cycles does not work when reading all files #therefore identify file with last cycle and read it: refiles=sefiles_exp[t].se.files tosort=[] refiles1=[] for i in range(len(refiles)): cycle_file=refiles[i][-18:-11] if cycle_file.isdigit(): tosort.append(int(cycle_file)) refiles1.append(refiles[i]) idx_sorted=sorted(range(len(tosort)), key=lambda k: tosort[k]) idx_file = idx_sorted[-1] sefiles_re_cycle=mp.se(exp_runs_H5_restart[t],refiles1[idx_file]) if len(sefiles_re_cycle.se.cycles)==0: #in the case there is not cycle in the last file...set1.2 M2- sefiles_re_cycle=mp.se(exp_runs_H5_restart[t],refiles1[idx_sorted[-2]]) print 'tewststest',exp_cycle_inp if exp_cycle_inp>0: sefiles_re_cycle=mp.se(exp_runs_H5_restart[t],exp_cycle_inp) print 'Use custom exp cycle ', exp_cycle_inp #print 'identifies as ',exp_runs_H5_restart[t],'with file',refiles1[idx_file] isotopes_exp,yields_exp,iniabu_exp,mass_cut_exp,first_cycle_exp=self.weighted_yields_explosion(mp,sefiles_re_cycle,sefiles[k],isotopes=isotopes,elements=elements,delay=delay) if exp_only==False: #add pre-exp yields for tt in range(len(prod_factor)): #print 'star above 8M' #print isotopes_prod_fac[tt],yields[tt],yields_exp[tt] #the iniabu from the explosion is not added #because it is already in the iniabu from wind prod_factor[tt]=(yields[tt]+yields_exp[tt])/(star_mass-mass_cut_exp) #(iniabu[tt]) #print 'yields wind:',yields[tt] #print 'prodfac wind:',(yields[tt]/iniabu[tt]) #print 'wind+exp yields : ',yields[tt]+yields_exp[tt] else: for tt in range(len(isotopes_exp)): prod_factor.append((yields_exp[tt])/( 
(star_mass-mass_cut_exp) - (star_mass-remn_mass) )) #(iniabu_exp[tt])) remn_mass=mass_cut_exp #print 'exp yield',yields_exp #print 'prodfac exp:',(yields_exp[tt]/iniabu_exp[tt]) #print 'wind+exp prodfac:',prod_factor[tt] break #print 'iso exp',len(isotopes_exp) #prod_factor=np.log10(prod_factor) print len(isotopes),len(prod_factor),len(production_factor) #Normalization to solar abundance for i in range(len(isotopes)): production_factor[i][ttt]=prod_factor[i]/x_iniabu[i] ttt+=1 ###plotting #if plot_set_diagram==True: #if plot_set_diagram==True: # fig_2=plt.figure(isotopes[h]) # #fig_2=plt.figure() # ax_2 = fig_2.add_subplot(1,1,1) # plt.rcParams.update({'font.size': 16}) # plt.rc('xtick', labelsize=16) # plt.rc('ytick', labelsize=16) # if log==True: # ax_2.set_yscale('log') color_iso=0 legend_k=0 for h in range(len(isotopes)): #yield_1=iso_yields[i] #mass_1=star_mass_array yield_1=[] mass_1=[] iniabu_folded=[] prod_fac_sorted=[] indices_array=sorted(range(len(star_mass_array)),key=lambda x:star_mass_array[x]) for i in indices_array: # yield_1.append(iso_yields[h][i]) mass_1.append(star_mass_array[i]) # iniabu_folded.append(iniabu_yields_folded[h][i]) prod_fac_sorted.append(production_factor[h][i]) ####Plotting prodfactor #plt.figure(fig_2.number) fig_2=plt.figure(isotopes[h]) ax_2 = fig_2.add_subplot(1,1,1) plt.rcParams.update({'font.size': 16}) plt.rc('xtick', labelsize=16) plt.rc('ytick', labelsize=16) if log==True: ax_2.set_yscale('log') ax_2.legend() ax_2.set_xlabel("M/M$_{\odot}$",fontsize=16) ax_2.minorticks_on() ax_2.set_ylabel("Overproduction factor",fontsize=16) ax_2.set_title(title) if len(label)>0: label=", "+label if len(elements)==0: plot_quantity=isotopes[h] plot_quantity="$^{"+plot_quantity.split("-")[1]+"}$"+plot_quantity.split("-")[0] else: plot_quantity=elements[h] if withlabel==True: 
plt.plot(mass_1,prod_fac_sorted,marker=marker_type[legend_k],color=color[color_iso],markersize=markersize[legend_k],mfc=color[color_iso],linewidth=line_width[legend_k],linestyle=line_style[color_iso],label=plot_quantity+" , Z="+str(star_z)+label ) else: plt.plot(mass_1,prod_fac_sorted,marker=marker_type[legend_k],color=color[color_iso],markersize=markersize[legend_k],mfc=color[color_iso],linewidth=line_width[legend_k],linestyle=line_style[color_iso]) m_max=max(star_mass_array)+2 if plot_lines==True: plt.plot([0,m_max],[1,1],"k--",linewidth=3) plt.plot([0,m_max],[2,2],"k--",linewidth=1) plt.plot([0,m_max],[0.5,0.5],"k--",linewidth=1) color_iso+=1 legend_k+=1 ##### x_imf=[] y_imf=[] m_max=max(star_mass_array)+2.
Plots behaviour of star mass dependent yields of different isotopes - specify dirs at the beginning Beware of different z, runs : If array is empty the function uses all available directories. !! If len of elements is longer than zero, then isotopes will be ignored and elements used!!
entailment
def _get_request_body_bytes_only(param_name, param_value): '''Validates the request body passed in and converts it to bytes if our policy allows it.''' if param_value is None: return b'' if isinstance(param_value, bytes): return param_value raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
Validates the request body passed in and converts it to bytes if our policy allows it.
entailment
def _get_attribute_value_of(resource, attribute_name, default=None): """Gets the value of attribute_name from the resource It catches the exception, if any, while retrieving the value of attribute_name from resource and returns default. :param resource: The resource object :attribute_name: Property of the resource :returns the property value if no error encountered else return 0. """ try: return getattr(resource, attribute_name) except (sushy.exceptions.SushyError, exception.MissingAttributeError) as e: msg = (('The Redfish controller failed to get the ' 'attribute %(attribute)s from resource %(resource)s. ' 'Error %(error)s') % {'error': str(e), 'attribute': attribute_name, 'resource': resource.__class__.__name__}) LOG.debug(msg) return default
Gets the value of attribute_name from the resource It catches the exception, if any, while retrieving the value of attribute_name from resource and returns default. :param resource: The resource object :attribute_name: Property of the resource :returns the property value if no error is encountered, else returns the given default.
entailment
def get_local_gb(system_obj): """Gets the largest volume or the largest disk :param system_obj: The HPESystem object. :returns the size in GB """ local_max_bytes = 0 logical_max_mib = 0 volume_max_bytes = 0 physical_max_mib = 0 drives_max_bytes = 0 simple_max_bytes = 0 # Gets the resources and properties # its quite possible for a system to lack the resource, hence its # URI may also be lacking. # Check if smart_storage resource exist at the system smart_resource = _get_attribute_value_of(system_obj, 'smart_storage') # Check if storage resource exist at the system storage_resource = _get_attribute_value_of(system_obj, 'storages') if smart_resource is not None: logical_max_mib = _get_attribute_value_of( smart_resource, 'logical_drives_maximum_size_mib', default=0) if storage_resource is not None: volume_max_bytes = _get_attribute_value_of( storage_resource, 'volumes_maximum_size_bytes', default=0) # Get the largest volume from the system. local_max_bytes = utils.max_safe([(logical_max_mib * 1024 * 1024), volume_max_bytes]) # if volume is not found, then traverse through the possible disk drives # and get the biggest disk. if local_max_bytes == 0: if smart_resource is not None: physical_max_mib = _get_attribute_value_of( smart_resource, 'physical_drives_maximum_size_mib', default=0) if storage_resource is not None: drives_max_bytes = _get_attribute_value_of( storage_resource, 'drives_maximum_size_bytes', default=0) # Check if the SimpleStorage resource exist at the system. simple_resource = _get_attribute_value_of(system_obj, 'simple_storages') if simple_resource is not None: simple_max_bytes = _get_attribute_value_of( simple_resource, 'maximum_size_bytes', default=0) local_max_bytes = utils.max_safe([(physical_max_mib * 1024 * 1024), drives_max_bytes, simple_max_bytes]) # Convert the received size to GB and reduce the value by 1 Gb as # ironic requires the local_gb to be returned 1 less than actual size. 
local_gb = 0 if local_max_bytes > 0: local_gb = int(local_max_bytes / (1024 * 1024 * 1024)) - 1 else: msg = ('The maximum size for the hard disk or logical ' 'volume could not be determined.') LOG.debug(msg) return local_gb
Gets the largest volume or the largest disk :param system_obj: The HPESystem object. :returns the size in GB
entailment
def has_ssd(system_obj): """Gets if the system has any drive as SSD drive :param system_obj: The HPESystem object. :returns True if system has SSD drives. """ smart_value = False storage_value = False smart_resource = _get_attribute_value_of(system_obj, 'smart_storage') if smart_resource is not None: smart_value = _get_attribute_value_of( smart_resource, 'has_ssd', default=False) if smart_value: return smart_value # Its returned before just to avoid hitting BMC if we have # already got the SSD device above. storage_resource = _get_attribute_value_of(system_obj, 'storages') if storage_resource is not None: storage_value = _get_attribute_value_of( storage_resource, 'has_ssd', default=False) return storage_value
Gets if the system has any drive as SSD drive :param system_obj: The HPESystem object. :returns True if system has SSD drives.
entailment
def has_nvme_ssd(system_obj): """Gets if the system has any drive as NVMe SSD drive :param system_obj: The HPESystem object. :returns True if system has SSD drives and protocol is NVMe. """ storage_value = False storage_resource = _get_attribute_value_of(system_obj, 'storages') if storage_resource is not None: storage_value = _get_attribute_value_of( storage_resource, 'has_nvme_ssd', default=False) return storage_value
Gets if the system has any drive as NVMe SSD drive :param system_obj: The HPESystem object. :returns True if system has SSD drives and protocol is NVMe.
entailment
def get_drive_rotational_speed_rpm(system_obj): """Gets the set of rotational speed rpms of the disks. :param system_obj: The HPESystem object. :returns the set of rotational speed rpms of the HDD devices. """ speed = set() smart_resource = _get_attribute_value_of(system_obj, 'smart_storage') if smart_resource is not None: speed.update(_get_attribute_value_of( smart_resource, 'drive_rotational_speed_rpm', default=set())) storage_resource = _get_attribute_value_of(system_obj, 'storages') if storage_resource is not None: speed.update(_get_attribute_value_of( storage_resource, 'drive_rotational_speed_rpm', default=set())) return speed
Gets the set of rotational speed rpms of the disks. :param system_obj: The HPESystem object. :returns the set of rotational speed rpms of the HDD devices.
entailment
def create_configuration(self, node, ports): """Create RAID configuration on the bare metal. This method creates the desired RAID configuration as read from node['target_raid_config']. :param node: A dictionary of the node object :param ports: A list of dictionaries containing information of ports for the node :returns: The current RAID configuration of the below format. raid_config = { 'logical_disks': [{ 'size_gb': 100, 'raid_level': 1, 'physical_disks': [ '5I:0:1', '5I:0:2'], 'controller': 'Smart array controller' }, ] } """ target_raid_config = node.get('target_raid_config', {}).copy() return hpssa_manager.create_configuration( raid_config=target_raid_config)
Create RAID configuration on the bare metal. This method creates the desired RAID configuration as read from node['target_raid_config']. :param node: A dictionary of the node object :param ports: A list of dictionaries containing information of ports for the node :returns: The current RAID configuration of the below format. raid_config = { 'logical_disks': [{ 'size_gb': 100, 'raid_level': 1, 'physical_disks': [ '5I:0:1', '5I:0:2'], 'controller': 'Smart array controller' }, ] }
entailment
def erase_devices(self, node, port): """Erase the drives on the bare metal. This method erase all the drives which supports sanitize and the drives which are not part of any logical volume on the bare metal. It calls generic erase method after the success of Sanitize disk erase. :param node: A dictionary of the node object. :param port: A list of dictionaries containing information of ports for the node. :raises exception.HPSSAOperationError, if there is a failure on the erase operation on the controllers. :returns: The dictionary of controllers with the drives and erase status for each drive. """ result = {} result['Disk Erase Status'] = hpssa_manager.erase_devices() result.update(super(ProliantHardwareManager, self).erase_devices(node, port)) return result
Erase the drives on the bare metal. This method erase all the drives which supports sanitize and the drives which are not part of any logical volume on the bare metal. It calls generic erase method after the success of Sanitize disk erase. :param node: A dictionary of the node object. :param port: A list of dictionaries containing information of ports for the node. :raises exception.HPSSAOperationError, if there is a failure on the erase operation on the controllers. :returns: The dictionary of controllers with the drives and erase status for each drive.
entailment
def init_model_based_tags(self, model): """Initializing the model based memory and NIC information tags. It should be called just after instantiating a RIBCL object. ribcl = ribcl.RIBCLOperations(host, login, password, timeout, port, cacert=cacert) model = ribcl.get_product_name() ribcl.init_model_based_tags(model) Again, model attribute is also set here on the RIBCL object. :param model: the model string """ self.model = model if 'G7' in self.model: self.MEMORY_SIZE_TAG = "MEMORY_SIZE" self.MEMORY_SIZE_NOT_PRESENT_TAG = "Not Installed" self.NIC_INFORMATION_TAG = "NIC_INFOMATION" else: self.MEMORY_SIZE_TAG = "TOTAL_MEMORY_SIZE" self.MEMORY_SIZE_NOT_PRESENT_TAG = "N/A" self.NIC_INFORMATION_TAG = "NIC_INFORMATION"
Initializing the model based memory and NIC information tags. It should be called just after instantiating a RIBCL object. ribcl = ribcl.RIBCLOperations(host, login, password, timeout, port, cacert=cacert) model = ribcl.get_product_name() ribcl.init_model_based_tags(model) Again, model attribute is also set here on the RIBCL object. :param model: the model string
entailment
def _request_ilo(self, root, extra_headers=None): """Send RIBCL XML data to iLO. This function sends the XML request to the ILO and receives the output from ILO. :raises: IloConnectionError() if unable to send the request. """ if self.port: urlstr = 'https://%s:%d/ribcl' % (self.host, self.port) else: urlstr = 'https://%s/ribcl' % (self.host) xml = self._serialize_xml(root) headers = {"Content-length": str(len(xml))} if extra_headers: headers.update(extra_headers) kwargs = {'headers': headers, 'data': xml} if self.cacert is not None: kwargs['verify'] = self.cacert else: kwargs['verify'] = False try: LOG.debug(self._("POST %(url)s with request data: " "%(request_data)s"), {'url': urlstr, 'request_data': MaskedRequestData(kwargs)}) response = requests.post(urlstr, **kwargs) response.raise_for_status() except Exception as e: LOG.debug(self._("Unable to connect to iLO. %s"), e) raise exception.IloConnectionError(e) return response.text
Send RIBCL XML data to iLO. This function sends the XML request to the ILO and receives the output from ILO. :raises: IloConnectionError() if unable to send the request.
entailment
def _create_dynamic_xml(self, cmdname, tag_name, mode, subelements=None): """Create RIBCL XML to send to iLO. This function creates the dynamic xml required to be sent to the ILO for all the APIs. :param cmdname: the API which needs to be implemented. :param tag_name: the tag info under which ILO has defined the particular API. :param mode: 'read' or 'write' :param subelements: dictionary containing subelements of the particular API tree. :returns: the etree.Element for the root of the RIBCL XML """ root = etree.Element('RIBCL', VERSION="2.0") login = etree.SubElement( root, 'LOGIN', USER_LOGIN=self.login, PASSWORD=self.password) tagname = etree.SubElement(login, tag_name, MODE=mode) subelements = subelements or {} etree.SubElement(tagname, cmdname) if six.PY2: root_iterator = root.getiterator(cmdname) else: root_iterator = root.iter(cmdname) for cmd in root_iterator: for key, value in subelements.items(): cmd.set(key, value) return root
Create RIBCL XML to send to iLO. This function creates the dynamic xml required to be sent to the ILO for all the APIs. :param cmdname: the API which needs to be implemented. :param tag_name: the tag info under which ILO has defined the particular API. :param mode: 'read' or 'write' :param subelements: dictionary containing subelements of the particular API tree. :returns: the etree.Element for the root of the RIBCL XML
entailment
def _serialize_xml(self, root): """Serialize XML data into string It serializes the dynamic xml created and converts it to a string. This is done before sending the xml to the ILO. :param root: root of the dynamic xml. """ if hasattr(etree, 'tostringlist'): if six.PY3: xml_content_list = [ x.decode("utf-8") for x in etree.tostringlist(root)] else: xml_content_list = etree.tostringlist(root) xml = '\r\n'.join(xml_content_list) + '\r\n' else: if six.PY3: xml_content = etree.tostring(root).decode("utf-8") else: xml_content = etree.tostring(root) xml = xml_content + '\r\n' return xml
Serialize XML data into string It serializes the dynamic xml created and converts it to a string. This is done before sending the xml to the ILO. :param root: root of the dynamic xml.
entailment
def _parse_output(self, xml_response): """Parse the response XML from iLO. This function parses the output received from ILO. As the output contains multiple XMLs, it extracts one xml at a time and loops over till all the xmls in the response are exhausted. It returns the data to APIs either in dictionary format or as the string. It creates the dictionary only if the Ilo response contains the data under the requested RIBCL command. If the Ilo response contains only the string, then the string is returned back. """ count = 0 xml_dict = {} resp_message = None xml_start_pos = [] for m in re.finditer(r"\<\?xml", xml_response): xml_start_pos.append(m.start()) while count < len(xml_start_pos): if (count == len(xml_start_pos) - 1): result = xml_response[xml_start_pos[count]:] else: start = xml_start_pos[count] end = xml_start_pos[count + 1] result = xml_response[start:end] result = result.strip() message = etree.fromstring(result) resp = self._validate_message(message) if hasattr(resp, 'tag'): xml_dict = self._elementtree_to_dict(resp) elif resp is not None: resp_message = resp count = count + 1 if xml_dict: return xml_dict elif resp_message is not None: return resp_message
Parse the response XML from iLO. This function parses the output received from ILO. As the output contains multiple XMLs, it extracts one xml at a time and loops over till all the xmls in the response are exhausted. It returns the data to APIs either in dictionary format or as the string. It creates the dictionary only if the Ilo response contains the data under the requested RIBCL command. If the Ilo response contains only the string, then the string is returned back.
entailment
def _elementtree_to_dict(self, element): """Convert XML elementtree to dictionary. Converts the actual response from the ILO for an API to the dictionary. """ node = {} text = getattr(element, 'text') if text is not None: text = text.strip() if len(text) != 0: node['text'] = text node.update(element.items()) # element's attributes child_nodes = {} for child in element: # element's children child_nodes.setdefault(child.tag, []).append( self._elementtree_to_dict(child)) # convert all single-element lists into non-lists for key, value in child_nodes.items(): if len(value) == 1: child_nodes[key] = value[0] node.update(child_nodes.items()) return node
Convert XML elementtree to dictionary. Converts the actual response from the ILO for an API to the dictionary.
entailment
def _validate_message(self, message): """Validate XML response from iLO. This function validates the XML response to see if the exit status is 0 or not in the response. If the status is non-zero it raises exception. """ if message.tag != 'RIBCL': # the true case shall be unreachable for response # XML from Ilo as all messages are tagged with RIBCL # but still raise an exception if any invalid # XML response is returned by Ilo. Set status to some # arbitary non-zero value. status = -1 raise exception.IloClientInternalError(message, status) for child in message: if child.tag != 'RESPONSE': return message status = int(child.get('STATUS'), 16) msg = child.get('MESSAGE') if status == 0 and msg != 'No error': return msg if status != 0: if 'syntax error' in msg or 'Feature not supported' in msg: for cmd in BOOT_MODE_CMDS: if cmd in msg: platform = self.get_product_name() msg = ("%(cmd)s is not supported on %(platform)s" % {'cmd': cmd, 'platform': platform}) LOG.debug(self._("Got invalid response with " "message: '%(message)s'"), {'message': msg}) raise (exception.IloCommandNotSupportedError (msg, status)) else: LOG.debug(self._("Got invalid response with " "message: '%(message)s'"), {'message': msg}) raise exception.IloClientInternalError(msg, status) if (status in exception.IloLoginFailError.statuses or msg in exception.IloLoginFailError.messages): LOG.debug(self._("Got invalid response with " "message: '%(message)s'"), {'message': msg}) raise exception.IloLoginFailError(msg, status) LOG.debug(self._("Got invalid response with " "message: '%(message)s'"), {'message': msg}) raise exception.IloError(msg, status)
Validate XML response from iLO. This function validates the XML response to see if the exit status is 0 or not in the response. If the status is non-zero it raises exception.
entailment
def _execute_command(self, create_command, tag_info, mode, dic={}): """Execute a command on the iLO. Common infrastructure used by all APIs to send/get response from ILO. """ xml = self._create_dynamic_xml( create_command, tag_info, mode, dic) d = self._request_ilo(xml) data = self._parse_output(d) LOG.debug(self._("Received response data: %s"), data) return data
Execute a command on the iLO. Common infrastructure used by all APIs to send/get response from ILO.
entailment
def get_all_licenses(self): """Retrieve license type, key, installation date, etc.""" data = self._execute_command('GET_ALL_LICENSES', 'RIB_INFO', 'read') d = {} for key, val in data['GET_ALL_LICENSES']['LICENSE'].items(): if isinstance(val, dict): d[key] = data['GET_ALL_LICENSES']['LICENSE'][key]['VALUE'] return d
Retrieve license type, key, installation date, etc.
entailment
def get_vm_status(self, device='FLOPPY'): """Returns the virtual media drive status.""" dic = {'DEVICE': device.upper()} data = self._execute_command( 'GET_VM_STATUS', 'RIB_INFO', 'read', dic) return data['GET_VM_STATUS']
Returns the virtual media drive status.
entailment
def set_host_power(self, power): """Toggle the power button of server. :param power: 'ON' or 'OFF' """ if power.upper() in POWER_STATE: dic = {'HOST_POWER': POWER_STATE[power.upper()]} data = self._execute_command( 'SET_HOST_POWER', 'SERVER_INFO', 'write', dic) return data else: raise exception.IloInvalidInputError( "Invalid input. The expected input is ON or OFF.")
Toggle the power button of server. :param power: 'ON' or 'OFF'
entailment
def set_one_time_boot(self, value): """Configures a single boot from a specific device. :param value: specific device to which the boot option is set """ dic = {'value': value} data = self._execute_command( 'SET_ONE_TIME_BOOT', 'SERVER_INFO', 'write', dic) return data
Configures a single boot from a specific device. :param value: specific device to which the boot option is set
entailment
def insert_virtual_media(self, url, device='FLOPPY'): """Notifies iLO of the location of a virtual media diskette image.""" dic = { 'DEVICE': device.upper(), 'IMAGE_URL': url, } data = self._execute_command( 'INSERT_VIRTUAL_MEDIA', 'RIB_INFO', 'write', dic) return data
Notifies iLO of the location of a virtual media diskette image.
entailment
def eject_virtual_media(self, device='FLOPPY'): """Ejects the Virtual Media image if one is inserted.""" vm_status = self.get_vm_status(device=device) if vm_status['IMAGE_INSERTED'] == 'NO': return dic = {'DEVICE': device.upper()} self._execute_command( 'EJECT_VIRTUAL_MEDIA', 'RIB_INFO', 'write', dic)
Ejects the Virtual Media image if one is inserted.
entailment
def set_vm_status(self, device='FLOPPY', boot_option='BOOT_ONCE', write_protect='YES'): """Sets the Virtual Media drive status It also allows the boot options for booting from the virtual media. """ dic = {'DEVICE': device.upper()} xml = self._create_dynamic_xml( 'SET_VM_STATUS', 'RIB_INFO', 'write', dic) if six.PY2: child_iterator = xml.getiterator() else: child_iterator = xml.iter() for child in child_iterator: if child.tag == 'SET_VM_STATUS': etree.SubElement(child, 'VM_BOOT_OPTION', VALUE=boot_option.upper()) etree.SubElement(child, 'VM_WRITE_PROTECT', VALUE=write_protect.upper()) d = self._request_ilo(xml) data = self._parse_output(d) return data
Sets the Virtual Media drive status It also allows the boot options for booting from the virtual media.
entailment
def get_supported_boot_mode(self): """Retrieves the supported boot mode. :returns: any one of the following proliantutils.ilo.constants: SUPPORTED_BOOT_MODE_LEGACY_BIOS_ONLY, SUPPORTED_BOOT_MODE_UEFI_ONLY, SUPPORTED_BOOT_MODE_LEGACY_BIOS_AND_UEFI """ data = self._execute_command( 'GET_SUPPORTED_BOOT_MODE', 'SERVER_INFO', 'read') supported_boot_mode = ( data['GET_SUPPORTED_BOOT_MODE']['SUPPORTED_BOOT_MODE']['VALUE']) return mappings.GET_SUPPORTED_BOOT_MODE_RIBCL_MAP.get( supported_boot_mode)
Retrieves the supported boot mode. :returns: any one of the following proliantutils.ilo.constants: SUPPORTED_BOOT_MODE_LEGACY_BIOS_ONLY, SUPPORTED_BOOT_MODE_UEFI_ONLY, SUPPORTED_BOOT_MODE_LEGACY_BIOS_AND_UEFI
entailment
def set_pending_boot_mode(self, value): """Configures the boot mode of the system from a specific boot mode.""" dic = {'value': value} data = self._execute_command( 'SET_PENDING_BOOT_MODE', 'SERVER_INFO', 'write', dic) return data
Configures the boot mode of the system from a specific boot mode.
entailment
def get_persistent_boot_device(self): """Get the current persistent boot device set for the host.""" result = self._get_persistent_boot() boot_mode = self._check_boot_mode(result) if boot_mode == 'bios': return result[0]['value'] value = result[0]['DESCRIPTION'] if 'HP iLO Virtual USB CD' in value: return 'CDROM' elif 'NIC' in value or 'PXE' in value: return 'NETWORK' elif common.isDisk(value): return 'HDD' else: return None
Get the current persistent boot device set for the host.
entailment
def _set_persistent_boot(self, values=[]): """Configures a boot from a specific device.""" xml = self._create_dynamic_xml( 'SET_PERSISTENT_BOOT', 'SERVER_INFO', 'write') if six.PY2: child_iterator = xml.getiterator() else: child_iterator = xml.iter() for child in child_iterator: for val in values: if child.tag == 'SET_PERSISTENT_BOOT': etree.SubElement(child, 'DEVICE', VALUE=val) d = self._request_ilo(xml) data = self._parse_output(d) return data
Configures a boot from a specific device.
entailment
def _request_host(self): """Request host info from the server.""" urlstr = 'https://%s/xmldata?item=all' % (self.host) kwargs = {} if self.cacert is not None: kwargs['verify'] = self.cacert else: kwargs['verify'] = False try: response = requests.get(urlstr, **kwargs) response.raise_for_status() except Exception as e: raise IloConnectionError(e) return response.text
Request host info from the server.
entailment
def get_host_uuid(self): """Request host UUID of the server. :returns: the host UUID of the server :raises: IloConnectionError if failed connecting to the iLO. """ xml = self._request_host() root = etree.fromstring(xml) data = self._elementtree_to_dict(root) return data['HSI']['SPN']['text'], data['HSI']['cUUID']['text']
Request host UUID of the server. :returns: the host UUID of the server :raises: IloConnectionError if failed connecting to the iLO.
entailment
def get_host_health_data(self, data=None): """Request host health data of the server. :param: the data to retrieve from the server, defaults to None. :returns: the dictionary containing the embedded health data. :raises: IloConnectionError if failed connecting to the iLO. :raises: IloError, on an error from iLO. """ if not data or data and "GET_EMBEDDED_HEALTH_DATA" not in data: data = self._execute_command( 'GET_EMBEDDED_HEALTH', 'SERVER_INFO', 'read') return data
Request host health data of the server. :param: the data to retrieve from the server, defaults to None. :returns: the dictionary containing the embedded health data. :raises: IloConnectionError if failed connecting to the iLO. :raises: IloError, on an error from iLO.
entailment
def get_host_health_power_supplies(self, data=None): """Request the health power supply information. :param: the data to retrieve from the server, defaults to None. :returns: the dictionary containing the power supply information. :raises: IloConnectionError if failed connecting to the iLO. :raises: IloError, on an error from iLO. """ data = self.get_host_health_data(data) d = (data['GET_EMBEDDED_HEALTH_DATA']['POWER_SUPPLIES']['SUPPLY']) if not isinstance(d, list): d = [d] return d
Request the health power supply information. :param: the data to retrieve from the server, defaults to None. :returns: the dictionary containing the power supply information. :raises: IloConnectionError if failed connecting to the iLO. :raises: IloError, on an error from iLO.
entailment
def get_host_health_temperature_sensors(self, data=None): """Get the health Temp Sensor report. :param: the data to retrieve from the server, defaults to None. :returns: the dictionary containing the temperature sensors information. :raises: IloConnectionError if failed connecting to the iLO. :raises: IloError, on an error from iLO. """ data = self.get_host_health_data(data) d = data['GET_EMBEDDED_HEALTH_DATA']['TEMPERATURE']['TEMP'] if not isinstance(d, list): d = [d] return d
Get the health Temp Sensor report. :param: the data to retrieve from the server, defaults to None. :returns: the dictionary containing the temperature sensors information. :raises: IloConnectionError if failed connecting to the iLO. :raises: IloError, on an error from iLO.
entailment
def get_host_health_fan_sensors(self, data=None): """Get the health Fan Sensor Report. :param: the data to retrieve from the server, defaults to None. :returns: the dictionary containing the fan sensor information. :raises: IloConnectionError if failed connecting to the iLO. :raises: IloError, on an error from iLO. """ data = self.get_host_health_data(data) d = data['GET_EMBEDDED_HEALTH_DATA']['FANS']['FAN'] if not isinstance(d, list): d = [d] return d
Get the health Fan Sensor Report. :param: the data to retrieve from the server, defaults to None. :returns: the dictionary containing the fan sensor information. :raises: IloConnectionError if failed connecting to the iLO. :raises: IloError, on an error from iLO.
entailment
def reset_ilo_credential(self, password): """Resets the iLO password. :param password: The password to be set. :raises: IloError, if account not found or on an error from iLO. """ dic = {'USER_LOGIN': self.login} root = self._create_dynamic_xml( 'MOD_USER', 'USER_INFO', 'write', dic) element = root.find('LOGIN/USER_INFO/MOD_USER') etree.SubElement(element, 'PASSWORD', VALUE=password) d = self._request_ilo(root) self._parse_output(d)
Resets the iLO password. :param password: The password to be set. :raises: IloError, if account not found or on an error from iLO.
entailment
def get_essential_properties(self): """Gets essential scheduling properties as required by ironic :returns: a dictionary of server properties like memory size, disk size, number of cpus, cpu arch, port numbers and mac addresses. :raises:IloError if iLO returns an error in command execution. """ data = self.get_host_health_data() properties = { 'memory_mb': self._parse_memory_embedded_health(data) } cpus, cpu_arch = self._parse_processor_embedded_health(data) properties['cpus'] = cpus properties['cpu_arch'] = cpu_arch properties['local_gb'] = self._parse_storage_embedded_health(data) macs = self._parse_nics_embedded_health(data) return_value = {'properties': properties, 'macs': macs} return return_value
Gets essential scheduling properties as required by ironic :returns: a dictionary of server properties like memory size, disk size, number of cpus, cpu arch, port numbers and mac addresses. :raises:IloError if iLO returns an error in command execution.
entailment
def get_server_capabilities(self): """Gets server properties which can be used for scheduling :returns: a dictionary of hardware properties like firmware versions, server model. :raises: IloError, if iLO returns an error in command execution. """ capabilities = {} data = self.get_host_health_data() ilo_firmware = self._get_ilo_firmware_version(data) if ilo_firmware: capabilities.update(ilo_firmware) rom_firmware = self._get_rom_firmware_version(data) if rom_firmware: capabilities.update(rom_firmware) capabilities.update({'server_model': self.get_product_name()}) capabilities.update(self._get_number_of_gpu_devices_connected(data)) boot_modes = common.get_supported_boot_modes( self.get_supported_boot_mode()) capabilities.update({ 'boot_mode_bios': boot_modes.boot_mode_bios, 'boot_mode_uefi': boot_modes.boot_mode_uefi}) return capabilities
Gets server properties which can be used for scheduling :returns: a dictionary of hardware properties like firmware versions, server model. :raises: IloError, if iLO returns an error in command execution.
entailment
def _parse_memory_embedded_health(self, data): """Parse the get_host_health_data() for essential properties :param data: the output returned by get_host_health_data() :returns: memory size in MB. :raises IloError, if unable to get the memory details. """ memory_mb = 0 memory = self._get_memory_details_value_based_on_model(data) if memory is None: msg = "Unable to get memory data. Error: Data missing" raise exception.IloError(msg) total_memory_size = 0 for memory_item in memory: memsize = memory_item[self.MEMORY_SIZE_TAG]["VALUE"] if memsize != self.MEMORY_SIZE_NOT_PRESENT_TAG: memory_bytes = ( strutils.string_to_bytes( memsize.replace(' ', ''), return_int=True)) memory_mb = int(memory_bytes / (1024 * 1024)) total_memory_size = total_memory_size + memory_mb return total_memory_size
Parse the get_host_health_data() for essential properties :param data: the output returned by get_host_health_data() :returns: memory size in MB. :raises IloError, if unable to get the memory details.
entailment
def _parse_processor_embedded_health(self, data): """Parse the get_host_health_data() for essential properties :param data: the output returned by get_host_health_data() :returns: processor details like cpu arch and number of cpus. """ processor = self.get_value_as_list((data['GET_EMBEDDED_HEALTH_DATA'] ['PROCESSORS']), 'PROCESSOR') if processor is None: msg = "Unable to get cpu data. Error: Data missing" raise exception.IloError(msg) cpus = 0 for proc in processor: for val in proc.values(): processor_detail = val['VALUE'] proc_core_threads = processor_detail.split('; ') for x in proc_core_threads: if "thread" in x: v = x.split() try: cpus = cpus + int(v[0]) except ValueError: msg = ("Unable to get cpu data. " "The Value %s returned couldn't be " "manipulated to get number of " "actual processors" % processor_detail) raise exception.IloError(msg) cpu_arch = 'x86_64' return cpus, cpu_arch
Parse the get_host_health_data() for essential properties :param data: the output returned by get_host_health_data() :returns: processor details like cpu arch and number of cpus.
entailment
def _parse_storage_embedded_health(self, data): """Gets the storage data from get_embedded_health Parse the get_host_health_data() for essential properties :param data: the output returned by get_host_health_data() :returns: disk size in GB. """ local_gb = 0 storage = self.get_value_as_list(data['GET_EMBEDDED_HEALTH_DATA'], 'STORAGE') if storage is None: # We dont raise exception because this dictionary # is available only when RAID is configured. # If we raise error here then we will always fail # inspection where this module is consumed. Hence # as a workaround just return 0. return local_gb minimum = local_gb for item in storage: cntlr = self.get_value_as_list(item, 'CONTROLLER') if cntlr is None: continue for s in cntlr: drive = self.get_value_as_list(s, 'LOGICAL_DRIVE') if drive is None: continue for item in drive: for key, val in item.items(): if key == 'CAPACITY': capacity = val['VALUE'] local_bytes = (strutils.string_to_bytes( capacity.replace(' ', ''), return_int=True)) local_gb = int(local_bytes / (1024 * 1024 * 1024)) if minimum >= local_gb or minimum == 0: minimum = local_gb # Return disk size 1 less than the actual disk size. This prevents # the deploy to fail from Nova when root_gb is same as local_gb # in Ironic. When the disk size is used as root_device hints, # then it should be given as the actual size i.e. # ironic (node.properties['local_gb'] + 1) else root device # hint will fail. if minimum: minimum = minimum - 1 return minimum
Gets the storage data from get_embedded_health Parse the get_host_health_data() for essential properties :param data: the output returned by get_host_health_data() :returns: disk size in GB.
entailment
def get_value_as_list(self, dictionary, key): """Helper function to check and convert a value to list. Helper function to check and convert a value to json list. This helps the ribcl data to be generalized across the servers. :param dictionary: a dictionary to check in if key is present. :param key: key to be checked if thats present in the given dictionary. :returns the data converted to a list. """ if key not in dictionary: return None value = dictionary[key] if not isinstance(value, list): return [value] else: return value
Helper function to check and convert a value to list. Helper function to check and convert a value to json list. This helps the ribcl data to be generalized across the servers. :param dictionary: a dictionary to check in if key is present. :param key: key to be checked if thats present in the given dictionary. :returns the data converted to a list.
entailment
def _parse_nics_embedded_health(self, data): """Gets the NIC details from get_embedded_health data Parse the get_host_health_data() for essential properties :param data: the output returned by get_host_health_data() :returns: a dictionary of port numbers and their corresponding mac addresses. :raises IloError, if unable to get NIC data. """ nic_data = self.get_value_as_list((data['GET_EMBEDDED_HEALTH_DATA'] [self.NIC_INFORMATION_TAG]), 'NIC') if nic_data is None: msg = "Unable to get NIC details. Data missing" raise exception.IloError(msg) nic_dict = {} for item in nic_data: try: port = item['NETWORK_PORT']['VALUE'] mac = item['MAC_ADDRESS']['VALUE'] self._update_nic_data_from_nic_info_based_on_model(nic_dict, item, port, mac) except KeyError: msg = "Unable to get NIC details. Data missing" raise exception.IloError(msg) return nic_dict
Gets the NIC details from get_embedded_health data Parse the get_host_health_data() for essential properties :param data: the output returned by get_host_health_data() :returns: a dictionary of port numbers and their corresponding mac addresses. :raises IloError, if unable to get NIC data.
entailment
def _get_firmware_embedded_health(self, data): """Parse the get_host_health_data() for server capabilities :param data: the output returned by get_host_health_data() :returns: a dictionary of firmware name and firmware version. """ firmware = self.get_value_as_list(data['GET_EMBEDDED_HEALTH_DATA'], 'FIRMWARE_INFORMATION') if firmware is None: return None return dict((y['FIRMWARE_NAME']['VALUE'], y['FIRMWARE_VERSION']['VALUE']) for x in firmware for y in x.values())
Parse the get_host_health_data() for server capabilities :param data: the output returned by get_host_health_data() :returns: a dictionary of firmware name and firmware version.
entailment
def _get_rom_firmware_version(self, data): """Gets the rom firmware version for server capabilities Parse the get_host_health_data() to retreive the firmware details. :param data: the output returned by get_host_health_data() :returns: a dictionary of rom firmware version. """ firmware_details = self._get_firmware_embedded_health(data) if firmware_details: try: rom_firmware_version = ( firmware_details['HP ProLiant System ROM']) return {'rom_firmware_version': rom_firmware_version} except KeyError: return None
Gets the rom firmware version for server capabilities Parse the get_host_health_data() to retreive the firmware details. :param data: the output returned by get_host_health_data() :returns: a dictionary of rom firmware version.
entailment