_id: string (length 2 to 7)
title: string (length 1 to 88)
partition: string (3 classes)
text: string (length 75 to 19.8k)
language: string (1 class)
meta_information: dict
q19900
Users.set_privilege
train
def set_privilege(self, name, value=None):
    """Configures the user privilege value in EOS

    Args:
        name (str): The name of the user to create
        value (int): The privilege value to assign to the user. Valid
            values are in the range of 0 to 15

    Returns:
        True if the operation was successful otherwise False

    Raises:
        TypeError: if the value is not in the valid range
    """
    cmd = 'username %s' % name
    if value is not None:
        if not isprivilege(value):
            raise TypeError('privilege value must be between 0 and 15')
        cmd += ' privilege %s' % value
    else:
        cmd += ' privilege 1'
    return self.configure(cmd)
python
{ "resource": "" }
q19901
Users.set_role
train
def set_role(self, name, value=None, default=False, disable=False):
    """Configures the user role value in EOS

    Args:
        name (str): The name of the user to create
        value (str): The value to configure for the user role
        default (bool): Configure the user role using the EOS CLI
            default command
        disable (bool): Negate the user role using the EOS CLI no command

    Returns:
        True if the operation was successful otherwise False
    """
    cmd = self.command_builder('username %s role' % name, value=value,
                               default=default, disable=disable)
    return self.configure(cmd)
python
{ "resource": "" }
q19902
make_connection
train
def make_connection(transport, **kwargs):
    """Creates a connection instance based on the transport

    This function creates the EapiConnection object based on the
    desired transport.  It looks up the transport class in the
    TRANSPORTS global dictionary.

    Args:
        transport (string): The transport to use to create the instance.
        **kwargs: Arbitrary keyword arguments.

    Returns:
        An instance of a connection object based on the transport

    Raises:
        TypeError: A TypeError is raised if the transport keyword is not
            found in the list (keys) of available transports.
    """
    if transport not in TRANSPORTS:
        raise TypeError('invalid transport specified')
    klass = TRANSPORTS[transport]
    return klass(**kwargs)
python
{ "resource": "" }
q19903
connect
train
def connect(transport=None, host='localhost', username='admin',
            password='', port=None, timeout=60, return_node=False,
            **kwargs):
    """Creates a connection using the supplied settings

    This function will create a connection to an Arista EOS node using
    the arguments.  All arguments are optional with default values.

    Args:
        transport (str): Specifies the type of connection transport to
            use.  Valid values for the connection are socket, http_local,
            http, and https.  The default value is specified in
            DEFAULT_TRANSPORT
        host (str): The IP address or DNS host name of the connection
            device.  The default value is 'localhost'
        username (str): The username to pass to the device to
            authenticate the eAPI connection.  The default value is
            'admin'
        password (str): The password to pass to the device to
            authenticate the eAPI connection.  The default value is ''
        port (int): The TCP port of the endpoint for the eAPI connection.
            If this keyword is not specified, the default value is
            automatically determined by the transport type (http=80,
            https=443)
        timeout (int): The timeout in seconds to wait for the eAPI
            connection.  The default value is 60
        return_node (bool): Returns a Node object if True, otherwise
            returns an EapiConnection object.

    Returns:
        An instance of an EapiConnection object for the specified
        transport.
    """
    transport = transport or DEFAULT_TRANSPORT
    connection = make_connection(transport, host=host, username=username,
                                 password=password, port=port,
                                 timeout=timeout)
    if return_node:
        return Node(connection, transport=transport, host=host,
                    username=username, password=password, port=port,
                    **kwargs)
    return connection
python
{ "resource": "" }
q19904
connect_to
train
def connect_to(name):
    """Creates a node instance based on an entry from the config

    This function will retrieve the settings for the specified connection
    from the config and return a Node instance.  The configuration must
    be loaded prior to calling this function.

    Args:
        name (str): The name of the connection to load from the config.
            The name argument should be the connection name (everything
            right of the colon from the INI file)

    Returns:
        This function will return an instance of Node with the settings
        from the config instance.

    Raises:
        AttributeError: raised if the specified configuration name is not
            found in the loaded configuration
    """
    kwargs = config_for(name)
    if not kwargs:
        raise AttributeError('connection profile not found in config')
    node = connect(return_node=True, **kwargs)
    return node
python
{ "resource": "" }
q19905
Config.connections
train
def connections(self):
    """ Returns all of the loaded connections names as a list
    """
    conn = lambda x: str(x).replace('connection:', '')
    return [conn(name) for name in self.sections()]
python
{ "resource": "" }
q19906
Config.autoload
train
def autoload(self):
    """ Loads the eapi.conf file

    This method will use the module variable CONFIG_SEARCH_PATH to
    attempt to locate a valid eapi.conf file if a filename is not
    already configured.  This method will load the first eapi.conf file
    it finds and then return.

    The CONFIG_SEARCH_PATH can be overridden using an environment
    variable by setting EAPI_CONF.
    """
    path = list(CONFIG_SEARCH_PATH)
    if 'EAPI_CONF' in os.environ:
        path = os.environ['EAPI_CONF']
    elif self.filename:
        path = self.filename

    path = make_iterable(path)

    for filename in path:
        filename = os.path.expanduser(filename)
        if os.path.exists(filename):
            self.filename = filename
            return self.read(filename)

    self._add_default_connection()
python
{ "resource": "" }
q19907
Config.read
train
def read(self, filename):
    """Reads the file specified by filename

    This method will load the eapi.conf file specified by filename into
    the instance object.  It will also add the default connection
    localhost if it was not defined in the eapi.conf file

    Args:
        filename (str): The full path to the file to load
    """
    try:
        SafeConfigParser.read(self, filename)
    except SafeConfigParserError as exc:
        # Ignore file and syslog a message on SafeConfigParser errors
        msg = ("%s: parsing error in eapi conf file: %s" %
               (type(exc).__name__, filename))
        debug(msg)

    self._add_default_connection()

    for name in self.sections():
        if name.startswith('connection:') and \
           'host' not in dict(self.items(name)):
            self.set(name, 'host', name.split(':')[1])
    self.generate_tags()
python
{ "resource": "" }
q19908
Config.generate_tags
train
def generate_tags(self):
    """ Generates the tags collection, mapping each tag to its hosts
    """
    self.tags = dict()
    for section in self.sections():
        if self.has_option(section, 'tags'):
            tags = self.get(section, 'tags')
            for tag in [str(t).strip() for t in tags.split(',')]:
                if tag not in self.tags:
                    self.tags[tag] = list()
                self.tags[tag].append(section.split(':')[1])
python
{ "resource": "" }
q19909
Config.reload
train
def reload(self):
    """Reloads the configuration

    This method will reload the configuration instance using the last
    known filename.  Note this method will initially clear the
    configuration and reload all entries.
    """
    for section in self.sections():
        self.remove_section(section)
    self.autoload()
python
{ "resource": "" }
q19910
Config.get_connection
train
def get_connection(self, name):
    """Returns the properties for a connection name

    This method will return the settings for the configuration specified
    by name.  Note that the name argument should only be the name.

    For instance, given the following eapi.conf file

    .. code-block:: ini

        [connection:veos01]
        transport: http

    The name to use to retrieve the configuration would be veos01

        >>> pyeapi.client.config.get_connection('veos01')

    Args:
        name (str): The name of the connection to return

    Returns:
        A Python dictionary object of key/value pairs that represent
        the node configuration.  If the name provided in the argument
        is not found, then None is returned.
    """
    name = 'connection:{}'.format(name)
    if not self.has_section(name):
        return None
    return dict(self.items(name))
python
{ "resource": "" }
q19911
Config.add_connection
train
def add_connection(self, name, **kwargs):
    """Adds a connection to the configuration

    This method will add a connection to the configuration.  The
    connection added is only available for the lifetime of the object
    and is not persisted.

    Note:
        If a call is made to load() or reload(), any connections added
        with this method must be re-added to the config instance

    Args:
        name (str): The name of the connection to add to the config.
            The name provided will automatically be prepended with the
            string connection:
        **kwargs (dict): The set of properties used to provide the node
            configuration
    """
    name = 'connection:{}'.format(name)
    self.add_section(name)
    for key, value in list(kwargs.items()):
        self.set(name, key, value)
    self.generate_tags()
python
{ "resource": "" }
q19912
Node._get_version_properties
train
def _get_version_properties(self):
    """Parses version and model information out of 'show version' output
    and uses the output to populate class properties.
    """
    # Parse out version info (raw strings avoid invalid-escape warnings;
    # the original class [\d.\d]+ is equivalent to [\d.]+)
    output = self.enable('show version')
    self._version = str(output[0]['result']['version'])
    match = re.match(r'[\d.]+', output[0]['result']['version'])
    if match:
        self._version_number = str(match.group(0))
    else:
        self._version_number = str(output[0]['result']['version'])
    # Parse out model number
    match = re.search(r'\d\d\d\d', output[0]['result']['modelName'])
    if match:
        self._model = str(match.group(0))
    else:
        self._model = str(output[0]['result']['modelName'])
python
{ "resource": "" }
q19913
Node.config
train
def config(self, commands, **kwargs):
    """Configures the node with the specified commands

    This method is used to send configuration commands to the node.  It
    will take either a string or a list and prepend the necessary
    commands to put the session into config mode.

    Args:
        commands (str, list): The commands to send to the node in config
            mode.  If the commands argument is a string it will be cast
            to a list.  The list of commands will also be prepended with
            the necessary commands to put the session in config mode.
        **kwargs: Additional keyword arguments for expanded eAPI
            functionality.  Only supported eAPI params are used in
            building the request

    Returns:
        The config method will return a list of dictionaries with the
        output from each command.  The function will strip the response
        from any commands it prepends.
    """
    commands = make_iterable(commands)
    commands = list(commands)

    # push the configure command onto the command stack
    commands.insert(0, 'configure terminal')
    response = self.run_commands(commands, **kwargs)

    if self.autorefresh:
        self.refresh()

    # pop the configure command output off the stack
    response.pop(0)

    return response
python
{ "resource": "" }
q19914
Node.section
train
def section(self, regex, config='running_config'):
    """Returns a section of the config

    Args:
        regex (str): A valid regular expression used to select sections
            of configuration to return
        config (str): The configuration to return.  Valid values for
            config are "running_config" or "startup_config".  The
            default value is "running_config"

    Returns:
        The configuration section as a string object.
    """
    if config in ['running_config', 'startup_config']:
        config = getattr(self, config)
    match = re.search(regex, config, re.M)
    if not match:
        raise TypeError('config section not found')
    block_start, line_end = match.regs[0]

    match = re.search(r'^[^\s]', config[line_end:], re.M)
    if not match:
        raise TypeError('could not find end block')
    _, block_end = match.regs[0]

    block_end = line_end + block_end

    return config[block_start:block_end]
python
{ "resource": "" }
q19915
Node.enable
train
def enable(self, commands, encoding='json', strict=False,
           send_enable=True, **kwargs):
    """Sends the array of commands to the node in enable mode

    This method will send the commands to the node and evaluate the
    results.  If a command fails due to an encoding error, then the
    command set will be re-issued individually with text encoding.

    Args:
        commands (list): The list of commands to send to the node
        encoding (str): The requested encoding of the command output.
            Valid values for encoding are JSON or text
        strict (bool): If False, this method will attempt to run a
            command with text encoding if JSON encoding fails
        send_enable (bool): If True the enable command will be
            prepended to the command list automatically.
        **kwargs: Additional keyword arguments for expanded eAPI
            functionality.  Only supported eAPI params are used in
            building the request

    Returns:
        A dict object that includes the response for each command along
        with the encoding

    Raises:
        TypeError: This method does not support sending configure
            commands and will raise a TypeError if configuration
            commands are found in the list of commands provided.

            This method will also raise a TypeError if the specified
            encoding is not one of 'json' or 'text'

        CommandError: This method will raise a CommandError if any one
            of the commands fails.
    """
    commands = make_iterable(commands)

    if 'configure' in commands:
        raise TypeError('config mode commands not supported')

    results = list()
    # IMPORTANT: There are two keys (response, result) that both
    # return the same value. 'response' was originally placed
    # there in error and both are now present to avoid breaking
    # existing scripts. 'response' will be removed in a future release.
    if strict:
        responses = self.run_commands(commands, encoding, send_enable,
                                      **kwargs)
        for index, response in enumerate(responses):
            results.append(dict(command=commands[index],
                                result=response,
                                response=response,
                                encoding=encoding))
    else:
        for command in commands:
            try:
                resp = self.run_commands(command, encoding, send_enable,
                                         **kwargs)
                results.append(dict(command=command,
                                    result=resp[0],
                                    encoding=encoding))
            except CommandError as exc:
                if exc.error_code == 1003:
                    resp = self.run_commands(command, 'text', send_enable,
                                             **kwargs)
                    results.append(dict(command=command,
                                        result=resp[0],
                                        encoding='text'))
                else:
                    raise
    return results
python
{ "resource": "" }
q19916
Node.run_commands
train
def run_commands(self, commands, encoding='json', send_enable=True,
                 **kwargs):
    """Sends the commands over the transport to the device

    This method sends the commands to the device using the nodes
    transport.  This is a lower layer function that shouldn't normally
    need to be used, preferring instead to use config() or enable().

    Args:
        commands (list): The ordered list of commands to send to the
            device using the transport
        encoding (str): The encoding method to use for the request and
            expected response.
        send_enable (bool): If True the enable command will be
            prepended to the command list automatically.
        **kwargs: Additional keyword arguments for expanded eAPI
            functionality.  Only supported eAPI params are used in
            building the request

    Returns:
        This method will return the raw response from the connection
        which is a Python dictionary object.
    """
    commands = make_iterable(commands)

    # Some commands are multiline commands.  These are banner commands
    # and SSL commands.  The list comprehension below supports them by
    # allowing commands of the form:
    #   banner login MULTILINE: This is my banner.\nAnd I even
    #   support multiple lines.
    # Why? To be able to read a configuration from a file, split it
    # into lines and pass it as-is to pyeapi without caring about
    # multiline commands.
    commands = [{'cmd': c.split('MULTILINE:')[0],
                 'input': '%s\n' % (c.split('MULTILINE:')[1].strip())}
                if 'MULTILINE:' in c else c for c in commands]

    if send_enable:
        if self._enablepwd:
            commands.insert(0, {'cmd': 'enable', 'input': self._enablepwd})
        else:
            commands.insert(0, 'enable')

    response = self._connection.execute(commands, encoding, **kwargs)

    # pop enable command from the response only if we sent enable
    if send_enable:
        response['result'].pop(0)

    return response['result']
python
{ "resource": "" }
q19917
Node.api
train
def api(self, name, namespace='pyeapi.api'):
    """Loads the specified api module

    This method is the API autoload mechanism that will load the API
    module specified by the name argument.  The API module will be
    loaded and look first for an initialize() function and secondly for
    an instance() function.  In both cases, the node object is passed to
    the module.

    Args:
        name (str): The name of the module to load.  The name should be
            the name of the python file to import
        namespace (str): The namespace to use to load the module.  The
            default value is 'pyeapi.api'

    Returns:
        The API module loaded with the node instance.
    """
    module = load_module('{}.{}'.format(namespace, name))
    if hasattr(module, 'initialize'):
        module.initialize(self)
    if hasattr(module, 'instance'):
        return module.instance(self)
    return module
python
{ "resource": "" }
q19918
Node.get_config
train
def get_config(self, config='running-config', params=None,
               as_string=False):
    """ Retrieves the config from the node

    This method will retrieve the config from the node as either a
    string or a list object.  The config to retrieve can be specified as
    either the startup-config or the running-config.

    Args:
        config (str): Specifies to return either the nodes
            startup-config or running-config.  The default value is the
            running-config
        params (str): A string of keywords to append to the command for
            retrieving the config.
        as_string (bool): Flag that determines the response.  If True,
            then the configuration is returned as a raw string.  If
            False, then the configuration is returned as a list.  The
            default value is False

    Returns:
        This method will return either a string or a list depending on
        the state of the as_string keyword argument.

    Raises:
        TypeError: If the specified config is not one of either
            'running-config' or 'startup-config'
    """
    if config not in ['startup-config', 'running-config']:
        raise TypeError('invalid config name specified')

    command = 'show %s' % config
    if params:
        command += ' %s' % params

    result = self.run_commands(command, 'text')
    if as_string:
        return str(result[0]['output']).strip()

    return str(result[0]['output']).split('\n')
python
{ "resource": "" }
q19919
Routemaps.get
train
def get(self, name):
    """Provides a method to retrieve all routemap configuration
    related to the name attribute.

    Args:
        name (string): The name of the routemap.

    Returns:
        None if the specified routemap does not exist. If the routemap
        exists a dictionary will be provided as follows::

            {
                'deny': {
                    30: {
                        'continue': 200,
                        'description': None,
                        'match': ['as 2000',
                                  'source-protocol ospf',
                                  'interface Ethernet2'],
                        'set': []
                    }
                },
                'permit': {
                    10: {
                        'continue': 100,
                        'description': None,
                        'match': ['interface Ethernet1'],
                        'set': ['tag 50']
                    },
                    20: {
                        'continue': 200,
                        'description': None,
                        'match': ['as 2000',
                                  'source-protocol ospf',
                                  'interface Ethernet2'],
                        'set': []
                    }
                }
            }
    """
    if not self.get_block(r'route-map\s%s\s\w+\s\d+' % name):
        return None

    return self._parse_entries(name)
python
{ "resource": "" }
q19920
Routemaps.create
train
def create(self, name, action, seqno):
    """Creates a new routemap on the node

    Note:
        This method will attempt to create the routemap regardless of
        whether the routemap exists or not.  If the routemap already
        exists then this method will still return True.

    Args:
        name (string): The full name of the routemap.
        action (string): The action to take for this routemap clause.
        seqno (integer): The sequence number for the routemap clause.

    Returns:
        True if the routemap could be created otherwise False (see Note)
    """
    return self.configure('route-map %s %s %s' % (name, action, seqno))
python
{ "resource": "" }
q19921
Routemaps.delete
train
def delete(self, name, action, seqno):
    """Deletes the routemap from the node

    Note:
        This method will attempt to delete the routemap from the nodes
        operational config.  If the routemap does not exist then this
        method will not perform any changes but still return True

    Args:
        name (string): The full name of the routemap.
        action (string): The action to take for this routemap clause.
        seqno (integer): The sequence number for the routemap clause.

    Returns:
        True if the routemap could be deleted otherwise False (see Note)
    """
    return self.configure('no route-map %s %s %s' % (name, action, seqno))
python
{ "resource": "" }
q19922
Routemaps.default
train
def default(self, name, action, seqno):
    """Defaults the routemap on the node

    Note:
        This method will attempt to default the routemap from the nodes
        operational config.  Since routemaps do not exist by default,
        the default action is essentially a negation and the result will
        be the removal of the routemap clause.  If the routemap does not
        exist then this method will not perform any changes but still
        return True

    Args:
        name (string): The full name of the routemap.
        action (string): The action to take for this routemap clause.
        seqno (integer): The sequence number for the routemap clause.

    Returns:
        True if the routemap could be deleted otherwise False (see Note)
    """
    return self.configure('default route-map %s %s %s' % (name, action,
                                                          seqno))
python
{ "resource": "" }
q19923
Routemaps.set_match_statements
train
def set_match_statements(self, name, action, seqno, statements):
    """Configures the match statements within the routemap clause.
    The final configuration of match statements will reflect the list
    of statements passed into the statements attribute.  This implies
    match statements found in the routemap that are not specified in
    the statements attribute will be removed.

    Args:
        name (string): The full name of the routemap.
        action (string): The action to take for this routemap clause.
        seqno (integer): The sequence number for the routemap clause.
        statements (list): A list of the match-related statements.  Note
            that the statements should omit the leading match.

    Returns:
        True if the operation succeeds otherwise False
    """
    try:
        current_statements = self.get(name)[action][seqno]['match']
    except (KeyError, TypeError):
        current_statements = []

    commands = list()

    # remove match statements from the current routemap that are not
    # in the new statements list
    for entry in set(current_statements).difference(statements):
        commands.append('route-map %s %s %s' % (name, action, seqno))
        commands.append('no match %s' % entry)

    # add new match statements to the routemap
    for entry in set(statements).difference(current_statements):
        commands.append('route-map %s %s %s' % (name, action, seqno))
        commands.append('match %s' % entry)

    return self.configure(commands) if commands else True
python
{ "resource": "" }
q19924
Routemaps.set_continue
train
def set_continue(self, name, action, seqno, value=None, default=False,
                 disable=False):
    """Configures the routemap continue value

    Args:
        name (string): The full name of the routemap.
        action (string): The action to take for this routemap clause.
        seqno (integer): The sequence number for the routemap clause.
        value (integer): The value to configure for the routemap continue
        default (bool): Specifies to default the routemap continue value
        disable (bool): Specifies to negate the routemap continue value

    Returns:
        True if the operation succeeds otherwise False is returned
    """
    commands = ['route-map %s %s %s' % (name, action, seqno)]
    if default:
        commands.append('default continue')
    elif disable:
        commands.append('no continue')
    else:
        # cast before comparing so a string value does not raise a
        # TypeError on the < comparison
        if not str(value).isdigit() or int(value) < 1:
            raise ValueError('seqno must be a positive integer unless '
                             'default or disable is specified')
        commands.append('continue %s' % value)
    return self.configure(commands)
python
{ "resource": "" }
q19925
Routemaps.set_description
train
def set_description(self, name, action, seqno, value=None, default=False,
                    disable=False):
    """Configures the routemap description

    Args:
        name (string): The full name of the routemap.
        action (string): The action to take for this routemap clause.
        seqno (integer): The sequence number for the routemap clause.
        value (string): The value to configure for the routemap
            description
        default (bool): Specifies to default the routemap description
            value
        disable (bool): Specifies to negate the routemap description

    Returns:
        True if the operation succeeds otherwise False is returned
    """
    commands = ['route-map %s %s %s' % (name, action, seqno)]
    if value is not None:
        # Before assigning a new description, clear any existing desc
        commands.append(self.command_builder('description', disable=True))
    commands.append(self.command_builder('description', value=value,
                                         default=default,
                                         disable=disable))
    return self.configure(commands)
python
{ "resource": "" }
q19926
EnsembleMethod._calc_delta
train
def _calc_delta(self, ensemble, scaling_matrix=None):
    '''
    calc the scaled ensemble differences from the mean
    '''
    mean = np.array(ensemble.mean(axis=0))
    delta = ensemble.as_pyemu_matrix()
    for i in range(ensemble.shape[0]):
        delta.x[i, :] -= mean
    if scaling_matrix is not None:
        delta = scaling_matrix * delta.T
    delta *= (1.0 / np.sqrt(float(ensemble.shape[0] - 1.0)))
    return delta
python
{ "resource": "" }
q19927
EnsembleMethod._calc_obs_local
train
def _calc_obs_local(self, parensemble):
    '''
    propagate the ensemble forward using sweep.
    '''
    self.logger.log("evaluating ensemble of size {0} locally with sweep".
                    format(parensemble.shape[0]))
    parensemble.to_csv(self.sweep_in_csv)
    if self.num_slaves > 0:
        master_thread = self._get_master_thread()
        pyemu.utils.start_slaves(self.slave_dir, "pestpp-swp",
                                 self.pst.filename, self.num_slaves,
                                 slave_root='..', port=self.port)
        master_thread.join()
    else:
        os.system("pestpp-swp {0}".format(self.pst.filename))

    self.logger.log("evaluating ensemble of size {0} locally with sweep".
                    format(parensemble.shape[0]))
python
{ "resource": "" }
q19928
concat
train
def concat(mats):
    """Concatenate Matrix objects.  Tries either axis.

    Parameters
    ----------
    mats: list
        list of Matrix objects

    Returns
    -------
    Matrix : Matrix
    """
    for mat in mats:
        if mat.isdiagonal:
            raise NotImplementedError("concat not supported for "
                                      "diagonal mats")

    row_match = True
    col_match = True
    for mat in mats[1:]:
        if sorted(mats[0].row_names) != sorted(mat.row_names):
            row_match = False
        if sorted(mats[0].col_names) != sorted(mat.col_names):
            col_match = False
    if not row_match and not col_match:
        raise Exception("mat_handler.concat(): all Matrix objects " +
                        "must share either rows or cols")

    if row_match and col_match:
        raise Exception("mat_handler.concat(): all Matrix objects " +
                        "share both rows and cols")

    if row_match:
        row_names = copy.deepcopy(mats[0].row_names)
        col_names = []
        for mat in mats:
            col_names.extend(copy.deepcopy(mat.col_names))
        x = mats[0].newx.copy()
        for mat in mats[1:]:
            mat.align(mats[0].row_names, axis=0)
            other_x = mat.newx
            x = np.append(x, other_x, axis=1)

    else:
        col_names = copy.deepcopy(mats[0].col_names)
        row_names = []
        for mat in mats:
            row_names.extend(copy.deepcopy(mat.row_names))
        x = mats[0].newx.copy()
        for mat in mats[1:]:
            mat.align(mats[0].col_names, axis=1)
            other_x = mat.newx
            x = np.append(x, other_x, axis=0)

    return Matrix(x=x, row_names=row_names, col_names=col_names)
python
{ "resource": "" }
q19929
get_common_elements
train
def get_common_elements(list1, list2):
    """find the common elements in two lists.  used to support auto align
        might be faster with sets

    Parameters
    ----------
    list1 : list
        a list of objects
    list2 : list
        a list of objects

    Returns
    -------
    list : list
        list of common objects shared by list1 and list2
    """
    # preserves the ordering of list1 while testing membership against
    # a set for speed
    set2 = set(list2)
    result = [item for item in list1 if item in set2]
    return result
python
{ "resource": "" }
q19930
__set_svd
train
def __set_svd(self):
    """private method to set SVD components.

    Note: this should not be called directly
    """
    if self.isdiagonal:
        x = np.diag(self.x.flatten())
    else:
        # just a pointer to x
        x = self.x
    try:
        u, s, v = la.svd(x, full_matrices=True)
        v = v.transpose()
    except Exception as e:
        print("standard SVD failed: {0}".format(str(e)))
        try:
            v, s, u = la.svd(x.transpose(), full_matrices=True)
            u = u.transpose()
        except Exception as e:
            np.savetxt("failed_svd.dat", x, fmt="%15.6E")
            raise Exception("Matrix.__set_svd(): " +
                            "unable to compute SVD of self.x, " +
                            "saved matrix to 'failed_svd.dat' -- {0}".
                            format(str(e)))

    col_names = ["left_sing_vec_" + str(i + 1) for i in range(u.shape[1])]
    self.__u = Matrix(x=u, row_names=self.row_names,
                      col_names=col_names, autoalign=False)

    sing_names = ["sing_val_" + str(i + 1) for i in range(s.shape[0])]
    self.__s = Matrix(x=np.atleast_2d(s).transpose(),
                      row_names=sing_names, col_names=sing_names,
                      isdiagonal=True, autoalign=False)

    col_names = ["right_sing_vec_" + str(i + 1) for i in range(v.shape[0])]
    self.__v = Matrix(v, row_names=self.col_names, col_names=col_names,
                      autoalign=False)
python
{ "resource": "" }
q19931
element_isaligned
train
def element_isaligned(self, other):
    """check if matrices are aligned for element-wise operations

    Parameters
    ----------
    other : Matrix

    Returns
    -------
    bool : bool
        True if aligned, False if not aligned
    """
    assert isinstance(other, Matrix), \
        "Matrix.isaligned(): other argument must be type Matrix, not: " + \
        str(type(other))
    return (self.row_names == other.row_names and
            self.col_names == other.col_names)
python
{ "resource": "" }
q19932
as_2d
train
def as_2d(self):
    """ get a 2D representation of x.  If not self.isdiagonal, simply
    return reference to self.x, otherwise, constructs and returns
    a 2D, diagonal ndarray

    Returns
    -------
    numpy.ndarray : numpy.ndarray
    """
    if not self.isdiagonal:
        return self.x
    return np.diag(self.x.flatten())
python
{ "resource": "" }
q19933
shape
train
def shape(self):
    """get the implied, 2D shape of self

    Returns
    -------
    tuple : tuple
        length 2 tuple of ints
    """
    if self.__x is not None:
        if self.isdiagonal:
            return (max(self.__x.shape), max(self.__x.shape))
        if len(self.__x.shape) == 1:
            raise Exception("Matrix.shape: Matrix objects must be 2D")
        return self.__x.shape
    return None
python
{ "resource": "" }
q19934
transpose
train
def transpose(self):
    """transpose operation of self

    Returns
    -------
    Matrix : Matrix
        transpose of self
    """
    if not self.isdiagonal:
        return type(self)(x=self.__x.copy().transpose(),
                          row_names=self.col_names,
                          col_names=self.row_names,
                          autoalign=self.autoalign)
    else:
        return type(self)(x=self.__x.copy(), row_names=self.row_names,
                          col_names=self.col_names,
                          isdiagonal=True, autoalign=self.autoalign)
python
{ "resource": "" }
q19935
inv
train
def inv(self):
    """inversion operation of self

    Returns
    -------
    Matrix : Matrix
        inverse of self
    """
    if self.isdiagonal:
        inv = 1.0 / self.__x
        if np.any(~np.isfinite(inv)):
            idx = np.isfinite(inv)
            np.savetxt("testboo.dat", idx)
            invalid = [self.row_names[i] for i in range(idx.shape[0])
                       if not idx[i]]
            raise Exception("Matrix.inv has produced invalid floating "
                            "points for the following elements:" +
                            ','.join(invalid))
        return type(self)(x=inv, isdiagonal=True,
                          row_names=self.row_names,
                          col_names=self.col_names,
                          autoalign=self.autoalign)
    else:
        return type(self)(x=la.inv(self.__x), row_names=self.row_names,
                          col_names=self.col_names,
                          autoalign=self.autoalign)
python
{ "resource": "" }
q19936
get_maxsing
train
def get_maxsing(self, eigthresh=1.0e-5):
    """ Get the number of singular components with a singular
    value ratio greater than or equal to eigthresh

    Parameters
    ----------
    eigthresh : float
        the ratio of the largest to smallest singular value

    Returns
    -------
    int : int
        number of singular components
    """
    sthresh = self.s.x.flatten() / self.s.x[0]
    ising = 0
    for i, st in enumerate(sthresh):
        if st > eigthresh:
            ising += 1
        else:
            break
    return max(1, ising)
python
{ "resource": "" }
q19937
pseudo_inv
train
def pseudo_inv(self, maxsing=None, eigthresh=1.0e-5):
    """ The pseudo inverse of self.  Formed using truncated singular
    value decomposition and Matrix.pseudo_inv_components

    Parameters
    ----------
    maxsing : int
        the number of singular components to use.  If None, maxsing is
        calculated using Matrix.get_maxsing() and eigthresh
    eigthresh : float
        the ratio of largest to smallest singular components to use
        for truncation.  Ignored if maxsing is not None

    Returns
    -------
    Matrix : Matrix
    """
    if maxsing is None:
        maxsing = self.get_maxsing(eigthresh=eigthresh)

    full_s = self.full_s.T
    for i in range(self.s.shape[0]):
        if i <= maxsing:
            full_s.x[i, i] = 1.0 / full_s.x[i, i]
        else:
            full_s.x[i, i] = 0.0

    return self.v * full_s * self.u.T
python
{ "resource": "" }
q19938
sqrt
train
def sqrt(self):
    """square root operation

    Returns
    -------
    Matrix : Matrix
        square root of self
    """
    if self.isdiagonal:
        return type(self)(x=np.sqrt(self.__x), isdiagonal=True,
                          row_names=self.row_names,
                          col_names=self.col_names,
                          autoalign=self.autoalign)
    elif self.shape[1] == 1:  # a vector
        return type(self)(x=np.sqrt(self.__x), isdiagonal=False,
                          row_names=self.row_names,
                          col_names=self.col_names,
                          autoalign=self.autoalign)
    else:
        return type(self)(x=la.sqrtm(self.__x),
                          row_names=self.row_names,
                          col_names=self.col_names,
                          autoalign=self.autoalign)
python
{ "resource": "" }
q19939
full_s
train
def full_s(self):
    """ Get the full singular value matrix of self

    Returns
    -------
    Matrix : Matrix
    """
    x = np.zeros((self.shape), dtype=np.float32)
    x[:self.s.shape[0], :self.s.shape[0]] = self.s.as_2d
    s = Matrix(x=x, row_names=self.row_names,
               col_names=self.col_names, isdiagonal=False,
               autoalign=False)
    return s
python
{ "resource": "" }
q19940
zero2d
train
def zero2d(self):
    """ get a 2D instance of self with all zeros

    Returns
    -------
    Matrix : Matrix
    """
    return type(self)(x=np.atleast_2d(np.zeros((self.shape[0],
                                                self.shape[1]))),
                      row_names=self.row_names,
                      col_names=self.col_names,
                      isdiagonal=False)
python
{ "resource": "" }
q19941
Ensemble.as_pyemu_matrix
train
def as_pyemu_matrix(self, typ=Matrix):
    """
    Create a pyemu.Matrix from the Ensemble.

    Parameters
    ----------
    typ : pyemu.Matrix or derived type
        the type of matrix to return

    Returns
    -------
    pyemu.Matrix : pyemu.Matrix
    """
    # use the builtin float; np.float is a deprecated alias for it
    x = self.values.copy().astype(float)
    return typ(x=x, row_names=list(self.index),
               col_names=list(self.columns))
python
{ "resource": "" }
q19942
Ensemble.draw
train
def draw(self, cov, num_reals=1, names=None):
    """ draw random realizations from a multivariate
        Gaussian distribution

    Parameters
    ----------
    cov: pyemu.Cov
        covariance structure to draw from
    num_reals: int
        number of realizations to generate
    names : list
        list of column names to draw for.  If None, values for all
        names are drawn
    """
    real_names = np.arange(num_reals, dtype=np.int64)

    # make sure everything is cool WRT ordering
    if names is not None:
        vals = self.mean_values.loc[names]
        cov = cov.get(names)
    elif self.names != cov.row_names:
        names = get_common_elements(self.names, cov.row_names)
        vals = self.mean_values.loc[names]
        cov = cov.get(names)
    else:
        vals = self.mean_values
        names = self.names

    # generate random numbers
    if cov.isdiagonal:  # much faster
        val_array = np.array([np.random.normal(mu, std, size=num_reals)
                              for mu, std in
                              zip(vals, np.sqrt(cov.x))]).transpose()
    else:
        val_array = np.random.multivariate_normal(vals, cov.as_2d,
                                                  num_reals)

    self.loc[:, :] = np.NaN
    self.dropna(inplace=True)

    # this sucks - can only set by enlargement one row at a time
    for rname, vals in zip(real_names, val_array):
        self.loc[rname, names] = vals
        # set NaNs to mean_values
        idx = pd.isnull(self.loc[rname, :])
        self.loc[rname, idx] = self.mean_values[idx]
python
{ "resource": "" }
q19943
Ensemble.plot
train
def plot(self, bins=10, facecolor='0.5', plot_cols=None,
         filename="ensemble.pdf", func_dict=None, **kwargs):
    """plot ensemble histograms to multipage pdf

    Parameters
    ----------
    bins : int
        number of bins
    facecolor : str
        color
    plot_cols : list of str
        subset of ensemble columns to plot.  If None, all are plotted.
        Default is None
    filename : str
        pdf filename. Default is "ensemble.pdf"
    func_dict : dict
        a dict of functions to apply to specific columns
        (e.g., np.log10)
    **kwargs : dict
        keyword args to pass to plot_utils.ensemble_helper()

    Returns
    -------
    None
    """
    # forward func_dict and any extra keyword args so they are not
    # silently dropped
    ensemble_helper(self, bins=bins, facecolor=facecolor,
                    plot_cols=plot_cols, filename=filename,
                    func_dict=func_dict, **kwargs)
python
{ "resource": "" }
q19944
Ensemble.from_dataframe
train
def from_dataframe(cls, **kwargs):
    """class method constructor to create an Ensemble from
    a pandas.DataFrame

    Parameters
    ----------
    **kwargs : dict
        optional args to pass to the Ensemble Constructor.  Expects
        'df' in kwargs.keys() that must be a pandas.DataFrame instance

    Returns
    -------
    Ensemble : Ensemble
    """
    df = kwargs.pop("df")
    assert isinstance(df, pd.DataFrame)
    df.columns = [c.lower() for c in df.columns]
    mean_values = kwargs.pop("mean_values", df.mean(axis=0))
    e = cls(data=df, index=df.index, columns=df.columns,
            mean_values=mean_values, **kwargs)
    return e
python
{ "resource": "" }
q19945
Ensemble.copy
train
def copy(self):
    """make a deep copy of self

    Returns
    -------
    Ensemble : Ensemble
    """
    df = super(Ensemble, self).copy()
    return type(self).from_dataframe(df=df)
python
{ "resource": "" }
q19946
Ensemble.covariance_matrix
train
def covariance_matrix(self, localizer=None):
    """calculate the approximate covariance matrix implied by the
    ensemble using mean-differencing operation at the core of EnKF

    Parameters
    ----------
    localizer : pyemu.Matrix
        covariance localizer to apply

    Returns
    -------
    cov : pyemu.Cov
        covariance matrix
    """
    mean = np.array(self.mean(axis=0))
    delta = self.as_pyemu_matrix(typ=Cov)
    for i in range(self.shape[0]):
        delta.x[i, :] -= mean
    delta *= (1.0 / np.sqrt(float(self.shape[0] - 1.0)))

    if localizer is not None:
        delta = delta.T * delta
        return delta.hadamard_product(localizer)

    return delta.T * delta
python
{ "resource": "" }
q19947
ObservationEnsemble.mean_values
train
def mean_values(self):
    """ property decorated method to get mean values of observation
    noise.  This is a zero-valued pandas.Series

    Returns
    -------
    mean_values : pandas Series
    """
    vals = self.pst.observation_data.obsval.copy()
    vals.loc[self.names] = 0.0
    return vals
python
{ "resource": "" }
q19948
ObservationEnsemble.nonzero
train
def nonzero(self):
    """ property decorated method to get a new ObservationEnsemble
    of only non-zero weighted observations

    Returns
    -------
    ObservationEnsemble : ObservationEnsemble
    """
    df = self.loc[:, self.pst.nnz_obs_names]
    return ObservationEnsemble.from_dataframe(
        df=df, pst=self.pst.get(obs_names=self.pst.nnz_obs_names))
python
{ "resource": "" }
q19949
ObservationEnsemble.from_binary
train
def from_binary(cls, pst, filename):
    """instantiate an observation ensemble from a jco-type file

    Parameters
    ----------
    pst : pyemu.Pst
        a Pst instance
    filename : str
        the binary file name

    Returns
    -------
    oe : ObservationEnsemble
    """
    m = Matrix.from_binary(filename)
    return ObservationEnsemble(data=m.x, pst=pst,
                               index=m.row_names)
python
{ "resource": "" }
q19950
ParameterEnsemble.mean_values
train
def mean_values(self):
    """ the mean value vector while respecting log transform

    Returns
    -------
    mean_values : pandas.Series
    """
    if not self.istransformed:
        return self.pst.parameter_data.parval1.copy()
    else:
        vals = self.pst.parameter_data.parval1.copy()
        vals[self.log_indexer] = np.log10(vals[self.log_indexer])
        return vals
python
{ "resource": "" }
q19951
ParameterEnsemble.adj_names
train
def adj_names(self):
    """ Get the names of adjustable parameters in the ParameterEnsemble

    Returns
    -------
    list : list
        adjustable parameter names
    """
    return list(self.pst.parameter_data.parnme.loc[~self.fixed_indexer])
python
{ "resource": "" }
q19952
ParameterEnsemble.ubnd
train
def ubnd(self):
    """ the upper bound vector while respecting log transform

    Returns
    -------
    ubnd : pandas.Series
    """
    if not self.istransformed:
        return self.pst.parameter_data.parubnd.copy()
    else:
        ub = self.pst.parameter_data.parubnd.copy()
        ub[self.log_indexer] = np.log10(ub[self.log_indexer])
        return ub
python
{ "resource": "" }
q19953
ParameterEnsemble.lbnd
train
def lbnd(self):
    """ the lower bound vector while respecting log transform

    Returns
    -------
    lbnd : pandas.Series
    """
    if not self.istransformed:
        return self.pst.parameter_data.parlbnd.copy()
    else:
        lb = self.pst.parameter_data.parlbnd.copy()
        lb[self.log_indexer] = np.log10(lb[self.log_indexer])
        return lb
python
{ "resource": "" }
q19954
ParameterEnsemble.fixed_indexer
train
def fixed_indexer(self):
    """ boolean indexer for fixed or tied parameter status

    Returns
    -------
    fixed_indexer : pandas.Series
    """
    isfixed = self.pst.parameter_data.partrans.\
        apply(lambda x: x in ["fixed", "tied"])
    return isfixed.values
python
{ "resource": "" }
q19955
ParameterEnsemble.draw
train
def draw(self,cov,num_reals=1,how="normal",enforce_bounds=None): """draw realizations of parameter values Parameters ---------- cov : pyemu.Cov covariance matrix that describes the support around the mean parameter values num_reals : int number of realizations to generate how : str distribution to use to generate realizations. Options are 'normal' or 'uniform'. Default is 'normal'. If 'uniform', cov argument is ignored enforce_bounds : str how to enforce parameter bound violations. Options are 'reset' (reset individual violating values), 'drop' (drop realizations that have one or more violating values. Default is None (no bounds enforcement) """ how = how.lower().strip() if not self.istransformed: self._transform() if how == "uniform": self._draw_uniform(num_reals=num_reals) else: super(ParameterEnsemble,self).draw(cov,num_reals=num_reals) # replace the realizations for fixed parameters with the original # parval1 in the control file self.pst.parameter_data.index = self.pst.parameter_data.parnme fixed_vals = self.pst.parameter_data.loc[self.fixed_indexer,"parval1"] for fname,fval in zip(fixed_vals.index,fixed_vals.values): #if fname not in self.columns: # continue self.loc[:,fname] = fval istransformed = self.pst.parameter_data.loc[:,"partrans"] == "log" self.loc[:,istransformed] = 10.0**self.loc[:,istransformed] self.__istransformed = False #self._applied_tied() self.enforce(enforce_bounds)
python
{ "resource": "" }
q19956
ParameterEnsemble.from_uniform_draw
train
def from_uniform_draw(cls, pst, num_reals):
    """ instantiate a parameter ensemble from uniform draws

    Parameters
    ----------
    pst : pyemu.Pst
        a control file instance
    num_reals : int
        number of realizations to generate

    Returns
    -------
    ParameterEnsemble : ParameterEnsemble
    """
    li = pst.parameter_data.partrans == "log"
    ub = pst.parameter_data.parubnd.copy()
    ub.loc[li] = ub.loc[li].apply(np.log10)
    ub = ub.to_dict()
    lb = pst.parameter_data.parlbnd.copy()
    lb.loc[li] = lb.loc[li].apply(np.log10)
    lb = lb.to_dict()

    # set up some row (realization) names
    real_names = np.arange(num_reals, dtype=np.int64)
    arr = np.empty((num_reals, len(ub)))
    adj_par_names = set(pst.adj_par_names)
    for i, pname in enumerate(pst.parameter_data.parnme):
        if pname in adj_par_names:
            arr[:, i] = np.random.uniform(lb[pname], ub[pname],
                                          size=num_reals)
        else:
            arr[:, i] = np.zeros((num_reals)) + \
                        pst.parameter_data.loc[pname, "parval1"]

    # back transform the log-distributed columns and build the ensemble
    # from the back-transformed values (previously the raw array was
    # used, silently discarding the back transform)
    df = pd.DataFrame(arr, index=real_names, columns=pst.par_names)
    df.loc[:, li] = 10.0**df.loc[:, li]

    new_pe = cls.from_dataframe(pst=pst, df=df)
    return new_pe
python
{ "resource": "" }
q19957
ParameterEnsemble.from_binary
train
def from_binary(cls, pst, filename):
    """instantiate a parameter ensemble from a jco-type file

    Parameters
    ----------
    pst : pyemu.Pst
        a Pst instance
    filename : str
        the binary file name

    Returns
    -------
    pe : ParameterEnsemble
    """
    m = Matrix.from_binary(filename).to_dataframe()
    return ParameterEnsemble.from_dataframe(df=m, pst=pst)
python
{ "resource": "" }
q19958
ParameterEnsemble._back_transform
train
def _back_transform(self, inplace=True):
    """ Private method to remove log10 transformation from ensemble

    Parameters
    ----------
    inplace: bool
        back transform self in place

    Returns
    ------
    ParameterEnsemble : ParameterEnsemble
        if inplace is False

    Note
    ----
    Don't call this method unless you know what you are doing
    """
    if not self.istransformed:
        raise Exception("ParameterEnsemble already back transformed")

    istransformed = self.pst.parameter_data.loc[:, "partrans"] == "log"
    if inplace:
        self.loc[:, istransformed] = 10.0**(self.loc[:, istransformed])
        self.loc[:, :] = (self.loc[:, :] -
                          self.pst.parameter_data.offset) / \
                         self.pst.parameter_data.scale
        self.__istransformed = False
    else:
        vals = (self.pst.parameter_data.parval1 -
                self.pst.parameter_data.offset) / \
               self.pst.parameter_data.scale
        new_en = ParameterEnsemble(pst=self.pst.get(),
                                   data=self.loc[:, :].copy(),
                                   columns=self.columns,
                                   mean_values=vals,
                                   istransformed=False)
        new_en.loc[:, istransformed] = 10.0**(self.loc[:, istransformed])
        new_en.loc[:, :] = (new_en.loc[:, :] -
                            new_en.pst.parameter_data.offset) / \
                           new_en.pst.parameter_data.scale
        return new_en
python
{ "resource": "" }
q19959
ParameterEnsemble._transform
train
def _transform(self, inplace=True):
    """ Private method to perform log10 transformation for ensemble

    Parameters
    ----------
    inplace: bool
        transform self in place

    Returns
    -------
    ParameterEnsemble : ParameterEnsemble
        if inplace is False

    Note
    ----
    Don't call this method unless you know what you are doing
    """
    if self.istransformed:
        return

    istransformed = self.pst.parameter_data.loc[:, "partrans"] == "log"
    if inplace:
        self.loc[:, :] = (self.loc[:, :] *
                          self.pst.parameter_data.scale) + \
                         self.pst.parameter_data.offset
        self.loc[:, istransformed] = self.loc[:, istransformed].\
            applymap(lambda x: math.log10(x))
        self.__istransformed = True
    else:
        vals = self.pst.parameter_data.parval1.copy()
        new_en = ParameterEnsemble(pst=self.pst.get(),
                                   data=self.loc[:, :].copy(),
                                   columns=self.columns,
                                   mean_values=vals,
                                   istransformed=True)
        new_en.loc[:, :] = (new_en.loc[:, :] *
                            self.pst.parameter_data.scale) + \
                           new_en.pst.parameter_data.offset
        new_en.loc[:, istransformed] = self.loc[:, istransformed].\
            applymap(lambda x: math.log10(x))
        return new_en
python
{ "resource": "" }
q19960
ParameterEnsemble.project
train
def project(self, projection_matrix, inplace=True, log=None,
            enforce_bounds="reset"):
    """ project the ensemble using the null-space Monte Carlo method

    Parameters
    ----------
    projection_matrix : pyemu.Matrix
        projection operator - must already respect log transform
    inplace : bool
        project self or return a new ParameterEnsemble instance
    log: pyemu.Logger
        for logging progress
    enforce_bounds : str
        parameter bound enforcement flag. 'drop' removes
        offending realizations, 'reset' resets offending values

    Returns
    -------
    ParameterEnsemble : ParameterEnsemble
        if inplace is False
    """
    if self.istransformed:
        self._back_transform()

    istransformed = self.pst.parameter_data.loc[:, "partrans"] == "log"
    self.loc[:, istransformed] = self.loc[:, istransformed].\
        applymap(lambda x: math.log10(x))
    self.__istransformed = True

    # make sure everything is cool WRT ordering
    common_names = get_common_elements(self.adj_names,
                                       projection_matrix.row_names)
    base = self.mean_values.loc[common_names]
    projection_matrix = projection_matrix.get(common_names, common_names)

    if not inplace:
        new_en = ParameterEnsemble(pst=self.pst.get(),
                                   data=self.loc[:, :].copy(),
                                   columns=self.columns,
                                   mean_values=self.mean_values.copy(),
                                   istransformed=self.istransformed)

    for real in self.index:
        if log is not None:
            log("projecting realization {0}".format(real))

        # null space projection of difference vector
        pdiff = np.dot(projection_matrix.x,
                       (self.loc[real, common_names] - base).values)

        if inplace:
            self.loc[real, common_names] = base + pdiff
        else:
            new_en.loc[real, common_names] = base + pdiff

        if log is not None:
            log("projecting realization {0}".format(real))

    if not inplace:
        new_en.enforce(enforce_bounds)
        new_en.loc[:, istransformed] = 10.0**new_en.loc[:, istransformed]
        new_en.__istransformed = False
        return new_en

    self.enforce(enforce_bounds)
    self.loc[:, istransformed] = 10.0**self.loc[:, istransformed]
    self.__istransformed = False
python
{ "resource": "" }
q19961
ParameterEnsemble.enforce_drop
train
def enforce_drop(self):
    """ enforce parameter bounds on the ensemble by dropping
    violating realizations
    """
    ub = self.ubnd
    lb = self.lbnd
    drop = []
    for real in self.index:
        if (ub - self.loc[real, :]).min() < 0.0 or \
           (lb - self.loc[real, :]).max() > 0.0:
            drop.append(real)
    self.loc[drop, :] = np.NaN
    self.dropna(inplace=True)
python
{ "resource": "" }
q19962
ParameterEnsemble.enforce_reset
train
def enforce_reset(self):
    """enforce parameter bounds on the ensemble by resetting
    violating vals to bound
    """
    ub = (self.ubnd * (1.0 + self.bound_tol)).to_dict()
    lb = (self.lbnd * (1.0 - self.bound_tol)).to_dict()

    val_arr = self.values
    for iname, name in enumerate(self.columns):
        val_arr[val_arr[:, iname] > ub[name], iname] = ub[name]
        val_arr[val_arr[:, iname] < lb[name], iname] = lb[name]
    # .values can return a copy in newer pandas, so write the clipped
    # array back explicitly
    self.loc[:, :] = val_arr
python
{ "resource": "" }
q19963
ParameterEnsemble.to_binary
train
def to_binary(self, filename):
    """write the parameter ensemble to a jco-style binary file

    Parameters
    ----------
    filename : str
        the filename to write

    Returns
    -------
    None

    Note
    ----
    this function back-transforms inplace with respect to
    log10 before writing
    """
    retrans = False
    if self.istransformed:
        self._back_transform(inplace=True)
        retrans = True
    if self.isnull().values.any():
        warnings.warn("NaN in par ensemble", PyemuWarning)
    self.as_pyemu_matrix().to_coo(filename)
    if retrans:
        self._transform(inplace=True)
python
{ "resource": "" }
q19964
ParameterEnsemble.to_parfiles
train
def to_parfiles(self, prefix):
    """ write the parameter ensemble to PEST-style parameter files

    Parameters
    ----------
    prefix: str
        file prefix for par files

    Note
    ----
    this function back-transforms inplace with respect to
    log10 before writing
    """
    if self.isnull().values.any():
        warnings.warn("NaN in par ensemble", PyemuWarning)

    if self.istransformed:
        self._back_transform(inplace=True)

    par_df = self.pst.parameter_data.loc[:,
             ["parnme", "parval1", "scale", "offset"]].copy()

    for real in self.index:
        par_file = "{0}{1}.par".format(prefix, real)
        par_df.loc[:, "parval1"] = self.loc[real, :]
        write_parfile(par_df, par_file)
python
{ "resource": "" }
q19965
EnsembleKalmanFilter.forecast
train
def forecast(self, parensemble=None):
    """for the enkf formulation, this simply moves the ensemble forward
    by running the model once for each realization"""
    if parensemble is None:
        parensemble = self.parensemble

    self.logger.log("evaluating ensemble")
    failed_runs, obsensemble = self._calc_obs(parensemble)
    if failed_runs is not None:
        self.logger.warn("dropping failed realizations")
        parensemble.loc[failed_runs, :] = np.NaN
        parensemble = parensemble.dropna()
        obsensemble.loc[failed_runs, :] = np.NaN
        obsensemble = obsensemble.dropna()
    self.logger.log("evaluating ensemble")
    return obsensemble
python
{ "resource": "" }
q19966
EnsembleKalmanFilter.update
train
def update(self):
    """update performs the analysis, then runs the forecast using the
    updated self.parensemble.  This can be called repeatedly to
    iterate..."""
    parensemble = self.analysis_evensen()
    obsensemble = self.forecast(parensemble=parensemble)
    # todo: check for phi improvement
    if True:
        self.obsensemble = obsensemble
        self.parensemble = parensemble
    self.iter_num += 1
python
{ "resource": "" }
q19967
run
train
def run(cmd_str, cwd='.', verbose=False):
    """ an OS agnostic function to execute a command line

    Parameters
    ----------
    cmd_str : str
        the str to execute with os.system()
    cwd : str
        the directory to execute the command in
    verbose : bool
        flag to echo the complete cmd str to stdout

    Note
    ----
    uses platform to detect OS and adds .exe suffix or ./ prefix as
    appropriate.  For Windows, if os.system returns non-zero,
    raises exception

    Example
    -------
    ``>>>import pyemu``

    ``>>>pyemu.helpers.run("pestpp pest.pst")``
    """
    bwd = os.getcwd()
    os.chdir(cwd)
    try:
        exe_name = cmd_str.split()[0]
        if "window" in platform.platform().lower():
            if not exe_name.lower().endswith("exe"):
                raw = cmd_str.split()
                raw[0] = exe_name + ".exe"
                cmd_str = ' '.join(raw)
        else:
            if exe_name.lower().endswith('exe'):
                raw = cmd_str.split()
                exe_name = exe_name.replace('.exe', '')
                raw[0] = exe_name
                # rejoin all args, not just the first two
                cmd_str = ' '.join(raw)
            if os.path.exists(exe_name) and not exe_name.startswith('./'):
                cmd_str = "./" + cmd_str
    except Exception as e:
        os.chdir(bwd)
        raise Exception("run() error preprocessing command line :{0}".
                        format(str(e)))
    if verbose:
        print("run():{0}".format(cmd_str))

    try:
        ret_val = os.system(cmd_str)
    except Exception as e:
        os.chdir(bwd)
        raise Exception("run() raised :{0}".format(str(e)))
    os.chdir(bwd)
    if "window" in platform.platform().lower():
        if ret_val != 0:
            raise Exception("run() returned non-zero")
python
{ "resource": "" }
q19968
RegData.write
train
def write(self, f):
    """ write the regularization section to an open file handle

    Parameters
    ----------
    f : file handle
    """
    f.write("* regularization\n")
    for vline in REG_VARIABLE_LINES:
        vraw = vline.strip().split()
        for v in vraw:
            v = v.replace("[", '').replace("]", '')
            if v not in self.optional_dict.keys():
                raise Exception("RegData missing attribute {0}".format(v))
            f.write("{0} ".format(self.__getattribute__(v)))
        f.write("\n")
python
{ "resource": "" }
q19969
SvdData.write
train
def write(self, f):
    """ write an SVD section to a file handle

    Parameters
    ----------
    f : file handle
    """
    f.write("* singular value decomposition\n")
    f.write(IFMT(self.svdmode) + '\n')
    f.write(IFMT(self.maxsing) + ' ' + FFMT(self.eigthresh) + "\n")
    f.write('{0}\n'.format(self.eigwrite))
python
{ "resource": "" }
q19970
SvdData.parse_values_from_lines
train
def parse_values_from_lines(self, lines):
    """ parse values from lines of the SVD section

    Parameters
    ----------
    lines : list
    """
    assert len(lines) == 3, "SvdData.parse_values_from_lines: expected " + \
                            "3 lines, not {0}".format(len(lines))
    try:
        self.svdmode = int(lines[0].strip().split()[0])
    except Exception as e:
        raise Exception("SvdData.parse_values_from_lines: error parsing" +
                        " svdmode from line {0}: {1} \n".
                        format(lines[0], str(e)))
    try:
        raw = lines[1].strip().split()
        self.maxsing = int(raw[0])
        self.eigthresh = float(raw[1])
    except Exception as e:
        raise Exception("SvdData.parse_values_from_lines: error parsing" +
                        " maxsing and eigthresh from line {0}: {1} \n".
                        format(lines[1], str(e)))
    # eigwrite is intentionally kept as a string rather than cast to int
    self.eigwrite = lines[2].strip()
python
{ "resource": "" }
q19971
ControlData.formatted_values
train
def formatted_values(self):
    """ list the entries and current values in the control data section

    Returns
    -------
    formatted_values : pandas.Series
    """
    return self._df.apply(lambda x: self.formatters[x["type"]](x["value"]),
                          axis=1)
python
{ "resource": "" }
q19972
read_struct_file
train
def read_struct_file(struct_file, return_type=GeoStruct):
    """read an existing PEST-type structure file into a GeoStruct instance

    Parameters
    ----------
    struct_file : (str)
        existing pest-type structure file
    return_type : (object)
        the instance type to return.  Default is GeoStruct

    Returns
    -------
    GeoStruct : list or GeoStruct

    Note
    ----
    if only one structure is listed in struct_file, then the return type
    is GeoStruct.  Otherwise, the return type is a list of GeoStruct

    Example
    -------
    ``>>>import pyemu``

    ``>>>gs = pyemu.utils.geostats.read_struct_file("struct.dat")``
    """

    VARTYPE = {1: SphVario, 2: ExpVario, 3: GauVario, 4: None}
    assert os.path.exists(struct_file)
    structures = []
    variograms = []
    with open(struct_file, 'r') as f:
        while True:
            line = f.readline()
            if line == '':
                break
            line = line.strip().lower()
            if line.startswith("structure"):
                name = line.strip().split()[1]
                nugget, transform, variogram_info = \
                    _read_structure_attributes(f)
                s = return_type(nugget=nugget, transform=transform,
                                name=name)
                s.variogram_info = variogram_info
                # not sure what is going on, but if I don't copy s here,
                # all the structures end up sharing all the variograms
                # later
                structures.append(copy.deepcopy(s))
            elif line.startswith("variogram"):
                name = line.strip().split()[1].lower()
                vartype, bearing, a, anisotropy = _read_variogram(f)
                if name in variogram_info:
                    v = VARTYPE[vartype](variogram_info[name], a,
                                         anisotropy=anisotropy,
                                         bearing=bearing, name=name)
                    variograms.append(v)

    for i, st in enumerate(structures):
        for vname in st.variogram_info:
            vfound = None
            for v in variograms:
                if v.name == vname:
                    vfound = v
                    break
            if vfound is None:
                raise Exception("variogram {0} not found for structure "
                                "{1}".format(vname, s.name))
            st.variograms.append(vfound)

    if len(structures) == 1:
        return structures[0]
    return structures
python
{ "resource": "" }
q19973
_read_variogram
train
def _read_variogram(f):
    """Function to instantiate a Vario2d from a PEST-style structure file

    Parameters
    ----------
    f : (file handle)
        file handle opened for reading

    Returns
    -------
    Vario2d : Vario2d
        Vario2d derived type
    """

    line = ''
    vartype = None
    bearing = 0.0
    a = None
    anisotropy = 1.0
    while "end variogram" not in line:
        line = f.readline()
        if line == '':
            raise Exception("EOF while reading variogram")
        line = line.strip().lower().split()
        if line[0].startswith('#'):
            continue
        if line[0] == "vartype":
            vartype = int(line[1])
        elif line[0] == "bearing":
            bearing = float(line[1])
        elif line[0] == "a":
            a = float(line[1])
        elif line[0] == "anisotropy":
            anisotropy = float(line[1])
        elif line[0] == "end":
            break
        else:
            raise Exception("unrecognized arg in variogram:{0}".
                            format(line[0]))
    return vartype, bearing, a, anisotropy
python
{ "resource": "" }
q19974
_read_structure_attributes
train
def _read_structure_attributes(f):
    """ function to read information from a PEST-style structure file

    Parameters
    ----------
    f : (file handle)
        file handle open for reading

    Returns
    -------
    nugget : float
        the GeoStruct nugget
    transform : str
        the GeoStruct transformation
    variogram_info : dict
        dictionary of structure-level variogram information

    """

    line = ''
    variogram_info = {}
    while "end structure" not in line:
        line = f.readline()
        if line == '':
            raise Exception("EOF while reading structure")
        line = line.strip().lower().split()
        if line[0].startswith('#'):
            continue
        if line[0] == "nugget":
            nugget = float(line[1])
        elif line[0] == "transform":
            transform = line[1]
        elif line[0] == "numvariogram":
            numvariograms = int(line[1])
        elif line[0] == "variogram":
            variogram_info[line[1]] = float(line[2])
        elif line[0] == "end":
            break
        elif line[0] == "mean":
            warnings.warn("'mean' attribute not supported, skipping",PyemuWarning)
        else:
            raise Exception("unrecognized line in structure definition:{0}".\
                            format(line[0]))
    assert numvariograms == len(variogram_info),"structure definition is " + \
        "inconsistent: numvariogram={0} but {1} variogram lines found".\
        format(numvariograms,len(variogram_info))
    return nugget,transform,variogram_info
python
{ "resource": "" }
q19975
read_sgems_variogram_xml
train
def read_sgems_variogram_xml(xml_file,return_type=GeoStruct):
    """ function to read an SGEMS-type variogram XML file into
    a GeoStruct

    Parameters
    ----------
    xml_file : (str)
        SGEMS variogram XML file
    return_type : (object)
        the instance type to return.  Default is GeoStruct

    Returns
    -------
    GeoStruct : GeoStruct

    Example
    -------
    ``>>>import pyemu``

    ``>>>gs = pyemu.utils.geostats.read_sgems_variogram_xml("sgems.xml")``

    """
    try:
        import xml.etree.ElementTree as ET
    except Exception as e:
        raise Exception("error importing xml.etree.ElementTree: {0}".format(str(e)))

    assert os.path.exists(xml_file)
    tree = ET.parse(xml_file)
    gs_model = tree.getroot()
    nugget = 0.0
    num_struct = 0
    for key,val in gs_model.items():
        if str(key).lower() == "nugget":
            if len(val) > 0:
                nugget = float(val)
        if str(key).lower() == "structures_count":
            num_struct = int(val)
    if num_struct == 0:
        raise Exception("no structures found")
    if num_struct != 1:
        raise NotImplementedError()
    for structure in gs_model:
        vtype, contribution = None, None
        mx_range,mn_range = None, None
        x_angle,y_angle = None,None
        for key,val in structure.items():
            key = str(key).lower()
            if key == "type":
                vtype = str(val).lower()
                if vtype.startswith("sph"):
                    vtype = SphVario
                elif vtype.startswith("exp"):
                    vtype = ExpVario
                elif vtype.startswith("gau"):
                    vtype = GauVario
                else:
                    raise Exception("unrecognized variogram type:{0}".format(vtype))
            elif key == "contribution":
                contribution = float(val)
        for item in structure:
            if item.tag.lower() == "ranges":
                mx_range = float(item.attrib["max"])
                mn_range = float(item.attrib["min"])
            elif item.tag.lower() == "angles":
                x_angle = float(item.attrib["x"])
                y_angle = float(item.attrib["y"])

        assert contribution is not None
        assert mn_range is not None
        assert mx_range is not None
        assert x_angle is not None
        assert y_angle is not None
        assert vtype is not None
        v = vtype(contribution=contribution,a=mx_range,
                  anisotropy=mx_range/mn_range,
                  bearing=(180.0/np.pi)*np.arctan2(x_angle,y_angle),
                  name=structure.tag)
        return GeoStruct(nugget=nugget,variograms=[v])
python
{ "resource": "" }
q19976
gslib_2_dataframe
train
def gslib_2_dataframe(filename,attr_name=None,x_idx=0,y_idx=1):
    """ function to read a GSLIB point data file into a pandas.DataFrame

    Parameters
    ----------
    filename : (str)
        GSLIB file
    attr_name : (str)
        the column name in the dataframe for the attribute.  If None, GSLIB file
        can have only 3 columns.  attr_name must be in the GSLIB file header
    x_idx : (int)
        the index of the x-coordinate information in the GSLIB file.  Default is
        0 (first column)
    y_idx : (int)
        the index of the y-coordinate information in the GSLIB file.  Default is
        1 (second column)

    Returns
    -------
    df : pandas.DataFrame

    Raises
    ------
    exception if attr_name is None and GSLIB file has more than 3 columns

    Note
    ----
    assigns generic point names ("pt0", "pt1", etc.)

    Example
    -------
    ``>>>import pyemu``

    ``>>>df = pyemu.utils.geostats.gslib_2_dataframe("prop.gslib",attr_name="hk")``

    """
    with open(filename,'r') as f:
        title = f.readline().strip()
        num_attrs = int(f.readline().strip())
        attrs = [f.readline().strip() for _ in range(num_attrs)]
        if attr_name is not None:
            assert attr_name in attrs,"{0} not in attrs:{1}".format(attr_name,','.join(attrs))
        else:
            assert len(attrs) == 3,"attr_name is None but gslib file does not have exactly 3 attrs"
            attr_name = attrs[2]
        assert len(attrs) > x_idx
        assert len(attrs) > y_idx
        a_idx = attrs.index(attr_name)
        x,y,a = [],[],[]
        while True:
            line = f.readline()
            if line == '':
                break
            raw = line.strip().split()
            try:
                x.append(float(raw[x_idx]))
                y.append(float(raw[y_idx]))
                a.append(float(raw[a_idx]))
            except Exception as e:
                raise Exception("error parsing line {0}: {1}".format(line,str(e)))
    df = pd.DataFrame({"x":x,"y":y,"value":a})
    df.loc[:,"name"] = ["pt{0}".format(i) for i in range(df.shape[0])]
    df.index = df.name
    return df
python
{ "resource": "" }
q19977
load_sgems_exp_var
train
def load_sgems_exp_var(filename):
    """ read an SGEMS experimental variogram into a collection of
    pandas.DataFrames

    Parameters
    ----------
    filename : (str)
        an SGEMS experimental variogram XML file

    Returns
    -------
    dfs : dict
        a dictionary of pandas.DataFrames of x, y, pairs, keyed by the
        title of each division in the experimental variogram

    """

    assert os.path.exists(filename)
    import xml.etree.ElementTree as etree
    tree = etree.parse(filename)
    root = tree.getroot()
    dfs = {}
    for variogram in root:
        for attrib in variogram:
            if attrib.tag == "title":
                title = attrib.text.split(',')[0].split('=')[-1]
            elif attrib.tag == "x":
                x = [float(i) for i in attrib.text.split()]
            elif attrib.tag == "y":
                y = [float(i) for i in attrib.text.split()]
            elif attrib.tag == "pairs":
                pairs = [int(i) for i in attrib.text.split()]

        df = pd.DataFrame({"x":x,"y":y,"pairs":pairs})
        df.loc[df.y<0.0,"y"] = np.NaN
        dfs[title] = df
    return dfs
python
{ "resource": "" }
q19978
GeoStruct.to_struct_file
train
def to_struct_file(self, f): """ write a PEST-style structure file Parameters ---------- f : (str or file handle) file to write the GeoStruct information to """ if isinstance(f, str): f = open(f,'w') f.write("STRUCTURE {0}\n".format(self.name)) f.write(" NUGGET {0}\n".format(self.nugget)) f.write(" NUMVARIOGRAM {0}\n".format(len(self.variograms))) for v in self.variograms: f.write(" VARIOGRAM {0} {1}\n".format(v.name,v.contribution)) f.write(" TRANSFORM {0}\n".format(self.transform)) f.write("END STRUCTURE\n\n") for v in self.variograms: v.to_struct_file(f)
python
{ "resource": "" }
q19979
GeoStruct.covariance
train
def covariance(self,pt0,pt1):
    """get the covariance between two points implied by the GeoStruct.
    This is used during the ordinary kriging process to get the RHS

    Parameters
    ----------
    pt0 : (iterable length 2 of floats)
        first point x and y
    pt1 : (iterable length 2 of floats)
        second point x and y

    Returns
    -------
    covariance : float
        the covariance between pt0 and pt1 implied by the GeoStruct

    """
    cov = self.nugget
    for vario in self.variograms:
        cov += vario.covariance(pt0,pt1)
    return cov
python
{ "resource": "" }
q19980
GeoStruct.covariance_points
train
def covariance_points(self,x0,y0,xother,yother): """ Get the covariance between point x0,y0 and the points contained in xother, yother. Parameters ---------- x0 : (float) x-coordinate y0 : (float) y-coordinate xother : (iterable of floats) x-coordinate of other points yother : (iterable of floats) y-coordinate of other points Returns ------- cov : numpy.ndarray a 1-D array of covariance between point x0,y0 and the points contained in xother, yother. len(cov) = len(xother) = len(yother) """ cov = np.zeros((len(xother))) + self.nugget for v in self.variograms: cov += v.covariance_points(x0,y0,xother,yother) return cov
python
{ "resource": "" }
q19981
GeoStruct.sill
train
def sill(self):
    """ get the sill of the GeoStruct

    Returns
    -------
    sill : float
        the sill of the (nested) GeoStruct, including nugget and
        contribution from each variogram

    """
    sill = self.nugget
    for v in self.variograms:
        sill += v.contribution
    return sill
python
{ "resource": "" }
q19982
GeoStruct.plot
train
def plot(self,**kwargs):
    """ make a cheap plot of the GeoStruct

    Parameters
    ----------
    **kwargs : (dict)
        keyword arguments to use for plotting.

    Returns
    -------
    ax : matplotlib.pyplot.axis
        the axis with the GeoStruct plot

    Note
    ----
    optional arguments include "ax" (an existing axis),
    "individuals" (plot each variogram on a separate axis),
    "legend" (add a legend to the plot(s)).  All other kwargs
    are passed to matplotlib.pyplot.plot()

    """
    if "ax" in kwargs:
        ax = kwargs.pop("ax")
    else:
        try:
            import matplotlib.pyplot as plt
        except Exception as e:
            raise Exception("error importing matplotlib: {0}".format(str(e)))

        ax = plt.subplot(111)
    legend = kwargs.pop("legend",False)
    individuals = kwargs.pop("individuals",False)
    xmx = max([v.a*3.0 for v in self.variograms])
    x = np.linspace(0,xmx,100)
    y = np.zeros_like(x)
    for v in self.variograms:
        yv = v.inv_h(x)
        if individuals:
            ax.plot(x,yv,label=v.name,**kwargs)
        y += yv
    y += self.nugget
    ax.plot(x,y,label=self.name,**kwargs)
    if legend:
        ax.legend()
    ax.set_xlabel("distance")
    ax.set_ylabel(r"$\gamma$")
    return ax
python
{ "resource": "" }
q19983
OrdinaryKrige.check_point_data_dist
train
def check_point_data_dist(self, rectify=False):
    """ check for point_data entries that are closer than
    EPSILON distance - this will cause a singular kriging matrix.

    Parameters
    ----------
    rectify : (boolean)
        flag to fix the problems with point_data
        by dropping additional points that are
        closer than EPSILON distance.  Default is False

    Note
    ----
    this method will issue warnings for points that are closer
    than EPSILON distance

    """

    ptx_array = self.point_data.x.values
    pty_array = self.point_data.y.values
    ptnames = self.point_data.name.values
    drop = []
    for i in range(self.point_data.shape[0]):
        ix,iy,iname = ptx_array[i],pty_array[i],ptnames[i]
        dist = pd.Series((ptx_array[i+1:] - ix) ** 2 + (pty_array[i+1:] - iy) ** 2,
                         ptnames[i+1:])
        if dist.min() < EPSILON**2:
            warnings.warn("points {0} and {1} are too close. This will cause a singular kriging matrix ".\
                          format(iname,dist.idxmin()),PyemuWarning)
            drop_idxs = dist.loc[dist<=EPSILON**2]
            drop.extend([pt for pt in list(drop_idxs.index) if pt not in drop])
    if rectify and len(drop) > 0:
        print("rectifying point data by removing the following points: {0}".format(','.join(drop)))
        print(self.point_data.shape)
        self.point_data = self.point_data.loc[self.point_data.index.map(lambda x: x not in drop),:]
        print(self.point_data.shape)
python
{ "resource": "" }
q19984
Vario2d.to_struct_file
train
def to_struct_file(self, f): """ write the Vario2d to a PEST-style structure file Parameters ---------- f : (str or file handle) item to write to """ if isinstance(f, str): f = open(f,'w') f.write("VARIOGRAM {0}\n".format(self.name)) f.write(" VARTYPE {0}\n".format(self.vartype)) f.write(" A {0}\n".format(self.a)) f.write(" ANISOTROPY {0}\n".format(self.anisotropy)) f.write(" BEARING {0}\n".format(self.bearing)) f.write("END VARIOGRAM\n\n")
python
{ "resource": "" }
q19985
Vario2d.rotation_coefs
train
def rotation_coefs(self):
    """ get the rotation coefficients in radians

    Returns
    -------
    rotation_coefs : list
        the rotation coefficients implied by Vario2d.bearing

    """
    return [np.cos(self.bearing_rads),
            np.sin(self.bearing_rads),
            -1.0*np.sin(self.bearing_rads),
            np.cos(self.bearing_rads)]
python
{ "resource": "" }
q19986
Vario2d.plot
train
def plot(self,**kwargs):
    """ get a cheap plot of the Vario2d

    Parameters
    ----------
    **kwargs : (dict)
        keyword arguments to use for plotting

    Returns
    -------
    ax : matplotlib.pyplot.axis

    Note
    ----
    optional arguments in kwargs include
    "ax" (existing matplotlib.pyplot.axis).  Other
    kwargs are passed to matplotlib.pyplot.plot()

    """
    try:
        import matplotlib.pyplot as plt
    except Exception as e:
        raise Exception("error importing matplotlib: {0}".format(str(e)))

    ax = kwargs.pop("ax",plt.subplot(111))
    x = np.linspace(0,self.a*3,100)
    y = self.inv_h(x)
    ax.set_xlabel("distance")
    ax.set_ylabel(r"$\gamma$")
    ax.plot(x,y,**kwargs)
    return ax
python
{ "resource": "" }
q19987
Vario2d.add_sparse_covariance_matrix
train
def add_sparse_covariance_matrix(self,x,y,names,iidx,jidx,data):
    """accumulate the covariance entries implied by Vario2d into the
    coordinate-format containers used to build a pyemu.SparseMatrix
    instance

    Parameters
    ----------
    x : (iterable of floats)
        x-coordinate locations
    y : (iterable of floats)
        y-coordinate locations
    names : (iterable of str)
        names of locations
    iidx : list
        container for row indices - extended in place
    jidx : list
        container for col indices - extended in place
    data : list
        container for nonzero entries - extended in place

    Returns
    -------
    None

    """
    if not isinstance(x, np.ndarray):
        x = np.array(x)
    if not isinstance(y, np.ndarray):
        y = np.array(y)
    assert x.shape[0] == y.shape[0]
    assert x.shape[0] == len(names)

    # the diagonal entries are the contribution
    for i,name in enumerate(names):
        iidx.append(i)
        jidx.append(i)
        data.append(self.contribution)
    # the upper triangle, evaluated one row at a time
    for i1, (n1, x1, y1) in enumerate(zip(names, x, y)):
        dx = x1 - x[i1 + 1:]
        dy = y1 - y[i1 + 1:]
        dxx, dyy = self._apply_rotation(dx, dy)
        h = np.sqrt(dxx * dxx + dyy * dyy)

        h[h < 0.0] = 0.0
        cv = self._h_function(h)
        if np.any(np.isnan(cv)):
            raise Exception("nans in cv for i1 {0}".format(i1))
        j = list(np.arange(i1+1,x.shape[0]))
        i = [i1] * len(j)
        iidx.extend(i)
        jidx.extend(j)
        data.extend(list(cv))
        # replicate across the diagonal
        iidx.extend(j)
        jidx.extend(i)
        data.extend(list(cv))
python
{ "resource": "" }
q19988
Vario2d.covariance_matrix
train
def covariance_matrix(self,x,y,names=None,cov=None): """build a pyemu.Cov instance implied by Vario2d Parameters ---------- x : (iterable of floats) x-coordinate locations y : (iterable of floats) y-coordinate locations names : (iterable of str) names of locations. If None, cov must not be None cov : (pyemu.Cov) an existing Cov instance. Vario2d contribution is added to cov Returns ------- cov : pyemu.Cov Note ---- either names or cov must not be None. """ if not isinstance(x,np.ndarray): x = np.array(x) if not isinstance(y,np.ndarray): y = np.array(y) assert x.shape[0] == y.shape[0] if names is not None: assert x.shape[0] == len(names) c = np.zeros((len(names),len(names))) np.fill_diagonal(c,self.contribution) cov = Cov(x=c,names=names) elif cov is not None: assert cov.shape[0] == x.shape[0] names = cov.row_names c = np.zeros((len(names),1)) + self.contribution cont = Cov(x=c,names=names,isdiagonal=True) cov += cont else: raise Exception("Vario2d.covariance_matrix() requires either" + "names or cov arg") rc = self.rotation_coefs for i1,(n1,x1,y1) in enumerate(zip(names,x,y)): dx = x1 - x[i1+1:] dy = y1 - y[i1+1:] dxx,dyy = self._apply_rotation(dx,dy) h = np.sqrt(dxx*dxx + dyy*dyy) h[h<0.0] = 0.0 h = self._h_function(h) if np.any(np.isnan(h)): raise Exception("nans in h for i1 {0}".format(i1)) cov.x[i1,i1+1:] += h for i in range(len(names)): cov.x[i+1:,i] = cov.x[i,i+1:] return cov
python
{ "resource": "" }
q19989
Vario2d._apply_rotation
train
def _apply_rotation(self,dx,dy): """ private method to rotate points according to Vario2d.bearing and Vario2d.anisotropy Parameters ---------- dx : (float or numpy.ndarray) x-coordinates to rotate dy : (float or numpy.ndarray) y-coordinates to rotate Returns ------- dxx : (float or numpy.ndarray) rotated x-coordinates dyy : (float or numpy.ndarray) rotated y-coordinates """ if self.anisotropy == 1.0: return dx,dy rcoefs = self.rotation_coefs dxx = (dx * rcoefs[0]) +\ (dy * rcoefs[1]) dyy = ((dx * rcoefs[2]) +\ (dy * rcoefs[3])) *\ self.anisotropy return dxx,dyy
python
{ "resource": "" }
q19990
Vario2d.covariance_points
train
def covariance_points(self,x0,y0,xother,yother): """ get the covariance between base point x0,y0 and other points xother,yother implied by Vario2d Parameters ---------- x0 : (float) x-coordinate of base point y0 : (float) y-coordinate of base point xother : (float or numpy.ndarray) x-coordinates of other points yother : (float or numpy.ndarray) y-coordinates of other points Returns ------- cov : numpy.ndarray covariance between base point and other points implied by Vario2d. Note ---- len(cov) = len(xother) = len(yother) """ dx = x0 - xother dy = y0 - yother dxx,dyy = self._apply_rotation(dx,dy) h = np.sqrt(dxx*dxx + dyy*dyy) return self._h_function(h)
python
{ "resource": "" }
q19991
Vario2d.covariance
train
def covariance(self,pt0,pt1):
    """ get the covariance between two points implied by Vario2d

    Parameters
    ----------
    pt0 : (iterable of len 2)
        first point x and y
    pt1 : (iterable of len 2)
        second point x and y

    Returns
    -------
    cov : float
        covariance between pt0 and pt1

    """

    x = np.array([pt0[0],pt1[0]])
    y = np.array([pt0[1],pt1[1]])
    names = ["n1","n2"]
    return self.covariance_matrix(x,y,names=names).x[0,1]
python
{ "resource": "" }
q19992
ExpVario._h_function
train
def _h_function(self,h):
    """ private method for the exponential variogram "h" function

    Parameters
    ----------
    h : (float or numpy.ndarray)
        distance(s)

    Returns
    -------
    h_function : float or numpy.ndarray
        the value of the "h" function implied by the ExpVario

    """
    return self.contribution * np.exp(-1.0 * h / self.a)
python
{ "resource": "" }
q19993
GauVario._h_function
train
def _h_function(self,h): """ private method for the gaussian variogram "h" function Parameters ---------- h : (float or numpy.ndarray) distance(s) Returns ------- h_function : float or numpy.ndarray the value of the "h" function implied by the GauVario """ hh = -1.0 * (h * h) / (self.a * self.a) return self.contribution * np.exp(hh)
python
{ "resource": "" }
q19994
SphVario._h_function
train
def _h_function(self,h):
    """ private method for the spherical variogram "h" function

    Parameters
    ----------
    h : (float or numpy.ndarray)
        distance(s)

    Returns
    -------
    h_function : float or numpy.ndarray
        the value of the "h" function implied by the SphVario

    """

    hh = h / self.a
    h = self.contribution * (1.0 - (hh * (1.5 - (0.5 * hh * hh))))
    # the spherical model reaches the sill at h == a; np.where keeps this
    # safe for scalar arguments as well as arrays
    return np.where(hh > 1.0, 0.0, h)
python
{ "resource": "" }
q19995
Logger.statement
train
def statement(self,phrase):
    """ log a one-time statement

    Parameters
    ----------
    phrase : str
        statement to log

    """
    t = datetime.now()
    s = str(t) + ' ' + str(phrase) + '\n'
    if self.echo:
        print(s,end='')
    if self.filename:
        self.f.write(s)
        self.f.flush()
python
{ "resource": "" }
q19996
Logger.log
train
def log(self,phrase):
    """log something that happened.  The first time phrase is passed the
    start time is saved.  The second time the phrase is logged, the
    elapsed time is written

    Parameters
    ----------
    phrase : str
        the thing that happened

    """
    t = datetime.now()
    if phrase in self.items.keys():
        s = str(t) + ' finished: ' + str(phrase) + " took: " + \
            str(t - self.items[phrase]) + '\n'
        if self.echo:
            print(s,end='')
        if self.filename:
            self.f.write(s)
            self.f.flush()
        self.items.pop(phrase)
    else:
        s = str(t) + ' starting: ' + str(phrase) + '\n'
        if self.echo:
            print(s,end='')
        if self.filename:
            self.f.write(s)
            self.f.flush()
        self.items[phrase] = copy.deepcopy(t)
python
{ "resource": "" }
q19997
Logger.warn
train
def warn(self,message):
    """write a warning to the log file.

    Parameters
    ----------
    message : str
        the warning text

    """
    s = str(datetime.now()) + " WARNING: " + message + '\n'
    if self.echo:
        print(s,end='')
    if self.filename:
        self.f.write(s)
        self.f.flush()
    warnings.warn(s,PyemuWarning)
python
{ "resource": "" }
q19998
Logger.lraise
train
def lraise(self,message):
    """log an exception, close the log file, then raise the exception

    Parameters
    ----------
    message : str
        the exception message

    Raises
    ------
    exception with message

    """
    s = str(datetime.now()) + " ERROR: " + message + '\n'
    print(s,end='')
    if self.filename:
        self.f.write(s)
        self.f.flush()
        self.f.close()
    raise Exception(message)
python
{ "resource": "" }
q19999
modflow_pval_to_template_file
train
def modflow_pval_to_template_file(pval_file,tpl_file=None):
    """write a template file for a modflow parameter value file.
    Uses names in the first column of the pval file as par names.

    Parameters
    ----------
    pval_file : str
        parameter value file
    tpl_file : str, optional
        template file to write.  If None, use <pval_file>.tpl.
        Default is None

    Returns
    -------
    df : pandas.DataFrame
        pandas DataFrame with the parameter names and values from
        the pval file

    """
    if tpl_file is None:
        tpl_file = pval_file + ".tpl"
    pval_df = pd.read_csv(pval_file,delim_whitespace=True,
                          header=None,skiprows=2,
                          names=["parnme","parval1"])
    pval_df.index = pval_df.parnme
    pval_df.loc[:,"tpl"] = pval_df.parnme.apply(lambda x: " ~ {0:15s} ~".format(x))
    with open(tpl_file,'w') as f:
        f.write("ptf ~\n#pval template file from pyemu\n")
        f.write("{0:10d} #NP\n".format(pval_df.shape[0]))
        f.write(pval_df.loc[:,["parnme","tpl"]].to_string(col_space=0,
                                                          formatters=[SFMT,SFMT],
                                                          index=False,
                                                          header=False,
                                                          justify="left"))
    return pval_df
python
{ "resource": "" }