INSTRUCTION
stringlengths
1
8.43k
RESPONSE
stringlengths
75
104k
Returns a dictionary for storage.
def _store(self):
    """Returns a dictionary for storage.

    Every element in the dictionary except for 'explored_data' is a pickle
    dump. Reusage of objects is identified over the object id, i.e. python's
    built-in id function. 'explored_data' contains the references to the
    objects to be able to recall the order of objects later on.
    """
    store_dict = {}
    if self._data is not None:
        # Dump the current data value and remember which protocol was used
        dump = pickle.dumps(self._data, protocol=self.v_protocol)
        store_dict['data'] = dump
        store_dict[PickleParameter.PROTOCOL] = self.v_protocol
    if self.f_has_range():
        store_dict['explored_data'] = \
            ObjectTable(columns=['idx'], index=list(range(len(self))))
        # Map object id -> name id so identical objects are dumped only once
        smart_dict = {}
        count = 0
        for idx, val in enumerate(self._explored_range):
            obj_id = id(val)
            if obj_id in smart_dict:
                name_id = smart_dict[obj_id]
                add = False
            else:
                name_id = count
                add = True
            name = self._build_name(name_id)
            store_dict['explored_data']['idx'][idx] = name_id
            if add:
                store_dict[name] = pickle.dumps(val, protocol=self.v_protocol)
                smart_dict[obj_id] = name_id
                count += 1
    self._locked = True
    return store_dict
Reconstructs objects from the pickle dumps in load_dict.
def _load(self, load_dict):
    """Reconstructs objects from the pickle dumps in `load_dict`.

    The 'explored_data' entry in `load_dict` is used to reconstruct
    the exploration range in the correct order.

    Sets the `v_protocol` property to the protocol used to store 'data'.

    :raises: ParameterLockedException if the parameter is locked
    """
    if self.v_locked:
        raise pex.ParameterLockedException('Parameter `%s` is locked!' % self.v_full_name)

    dump = None
    if 'data' in load_dict:
        dump = load_dict['data']
        self._data = pickle.loads(dump)
    else:
        self._logger.warning('Your parameter `%s` is empty, '
                             'I did not find any data on disk.' % self.v_full_name)

    try:
        self.v_protocol = load_dict[PickleParameter.PROTOCOL]
    except KeyError:
        # For backwards compatibility with files that did not store the
        # protocol explicitly: infer it from the dump itself.
        # BUGFIX: the original referenced `dump` unconditionally here, which
        # raised a NameError whenever no 'data' entry was found on disk.
        if dump is not None:
            self.v_protocol = PickleParameter._get_protocol(dump)

    if 'explored_data' in load_dict:
        explore_table = load_dict['explored_data']
        name_col = explore_table['idx']
        explore_list = []
        for name_id in name_col:
            arrayname = self._build_name(name_id)
            loaded = pickle.loads(load_dict[arrayname])
            explore_list.append(loaded)
        self._explored_range = explore_list
        self._explored = True

    self._default = self._data
    self._locked = True
Translates integer indices into the appropriate names
def f_translate_key(self, key):
    """Translates integer indices into the appropriate names"""
    if not isinstance(key, int):
        return key
    # Index 0 maps to the result's own name, index X to `name_X`
    if key == 0:
        return self.v_name
    return self.v_name + '_%d' % key
Summarizes data handled by the result as a string.
def f_val_to_str(self):
    """Summarizes data handled by the result as a string.

    Calls `__repr__` on all handled data. Data is NOT ordered.

    Truncates the string if it is longer than
    :const:`pypetconstants.HDF5_STRCOL_MAX_VALUE_LENGTH`

    :return: string
    """
    max_length = pypetconstants.HDF5_STRCOL_MAX_VALUE_LENGTH
    pieces = []
    total_length = 0
    for key in self._data:
        piece = '%s=%s, ' % (key, repr(self._data[key]))
        pieces.append(piece)
        total_length += len(piece)
        # Stop early once we already exceed the maximum length
        if total_length > max_length:
            break
    summary = "".join(pieces)
    if len(summary) > max_length:
        summary = summary[0:max_length - 3] + '...'
    else:
        summary = summary[0:-2]  # Delete the last `, `
    return summary
Returns all handled data as a dictionary.
def f_to_dict(self, copy=True):
    """Returns all handled data as a dictionary.

    :param copy: Whether the original dictionary or a shallow copy is returned.

    :return: Data dictionary
    """
    if not copy:
        return self._data
    return self._data.copy()
Method to put data into the result.
def f_set(self, *args, **kwargs):
    """ Method to put data into the result.

    :param args:

        The first positional argument is stored with the name of the result.
        Following arguments are stored with `name_X` where `X` is the position
        of the argument.

    :param kwargs: Arguments are stored with the key as name.

    :raises: TypeError if outer data structure is not understood.

    Example usage:

    >>> res = Result('supergroup.subgroup.myresult', comment='I am a neat example!')
    >>> res.f_set(333,42.0, mystring='String!')
    >>> res.f_get('myresult')
    333
    >>> res.f_get('myresult_1')
    42.0
    >>> res.f_get('mystring')
    'String!'
    """
    if args and self.v_name is None:
        raise AttributeError('Cannot set positional value because I do not have a name!')

    # Positional arguments are translated into `name`/`name_X` entries
    for position, value in enumerate(args):
        self.f_set_single(self.f_translate_key(position), value)

    for key, value in kwargs.items():
        self.f_set_single(key, value)
Returns items handled by the result.
def f_get(self, *args):
    """Returns items handled by the result.

    If only a single name is given, a single data item is returned. If several
    names are given, a list is returned. For integer inputs the result returns
    `resultname_X`.

    If the result contains only a single entry you can call `f_get()` without
    arguments. If you call `f_get()` and the result contains more than one
    element a ValueError is thrown. If the requested item(s) cannot be found
    an AttributeError is thrown.

    :param args: strings-names or integers

    :return: Single data item or tuple of data
    """
    if not args:
        # No names given: only valid if exactly one entry is handled
        if len(self._data) == 1:
            return list(self._data.values())[0]
        elif len(self._data) > 1:
            raise ValueError('Your result `%s` contains more than one entry: '
                             '`%s` Please use >>f_get<< with one of these.' %
                             (self.v_full_name, str(list(self._data.keys()))))
        else:
            raise AttributeError('Your result `%s` is empty, cannot access data.' %
                                 self.v_full_name)

    result_list = []
    for name in args:
        name = self.f_translate_key(name)
        if name not in self._data:
            if name == 'data' and len(self._data) == 1:
                # Convenience fallback: `data` addresses the only entry
                return self._data[list(self._data.keys())[0]]
            else:
                raise AttributeError('`%s` is not part of your result `%s`.' %
                                     (name, self.v_full_name))
        result_list.append(self._data[name])

    if len(args) == 1:
        return result_list[0]
    return result_list
Sets a single data item of the result.
def f_set_single(self, name, item):
    """Sets a single data item of the result.

    Raises TypeError if the type of the outer data structure is not understood.
    Note that the type check is shallow. For example, if the data item is a list,
    the individual list elements are NOT checked whether their types are
    appropriate.

    :param name: The name of the data item

    :param item: The data item

    :raises: TypeError

    Example usage:

    >>> res.f_set_single('answer', 42)
    >>> res.f_get('answer')
    42
    """
    if self.v_stored:
        # Fixed grammar of the warning ('If you not' -> 'If you do not')
        self._logger.debug('You are changing an already stored result. If '
                           'you do not explicitly overwrite the data on disk, this change '
                           'might be lost and not propagated to disk.')

    if not self._supports(item):
        raise TypeError('Your result `%s` of type `%s` is not supported.' %
                        (name, str(type(item))))

    if name in self._data:
        self._logger.debug('Replacing `%s` in result `%s`.' % (name, self.v_full_name))
    self._data[name] = item
Removes `*args` from the result.
def f_remove(self, *args):
    """Removes `*args` from the result.

    :raises: AttributeError if an entry to remove does not exist
    """
    for arg in args:
        arg = self.f_translate_key(arg)
        if arg in self._data:
            del self._data[arg]
        else:
            # BUGFIX: the original used `self.name_`, an attribute that does
            # not exist (sibling methods use `v_full_name`), so the intended
            # error message could never be produced.
            raise AttributeError('Your result `%s` does not contain %s.' %
                                 (self.v_full_name, arg))
Supports everything of the parent class and csr, csc, bsr, and dia sparse matrices.
def _supports(self, item):
    """Supports everything of parent class and csr, csc, bsr, and dia sparse matrices."""
    # Accept any supported sparse matrix first, then defer to the parent class
    if SparseParameter._is_supported_matrix(item):
        return True
    return super(SparseResult, self)._supports(item)
Returns a storage dictionary understood by the storage service.
def _store(self):
    """Returns a storage dictionary understood by the storage service.

    Sparse matrices are extracted similar to the
    :class:`~pypet.parameter.SparseParameter`
    and marked with the identifier `__spsp__`.
    """
    store_dict = {}
    for key, val in self._data.items():
        if not SparseParameter._is_supported_matrix(val):
            store_dict[key] = val
            continue
        # Serialize the sparse matrix into several plain entries
        data_list, name_list, hash_tuple = SparseParameter._serialize_matrix(val)
        rename_list = ['%s%s%s' % (key, SparseParameter.IDENTIFIER, name)
                       for name in name_list]
        # A serialization into 4 pieces signals a `dia` matrix
        is_dia = int(len(rename_list) == 4)
        store_dict[key + SparseResult.IDENTIFIER + 'is_dia'] = is_dia
        for idx, name in enumerate(rename_list):
            store_dict[name] = data_list[idx]
    return store_dict
Loads data from load_dict
def _load(self, load_dict):
    """Loads data from `load_dict`

    Reconstruction of sparse matrices similar to the
    :class:`~pypet.parameter.SparseParameter`.
    """
    for key in list(load_dict.keys()):
        # We delete keys over time, so a key may already be gone:
        if key not in load_dict:
            continue
        if SparseResult.IDENTIFIER in key:
            # All serialized pieces of one matrix share the prefix before
            # the identifier
            new_key = key.split(SparseResult.IDENTIFIER)[0]
            is_dia = load_dict.pop(new_key + SparseResult.IDENTIFIER + 'is_dia')
            name_list = SparseParameter._get_name_list(is_dia)
            rename_list = ['%s%s%s' % (new_key, SparseResult.IDENTIFIER, name)
                           for name in name_list]
            data_list = [load_dict.pop(name) for name in rename_list]
            matrix = SparseParameter._reconstruct_matrix(data_list)
            self._data[new_key] = matrix
        else:
            self._data[key] = load_dict[key]
Adds a single data item to the pickle result.
def f_set_single(self, name, item):
    """Adds a single data item to the pickle result.

    Note that it is NOT checked if the item can be pickled!

    :raises: AttributeError if `name` collides with the reserved protocol entry
    """
    if self.v_stored:
        # Fixed grammar of the warning ('If you not' -> 'If you do not')
        self._logger.debug('You are changing an already stored result. If '
                           'you do not explicitly overwrite the data on disk, this change '
                           'might be lost and not propagated to disk.')

    # The protocol entry name is reserved for internal bookkeeping
    if name == PickleResult.PROTOCOL:
        raise AttributeError('You cannot name an entry `%s`' % PickleResult.PROTOCOL)

    self._data[name] = item
Returns a dictionary containing pickle dumps
def _store(self):
    """Returns a dictionary containing pickle dumps"""
    # Dump every entry and record the protocol used for the dumps
    store_dict = {key: pickle.dumps(val, protocol=self.v_protocol)
                  for key, val in self._data.items()}
    store_dict[PickleResult.PROTOCOL] = self.v_protocol
    return store_dict
Reconstructs all items from the pickle dumps in load_dict.
def _load(self, load_dict):
    """Reconstructs all items from the pickle dumps in `load_dict`.

    Sets the `v_protocol` property to the protocol of the first reconstructed item.
    """
    try:
        self.v_protocol = load_dict.pop(PickleParameter.PROTOCOL)
    except KeyError:
        # For backwards compatibility with files that stored no protocol
        # entry: infer the protocol from one of the dumps.
        # BUGFIX: `next(load_dict.values())` raised a TypeError because a
        # dict view is not an iterator; it must be wrapped in `iter()`.
        dump = next(iter(load_dict.values()))
        self.v_protocol = PickleParameter._get_protocol(dump)
    for key in load_dict:
        val = load_dict[key]
        self._data[key] = pickle.loads(val)
Simply merge all trajectories in the working directory
def main():
    """Simply merge all trajectories in the working directory"""
    folder = os.getcwd()
    print('Merging all files')
    # We will only keep one trajectory, the other files are deleted
    merge_all_in_folder(folder,
                        delete_other_files=True,
                        dynamic_imports=FunctionParameter,
                        backup=False)
    print('Done')
Uploads a file
def upload_file(filename, session):
    """ Uploads a file """
    print('Uploading file %s' % filename)
    # Local source and remote sftp target
    source = os.path.join(os.getcwd(), filename)
    target = 'sftp://' + ADDRESS + WORKING_DIR
    outgoing = saga.filesystem.File(source, session=session, flags=OVERWRITE)
    outgoing.copy(target)
    print('Transfer of `%s` to `%s` successful' % (filename, target))
Downloads a file
def download_file(filename, session):
    """ Downloads a file """
    print('Downloading file %s' % filename)
    # Remote sftp source and local target
    source = os.path.join('sftp://' + ADDRESS + WORKING_DIR, filename)
    target = os.path.join(os.getcwd(), filename)
    incoming = saga.filesystem.File(source, session=session, flags=OVERWRITE)
    incoming.copy(target)
    print('Transfer of `%s` to `%s` successful' % (filename, target))
Creates and returns a new SAGA session
def create_session():
    """ Creates and returns a new SAGA session """
    # Username/password authentication context
    context = saga.Context("UserPass")
    context.user_id = USER
    context.user_pass = PASSWORD
    session = saga.Session()
    session.add_context(context)
    return session
Merges all trajectories found in the working directory
def merge_trajectories(session):
    """ Merges all trajectories found in the working directory """
    # Describe the remote merge job
    job_desc = saga.job.Description()
    job_desc.executable = 'python'
    job_desc.arguments = ['merge_trajs.py']
    job_desc.output = "mysagajob_merge.stdout"
    job_desc.error = "mysagajob_merge.stderr"
    job_desc.working_directory = WORKING_DIR

    service = saga.job.Service('ssh://' + ADDRESS, session=session)
    job = service.create_job(job_desc)

    print("\n...starting job...\n")
    # Now we can start our job.
    job.run()
    print("Job ID : %s" % (job.id))
    print("Job State : %s" % (job.state))

    print("\n...waiting for job...\n")
    # wait for the job to either finish or fail
    job.wait()

    print("Job State : %s" % (job.state))
    print("Exitcode : %s" % (job.exit_code))
Starts all jobs and runs `the_task.py` in batches.
def start_jobs(session):
    """ Starts all jobs and runs `the_task.py` in batches. """
    service = saga.job.Service('ssh://' + ADDRESS, session=session)
    jobs = []
    for batch in range(3):
        print('Starting batch %d' % batch)

        job_desc = saga.job.Description()
        job_desc.executable = 'python'
        # NOTE(review): script and option are passed as a single list element;
        # saga joins arguments into one command line so this works, but
        # ['the_task.py', '--batch=%d'] would be the cleaner form — confirm.
        job_desc.arguments = ['the_task.py --batch=' + str(batch)]
        job_desc.output = "mysagajob.stdout" + str(batch)
        job_desc.error = "mysagajob.stderr" + str(batch)
        job_desc.working_directory = WORKING_DIR

        job = service.create_job(job_desc)
        print("Job ID : %s" % (job.id))
        print("Job State : %s" % (job.state))
        print("\n...starting job...\n")
        job.run()
        jobs.append(job)

    for job in jobs:
        print("Job ID : %s" % (job.id))
        print("Job State : %s" % (job.state))
        print("\n...waiting for job...\n")
        # wait for the job to either finish or fail
        job.wait()
        print("Job State : %s" % (job.state))
        print("Exitcode : %s" % (job.exit_code))
Sophisticated simulation of multiplication
def multiply(traj):
    """Sophisticated simulation of multiplication"""
    product = traj.x * traj.y
    traj.f_add_result('z', z=product, comment='I am the product of two reals!')
Main function to protect the *entry point* of the program.
def main():
    """Main function to protect the *entry point* of the program.

    If you want to use multiprocessing with SCOOP you need to wrap your
    main code creating an environment into a function. Otherwise
    the newly started child processes will re-execute the code and throw
    errors (also see http://scoop.readthedocs.org/en/latest/usage.html#pitfalls).
    """
    # Create an environment that handles running.
    # Let's enable multiprocessing with scoop:
    filename = os.path.join('hdf5', 'example_21.hdf5')
    env = Environment(trajectory='Example_21_SCOOP',
                      filename=filename,
                      file_title='Example_21_SCOOP',
                      log_stdout=True,
                      comment='Multiprocessing example using SCOOP!',
                      multiproc=True,
                      freeze_input=True,  # We want to save overhead and freeze input
                      use_scoop=True,  # Yes we want SCOOP!
                      wrap_mode=pypetconstants.WRAP_MODE_LOCAL,  # SCOOP only works
                      # with 'LOCAL' or 'NETLOCK' wrapping
                      overwrite_file=True)

    # Get the trajectory from the environment
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1.0, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1.0, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product, but we want to explore a bit more
    traj.f_explore(cartesian_product({'x': [float(x) for x in range(20)],
                                      'y': [float(y) for y in range(20)]}))

    # Run the simulation
    env.run(multiply)

    # Let's check that all runs are completed!
    assert traj.f_is_completed()

    # Finally disable logging and close all log-files
    env.disable_logging()
Runs a simulation of a model neuron.
def run_neuron(traj):
    """Runs a simulation of a model neuron.

    :param traj: Container with all parameters.

    :return: An estimate of the firing rate of the neuron
    """
    # Extract all parameters from `traj`
    V_init = traj.par.neuron.V_init
    I = traj.par.neuron.I
    tau_V = traj.par.neuron.tau_V
    tau_ref = traj.par.neuron.tau_ref
    dt = traj.par.simulation.dt
    duration = traj.par.simulation.duration

    steps = int(duration / float(dt))

    # Containers for the Euler integration
    V_array = np.zeros(steps)
    V_array[0] = V_init
    spiketimes = []  # List to collect all times of action potentials

    # Do the Euler integration:
    print('Starting Euler Integration')
    for step in range(1, steps):
        if V_array[step - 1] >= 1:
            # The membrane potential crossed the threshold and we mark this as
            # an action potential
            V_array[step] = 0
            spiketimes.append((step - 1) * dt)
        elif spiketimes and step * dt - spiketimes[-1] <= tau_ref:
            # We are in the refractory period, so we simply clamp the voltage
            # to 0
            V_array[step] = 0
        else:
            # Euler Integration step:
            dV = -1 / tau_V * V_array[step - 1] + I
            V_array[step] = V_array[step - 1] + dV * dt
    print('Finished Euler Integration')

    # Add the voltage trace and spike times.
    # This result will be renamed to `traj.results.neuron.run_XXXXXXXX`.
    traj.f_add_result('neuron.$', V=V_array, nspikes=len(spiketimes),
                      comment='Contains the development of the membrane potential over time '
                              'as well as the number of spikes.')

    # And finally we return the estimate of the firing rate
    return len(spiketimes) / float(traj.par.simulation.duration) * 1000
Postprocessing sorts computed firing rates into a table
def neuron_postproc(traj, result_list):
    """Postprocessing, sorts computed firing rates into a table

    :param traj: Container for results and parameters
    :param result_list: List of tuples, where first entry is the run index and second is the actual
                        result of the corresponding run.
    :return:
    """
    # A pandas DataFrame lets us index the firing rates directly with the
    # parameter values instead of translating them into integer indices
    # (a 2D numpy array would have worked as well).
    I_range = traj.par.neuron.f_get('I').f_get_range()
    ref_range = traj.par.neuron.f_get('tau_ref').f_get_range()

    I_index = sorted(set(I_range))
    ref_index = sorted(set(ref_range))
    # This frame is basically a two dimensional table that we can index with
    # our parameters
    rates_frame = pd.DataFrame(columns=ref_index, index=I_index)

    # The result list contains (run index, firing rate) tuples
    for run_idx, firing_rates in result_list:
        I_val = I_range[run_idx]
        ref_val = ref_range[run_idx]
        # Put the firing rate into the data frame
        rates_frame.loc[I_val, ref_val] = firing_rates

    # Finally we going to store our new firing rate table into the trajectory
    traj.f_add_result('summary.firing_rates', rates_frame=rates_frame,
                      comment='Contains a pandas data frame with all firing rates.')
Adds all parameters to traj
def add_parameters(traj):
    """Adds all parameters to `traj`"""
    print('Adding Parameters')

    traj.f_add_parameter('neuron.V_init', 0.0,
                         comment='The initial condition for the '
                                 'membrane potential')
    traj.f_add_parameter('neuron.I', 0.0,
                         comment='The externally applied current.')
    traj.f_add_parameter('neuron.tau_V', 10.0,
                         comment='The membrane time constant in milliseconds')
    # Typo fixed in the comment string below ('potnetial' -> 'potential')
    traj.f_add_parameter('neuron.tau_ref', 5.0,
                         comment='The refractory period in milliseconds '
                                 'where the membrane potential '
                                 'is clamped.')
    traj.f_add_parameter('simulation.duration', 1000.0,
                         comment='The duration of the experiment in '
                                 'milliseconds.')
    traj.f_add_parameter('simulation.dt', 0.1,
                         comment='The step size of an Euler integration step.')
Explores different values of I and tau_ref.
def add_exploration(traj):
    """Explores different values of `I` and `tau_ref`."""
    print('Adding exploration of I and tau_ref')

    explore_dict = {'neuron.I': np.arange(0, 1.01, 0.01).tolist(),
                    'neuron.tau_ref': [5.0, 7.5, 10.0]}
    # The second argument, the tuple, specifies the order of the cartesian
    # product: the right-most variable changes fastest and defines the
    # 'inner for-loop' of the cartesian product.
    explore_dict = cartesian_product(explore_dict, ('neuron.tau_ref', 'neuron.I'))

    traj.f_explore(explore_dict)
Runs a network before the actual experiment.
def execute_network_pre_run(self, traj, network, network_dict, component_list, analyser_list):
    """Runs a network before the actual experiment.

    Called by a :class:`~pypet.brian2.network.NetworkManager`.
    Similar to :func:`~pypet.brian2.network.NetworkRunner.run_network`.

    Subruns and their durations are extracted from the trajectory. All
    :class:`~pypet.brian2.parameter.Brian2Parameter` instances found under
    `traj.parameters.simulation.pre_durations` (default, you can change the
    name of the group where to search for durations at runner initialisation).
    The order is determined from the `v_annotations.order` attributes. There must
    be at least one subrun in the trajectory, otherwise an AttributeError is
    thrown. If two subruns equal in their order property a RuntimeError is thrown.

    :param traj: Trajectory container
    :param network: BRIAN2 network
    :param network_dict: Dictionary of items shared among all components
    :param component_list: List of :class:`~pypet.brian2.network.NetworkComponent` objects
    :param analyser_list: List of :class:`~pypet.brian2.network.NetworkAnalyser` objects
    """
    # Delegate to the generic runner with the pre-run flag set
    self._execute_network_run(traj, network, network_dict,
                              component_list, analyser_list, pre_run=True)
Runs a network in an experimental run.
def execute_network_run(self, traj, network, network_dict, component_list, analyser_list):
    """Runs a network in an experimental run.

    Called by a :class:`~pypet.brian2.network.NetworkManager`.

    A network run is divided into several subruns which are defined as
    :class:`~pypet.brian2.parameter.Brian2Parameter` instances found under
    `traj.parameters.simulation.durations` (default, you can change the name
    of the group where to search for durations at runner initialisation).
    Their order is determined from the `v_annotations.order` attribute; an
    error is thrown if no order attribute can be found or if two parameters
    share the same order. There must be at least one subrun in the trajectory,
    otherwise an AttributeError is thrown; two subruns with equal order raise
    a RuntimeError.

    For every subrun the following steps are executed:

    1. `add_to_network` of every NetworkComponent, in the order they were
       passed to the NetworkManager.
    2. `add_to_network` of every NetworkAnalyser, in passed order.
    3. `add_to_network` of the NetworkRunner itself (usually the runner should
       not add or remove anything, but this step is executed for completeness).
    4. Running the BRIAN2 network for the duration of the current subrun via
       the network's `run` function.
    5. `analyse` of every NetworkAnalyser, in passed order.
    6. `remove_from_network` of the NetworkRunner itself (for completeness).
    7. `remove_from_network` of every NetworkAnalyser, in passed order.
    8. `remove_from_network` of every NetworkComponent, in passed order.

    These 8 steps are repeated for every subrun in the `subrun_list`. The
    `subrun_list` passed to all `add_to_network`, `analyse` and
    `remove_from_network` methods can be modified within these functions to
    potentially alter the order of execution or even erase or add upcoming
    subruns if necessary. For example, a NetworkAnalyser checks for epileptic
    pathological activity and cancels all coming subruns in case of undesired
    network dynamics.

    :param traj: Trajectory container
    :param network: BRIAN2 network
    :param network_dict: Dictionary of items shared among all components
    :param component_list: List of :class:`~pypet.brian2.network.NetworkComponent` objects
    :param analyser_list: List of :class:`~pypet.brian2.network.NetworkAnalyser` objects
    """
    # Delegate to the generic runner with the pre-run flag cleared
    self._execute_network_run(traj, network, network_dict,
                              component_list, analyser_list, pre_run=False)
Extracts subruns from the trajectory.
def _extract_subruns(self, traj, pre_run=False): """Extracts subruns from the trajectory. :param traj: Trajectory container :param pre_run: Boolean whether current run is regular or a pre-run :raises: RuntimeError if orders are duplicates or even missing """ if pre_run: durations_list = traj.f_get_all(self._pre_durations_group_name) else: durations_list = traj.f_get_all(self._durations_group_name) subruns = {} orders = [] for durations in durations_list: for duration_param in durations.f_iter_leaves(with_links=False): if 'order' in duration_param.v_annotations: order = duration_param.v_annotations.order else: raise RuntimeError('Your duration parameter %s has no order. Please add ' 'an order in `v_annotations.order`.' % duration_param.v_full_name) if order in subruns: raise RuntimeError('Your durations must differ in their order, there are two ' 'with order %d.' % order) else: subruns[order] = duration_param orders.append(order) return [subruns[order] for order in sorted(orders)]
Generic `execute_network_run` function; handles experimental runs as well as pre-runs.
def _execute_network_run(self, traj, network, network_dict, component_list,
                         analyser_list, pre_run=False):
    """Generic `execute_network_run` function, handles experimental runs as well as pre-runs.

    See also :func:`~pypet.brian2.network.NetworkRunner.execute_network_run` and
    :func:`~pypet.brian2.network.NetworkRunner.execute_network_pre_run`.
    """
    # Initially extract the `subrun_list`
    subrun_list = self._extract_subruns(traj, pre_run=pre_run)

    subrun_number = 0  # counter for subruns

    # Execute all subruns in order; note that the callbacks below may modify
    # `subrun_list` to cancel or add upcoming subruns
    while len(subrun_list) > 0:
        current_subrun = subrun_list.pop(0)

        # 1. Call `add` of all normal components
        for component in component_list:
            component.add_to_network(traj, network, current_subrun,
                                     subrun_list, network_dict)

        # 2. Call `add` of all analyser components
        for analyser in analyser_list:
            analyser.add_to_network(traj, network, current_subrun,
                                    subrun_list, network_dict)

        # 3. Call `add` of the network runner itself
        self.add_to_network(traj, network, current_subrun, subrun_list, network_dict)

        # 4. Run the network
        self._logger.info('STARTING subrun `%s` (#%d) lasting %s.' %
                          (current_subrun.v_name, subrun_number,
                           str(current_subrun.f_get())))
        network.run(duration=current_subrun.f_get(), report=self._report,
                    report_period=self._report_period)

        # 5. Call `analyse` of all analyser components
        for analyser in analyser_list:
            analyser.analyse(traj, network, current_subrun,
                             subrun_list, network_dict)

        # 6. Call `remove` of the network runner itself
        self.remove_from_network(traj, network, current_subrun, subrun_list, network_dict)

        # 7. Call `remove` for all analyser components
        for analyser in analyser_list:
            analyser.remove_from_network(traj, network, current_subrun,
                                         subrun_list, network_dict)

        # 8. Call `remove` for all normal components
        for component in component_list:
            component.remove_from_network(traj, network, current_subrun,
                                          subrun_list, network_dict)

        subrun_number += 1
Adds parameters for a network simulation.
def add_parameters(self, traj):
    """Adds parameters for a network simulation.

    Calls :func:`~pypet.brian2.network.NetworkComponent.add_parameters` for all
    components, analyser, and the network runner (in this order).

    :param traj: Trajectory container
    """
    self._logger.info('Adding Parameters of Components')
    for comp in self.components:
        comp.add_parameters(traj)

    if self.analysers:
        self._logger.info('Adding Parameters of Analysers')
        for analyser in self.analysers:
            analyser.add_parameters(traj)

    self._logger.info('Adding Parameters of Runner')
    self.network_runner.add_parameters(traj)
Pre-builds network components.
def pre_build(self, traj):
    """Pre-builds network components.

    Calls :func:`~pypet.brian2.network.NetworkComponent.pre_build` for all
    components, analysers, and the network runner.

    `pre_build` is not automatically called but either needs to be executed
    manually by the user, either calling it directly or by using
    :func:`~pypet.brian2.network.NetworkManager.pre_run`.

    This function does not create a `BRIAN2 network`, but only its components.

    :param traj: Trajectory container
    """
    self._logger.info('Pre-Building Components')
    for comp in self.components:
        comp.pre_build(traj, self._brian_list, self._network_dict)

    if self.analysers:
        self._logger.info('Pre-Building Analysers')
        for analyser in self.analysers:
            analyser.pre_build(traj, self._brian_list, self._network_dict)

    self._logger.info('Pre-Building NetworkRunner')
    self.network_runner.pre_build(traj, self._brian_list, self._network_dict)

    # Remember that components have been pre-built
    self._pre_built = True
Builds network components.
def build(self, traj):
    """Builds network components.

    Calls :func:`~pypet.brian2.network.NetworkComponent.build` for all components,
    analysers and the network runner.

    `build` does not need to be called by the user. If
    `~pypet.brian2.network.run_network` is passed to an
    :class:`~pypet.environment.Environment` with this Network manager,
    `build` is automatically called for each individual experimental run.

    :param traj: Trajectory container
    """
    self._logger.info('Building Components')
    for comp in self.components:
        comp.build(traj, self._brian_list, self._network_dict)

    if self.analysers:
        self._logger.info('Building Analysers')
        for analyser in self.analysers:
            analyser.build(traj, self._brian_list, self._network_dict)

    self._logger.info('Building NetworkRunner')
    self.network_runner.build(traj, self._brian_list, self._network_dict)
Starts a network run before the individual run.
def pre_run_network(self, traj):
    """Starts a network run before the individual run.

    Useful if a network needs an initial run that can be shared by all
    individual experimental runs during parameter exploration.

    Needs to be called by the user. If `pre_run_network` is started by the user,
    :func:`~pypet.brian2.network.NetworkManager.pre_build` will be automatically
    called from this function.

    This function will create a new BRIAN2 network which is run by the
    :class:`~pypet.brian2.network.NetworkRunner` and it's
    :func:`~pypet.brian2.network.NetworkRunner.execute_network_pre_run`.

    To see how a network run is structured also take a look at
    :func:`~pypet.brian2.network.NetworkRunner.run_network`.

    :param traj: Trajectory container
    """
    self.pre_build(traj)

    self._logger.info('\n------------------------\n'
                      'Pre-Running the Network\n'
                      '------------------------')

    self._network = self._network_constructor(*self._brian_list)
    self.network_runner.execute_network_pre_run(traj, self._network,
                                                self._network_dict,
                                                self.components,
                                                self.analysers)

    self._logger.info('\n-----------------------------\n'
                      'Network Simulation successful\n'
                      '-----------------------------')
    self._pre_run = True

    # Snapshot the network state so individual runs can restore it later
    if hasattr(self._network, 'store'):
        self._network.store('pre_run')
Top - level simulation function pass this to the environment
def run_network(self, traj): """Top-level simulation function, pass this to the environment Performs an individual network run during parameter exploration. `run_network` does not need to be called by the user. If this method (not this one of the NetworkManager) is passed to an :class:`~pypet.environment.Environment` with this NetworkManager, `run_network` and :func:`~pypet.brian2.network.NetworkManager.build` are automatically called for each individual experimental run. This function will create a new BRIAN2 network in case one was not pre-run. The execution of the network run is carried out by the :class:`~pypet.brian2.network.NetworkRunner` and it's :func:`~pypet.brian2.network.NetworkRunner.execute_network_run` (also take a look at this function's documentation to see the structure of a network run). :param traj: Trajectory container """ # Check if the network was pre-built if self._pre_built: if self._pre_run and hasattr(self._network, 'restore'): self._network.restore('pre_run') # Temprorary fix for https://github.com/brian-team/brian2/issues/681 self._network.store('pre_run') self._run_network(traj) else: self._run_network(traj)
Starts a single run carried out by a NetworkRunner.
def _run_network(self, traj):
    """Starts a single run carried out by a NetworkRunner.

    Called from the public function :func:`~pypet.brian2.network.NetworkManger.run_network`.

    :param traj: Trajectory container

    """
    self.build(traj)
    self._pretty_print_explored_parameters(traj)
    # We need to construct a network object in case one was not pre-run
    if not self._pre_run:
        self._network = self._network_constructor(*self._brian_list)
    # Start the experimental run
    self.network_runner.execute_network_run(traj, self._network,
                                            self._network_dict,
                                            self.components,
                                            self.analysers)
    self._logger.info('\n-----------------------------\n'
                      'Network Simulation successful\n'
                      '-----------------------------')
Function to create generic filenames based on what has been explored
def make_filename(traj):
    """Build a generic ``.png`` filename from a trajectory's explored parameters.

    Each explored parameter contributes a ``name_value`` piece; pieces are
    joined with double underscores.
    """
    pieces = ['%s_%s' % (param.v_name, param.f_get())
              for param in traj.f_get_explored_parameters().values()]
    return '__'.join(pieces) + '.png'
Simple wrapper function for compatibility with * pypet *.
def wrap_automaton(traj): """ Simple wrapper function for compatibility with *pypet*. We will call the original simulation functions with data extracted from ``traj``. The resulting automaton patterns wil also be stored into the trajectory. :param traj: Trajectory container for data """ # Make initial state initial_state = make_initial_state(traj.initial_name, traj.ncells, traj.seed) # Run simulation pattern = cellular_automaton_1D(initial_state, traj.rule_number, traj.steps) # Store the computed pattern traj.f_add_result('pattern', pattern, comment='Development of CA over time')
Main * boilerplate * function to start simulation
def main(): """ Main *boilerplate* function to start simulation """ # Now let's make use of logging logger = logging.getLogger() # Create folders for data and plots folder = os.path.join(os.getcwd(), 'experiments', 'ca_patterns_pypet') if not os.path.isdir(folder): os.makedirs(folder) filename = os.path.join(folder, 'all_patterns.hdf5') # Create an environment env = Environment(trajectory='cellular_automata', multiproc=True, ncores=4, wrap_mode='QUEUE', filename=filename, overwrite_file=True) # extract the trajectory traj = env.traj traj.par.ncells = Parameter('ncells', 400, 'Number of cells') traj.par.steps = Parameter('steps', 250, 'Number of timesteps') traj.par.rule_number = Parameter('rule_number', 30, 'The ca rule') traj.par.initial_name = Parameter('initial_name', 'random', 'The type of initial state') traj.par.seed = Parameter('seed', 100042, 'RNG Seed') # Explore exp_dict = {'rule_number' : [10, 30, 90, 110, 184], 'initial_name' : ['single', 'random'],} # # You can uncomment the ``exp_dict`` below to see that changing the # # exploration scheme is now really easy: # exp_dict = {'rule_number' : [10, 30, 90, 110, 184], # 'ncells' : [100, 200, 300], # 'seed': [333444555, 123456]} exp_dict = cartesian_product(exp_dict) traj.f_explore(exp_dict) # Run the simulation logger.info('Starting Simulation') env.run(wrap_automaton) # Load all data traj.f_load(load_data=2) logger.info('Printing data') for idx, run_name in enumerate(traj.f_iter_runs()): # Plot all patterns filename = os.path.join(folder, make_filename(traj)) plot_pattern(traj.crun.pattern, traj.rule_number, filename) progressbar(idx, len(traj), logger=logger) # Finally disable logging and close all log-files env.disable_logging()
Returns next element from chain.
def next(self):
    """Return the next element from the chain of iterators.

    Advances the foremost iterator; once it is exhausted, iterators are
    popped from the left of the chain until one yields a value.

    Raises StopIteration when every iterator in the chain is exhausted.
    """
    # Loop because freshly popped iterators may themselves be empty.
    while True:
        try:
            return self._current.__next__()
        except StopIteration:
            if not self._chain:
                # No iterators remain, so no elements remain either.
                raise StopIteration('Reached end of iterator chain')
            self._current = iter(self._chain.popleft())
Merges all files in a given folder.
def merge_all_in_folder(folder, ext='.hdf5',
                        dynamic_imports=None,
                        storage_service=None,
                        force=False,
                        ignore_data=(),
                        move_data=False,
                        delete_other_files=False,
                        keep_info=True,
                        keep_other_trajectory_info=True,
                        merge_config=True,
                        backup=True):
    """Merges all files in a given folder.

    IMPORTANT: Does not check if there are more than 1 trajectory in a file.
    Always uses the last trajectory in file and ignores the other ones.

    Trajectories are merged according to the alphabetical order of the files,
    i.e. the resulting merged trajectory is found in the first file
    (according to lexicographic ordering).

    :param folder: folder (not recursive) where to look for files
    :param ext: only files with the given extension are used
    :param dynamic_imports: Dynamic imports for loading
    :param storage_service: storage service to use, leave `None` to use the default one
    :param force: If loading should be forced.
    :param delete_other_files: Deletes files of merged trajectories

    All other parameters as in `f_merge_many` of the trajectory.

    :return: The merged traj

    """
    in_dir = os.listdir(folder)
    all_files = []

    # Find all files with matching extension
    for file in in_dir:
        full_file = os.path.join(folder, file)
        if os.path.isfile(full_file):
            _, extension = os.path.splitext(full_file)
            if extension == ext:
                all_files.append(full_file)
    # Lexicographic order determines which trajectory receives the merge
    all_files = sorted(all_files)

    # Open all trajectories
    trajs = []
    for full_file in all_files:
        # index=-1 picks the *last* trajectory stored in each file
        traj = load_trajectory(index=-1,
                               storage_service=storage_service,
                               filename=full_file,
                               load_data=0,
                               force=force,
                               dynamic_imports=dynamic_imports)
        trajs.append(traj)

    # Merge all trajectories
    first_traj = trajs.pop(0)
    first_traj.f_merge_many(trajs,
                            ignore_data=ignore_data,
                            move_data=move_data,
                            delete_other_trajectory=False,
                            keep_info=keep_info,
                            keep_other_trajectory_info=keep_other_trajectory_info,
                            merge_config=merge_config,
                            backup=backup)

    if delete_other_files:
        # Delete all but the first file
        for file in all_files[1:]:
            os.remove(file)

    return first_traj
Handler of SIGINT
def _handle_sigint(self, signum, frame): """Handler of SIGINT Does nothing if SIGINT is encountered once but raises a KeyboardInterrupt in case it is encountered twice. immediatly. """ if self.hit: prompt = 'Exiting immediately!' raise KeyboardInterrupt(prompt) else: self.hit = True prompt = ('\nYou killed the process(es) via `SIGINT` (`CTRL+C`). ' 'I am trying to exit ' 'gracefully. Using `SIGINT` (`CTRL+C`) ' 'again will cause an immediate exit.\n') sys.stderr.write(prompt)
Small configuration file management function
def config_from_file(filename, config=None):
    '''Read or write a small JSON configuration file.

    When ``config`` is given (truthy), dump it to ``filename`` and return
    True on success or False on an IOError.  When ``config`` is omitted,
    return the parsed file contents, an empty dict if the file does not
    exist, or False if reading fails.
    '''
    if config:
        # Write mode
        try:
            with open(filename, 'w') as fdesc:
                fdesc.write(json.dumps(config))
        except IOError as error:
            logger.exception(error)
            return False
        return True
    # Read mode
    if not os.path.isfile(filename):
        return {}
    try:
        with open(filename, 'r') as fdesc:
            return json.loads(fdesc.read())
    except IOError:
        return False
Method to request a PIN from ecobee for authorization
def request_pin(self):
    ''' Method to request a PIN from ecobee for authorization '''
    url = 'https://api.ecobee.com/authorize'
    params = {'response_type': 'ecobeePin',
              'client_id': self.api_key, 'scope': 'smartWrite'}
    try:
        request = requests.get(url, params=params)
    except RequestException:
        logger.warn("Error connecting to Ecobee. Possible connectivity outage."
                    "Could not request pin.")
        return
    self.authorization_code = request.json()['code']
    self.pin = request.json()['ecobeePin']
    # NOTE(review): logged at ERROR level presumably to make the manual
    # authorization instructions stand out; the message is informational.
    logger.error('Please authorize your ecobee developer app with PIN code '
                 + self.pin + '\nGoto https://www.ecobee.com/consumerportal'
                 '/index.html, click\nMy Apps, Add application, Enter Pin'
                 ' and click Authorize.\nAfter authorizing, call request_'
                 'tokens() method.')
Method to request API tokens from ecobee
def request_tokens(self):
    ''' Method to request API tokens from ecobee '''
    url = 'https://api.ecobee.com/token'
    params = {'grant_type': 'ecobeePin', 'code': self.authorization_code,
              'client_id': self.api_key}
    try:
        request = requests.post(url, params=params)
    except RequestException:
        logger.warn("Error connecting to Ecobee. Possible connectivity outage."
                    "Could not request token.")
        return
    if request.status_code == requests.codes.ok:
        self.access_token = request.json()['access_token']
        self.refresh_token = request.json()['refresh_token']
        self.write_tokens_to_file()
        # PIN is one-shot; clear it once tokens have been issued.
        self.pin = None
    else:
        logger.warn('Error while requesting tokens from ecobee.com.'
                    ' Status code: ' + str(request.status_code))
        return
Method to refresh API tokens from ecobee
def refresh_tokens(self):
    ''' Refresh the API tokens using the stored refresh token.

    Returns True on success.  On a non-OK response the refresh token is
    assumed invalid and a new PIN authorization is started via
    ``request_pin()``; returns False.  Network errors are logged and also
    return False (consistent with ``request_pin``/``request_tokens``,
    which guard their HTTP calls the same way).
    '''
    url = 'https://api.ecobee.com/token'
    params = {'grant_type': 'refresh_token',
              'refresh_token': self.refresh_token,
              'client_id': self.api_key}
    try:
        request = requests.post(url, params=params)
    except RequestException:
        # FIX: previously an unguarded call; a connectivity outage raised
        # out of every caller (e.g. get_thermostats).
        logger.warn("Error connecting to Ecobee. Possible connectivity outage."
                    "Could not refresh tokens.")
        return False
    if request.status_code == requests.codes.ok:
        self.access_token = request.json()['access_token']
        self.refresh_token = request.json()['refresh_token']
        self.write_tokens_to_file()
        return True
    else:
        # Refresh token rejected: restart the PIN authorization flow.
        # Returning False keeps the previous falsy (None) contract.
        self.request_pin()
        return False
Set self. thermostats to a json list of thermostats from ecobee
def get_thermostats(self):
    ''' Set self.thermostats to a json list of thermostats from ecobee '''
    url = 'https://api.ecobee.com/1/thermostat'
    header = {'Content-Type': 'application/json;charset=UTF-8',
              'Authorization': 'Bearer ' + self.access_token}
    params = {'json': ('{"selection":{"selectionType":"registered",'
                       '"includeRuntime":"true",'
                       '"includeSensors":"true",'
                       '"includeProgram":"true",'
                       '"includeEquipmentStatus":"true",'
                       '"includeEvents":"true",'
                       '"includeWeather":"true",'
                       '"includeSettings":"true"}}')}
    try:
        request = requests.get(url, headers=header, params=params)
    except RequestException:
        logger.warn("Error connecting to Ecobee. Possible connectivity outage.")
        return None
    if request.status_code == requests.codes.ok:
        self.authenticated = True
        self.thermostats = request.json()['thermostatList']
        return self.thermostats
    else:
        self.authenticated = False
        logger.info("Error connecting to Ecobee while attempting to get "
                    "thermostat data.  Refreshing tokens and trying again.")
        # One-level recursion: retry only when the token refresh succeeded,
        # so a failing refresh cannot loop.
        if self.refresh_tokens():
            return self.get_thermostats()
        else:
            return None
Write api tokens to a file
def write_tokens_to_file(self):
    ''' Persist the current API credentials.

    Writes to the configuration file when file-based config is enabled,
    otherwise stores the dict on ``self.config``.
    '''
    # Snapshot everything needed to restore an authorized session.
    config = {'API_KEY': self.api_key,
              'ACCESS_TOKEN': self.access_token,
              'REFRESH_TOKEN': self.refresh_token,
              'AUTHORIZATION_CODE': self.authorization_code}
    if self.file_based_config:
        config_from_file(self.config_filename, config)
    else:
        self.config = config
Possible HVAC modes are auto, auxHeatOnly, cool, heat, off.
def set_hvac_mode(self, index, hvac_mode):
    ''' possible hvac modes are auto, auxHeatOnly, cool, heat, off '''
    # Target a single thermostat by identifier and update its settings.
    selection = {"selectionType": "thermostats",
                 "selectionMatch": self.thermostats[index]['identifier']}
    body = {"selection": selection,
            "thermostat": {"settings": {"hvacMode": hvac_mode}}}
    return self.make_request(body, "set HVAC mode")
The minimum time in minutes to run the fan each hour. Value from 1 to 60
def set_fan_min_on_time(self, index, fan_min_on_time):
    ''' The minimum time, in minutes, to run the fan each hour.
    Value from 1 to 60 '''
    selection = {"selectionType": "thermostats",
                 "selectionMatch": self.thermostats[index]['identifier']}
    body = {"selection": selection,
            "thermostat": {"settings": {"fanMinOnTime": fan_min_on_time}}}
    return self.make_request(body, "set fan minimum on time.")
Set fan mode. Values: auto minontime on
def set_fan_mode(self, index, fan_mode, cool_temp, heat_temp,
                 hold_type="nextTransition"):
    ''' Set fan mode. Values: auto, minontime, on '''
    # The API expects temperatures in tenths of a degree.
    params = {"holdType": hold_type,
              "coolHoldTemp": int(cool_temp * 10),
              "heatHoldTemp": int(heat_temp * 10),
              "fan": fan_mode}
    body = {"selection": {
                "selectionType": "thermostats",
                "selectionMatch": self.thermostats[index]['identifier']},
            "functions": [{"type": "setHold", "params": params}]}
    return self.make_request(body, "set fan mode")
Set a hold
def set_hold_temp(self, index, cool_temp, heat_temp,
                  hold_type="nextTransition"):
    ''' Set a hold '''
    # Temperatures are sent in tenths of a degree.
    params = {"holdType": hold_type,
              "coolHoldTemp": int(cool_temp * 10),
              "heatHoldTemp": int(heat_temp * 10)}
    body = {"selection": {
                "selectionType": "thermostats",
                "selectionMatch": self.thermostats[index]['identifier']},
            "functions": [{"type": "setHold", "params": params}]}
    return self.make_request(body, "set hold temp")
Set a climate hold - ie away home sleep
def set_climate_hold(self, index, climate, hold_type="nextTransition"):
    ''' Set a climate hold - ie away, home, sleep '''
    hold = {"type": "setHold",
            "params": {"holdType": hold_type,
                       "holdClimateRef": climate}}
    body = {"selection": {
                "selectionType": "thermostats",
                "selectionMatch": self.thermostats[index]['identifier']},
            "functions": [hold]}
    return self.make_request(body, "set climate hold")
Delete the vacation with name vacation
def delete_vacation(self, index, vacation):
    ''' Delete the vacation with name vacation '''
    body = {"selection": {
                "selectionType": "thermostats",
                "selectionMatch": self.thermostats[index]['identifier']},
            "functions": [{"type": "deleteVacation",
                           "params": {"name": vacation}}]}
    return self.make_request(body, "delete a vacation")
Resume currently scheduled program
def resume_program(self, index, resume_all=False):
    ''' Resume currently scheduled program '''
    # resume_all=True also cancels any nested/stacked holds.
    body = {"selection": {
                "selectionType": "thermostats",
                "selectionMatch": self.thermostats[index]['identifier']},
            "functions": [{"type": "resumeProgram",
                           "params": {"resumeAll": resume_all}}]}
    return self.make_request(body, "resume program")
Send a message to the thermostat
def send_message(self, index, message="Hello from python-ecobee!"):
    ''' Send a message to the thermostat '''
    # The API limits messages to 500 characters; truncate silently.
    body = {"selection": {
                "selectionType": "thermostats",
                "selectionMatch": self.thermostats[index]['identifier']},
            "functions": [{"type": "sendMessage",
                           "params": {"text": message[0:500]}}]}
    return self.make_request(body, "send message")
Set humidity level
def set_humidity(self, index, humidity):
    ''' Set humidity level'''
    # Coerce to int so string input (e.g. from a config) is accepted.
    body = {"selection": {
                "selectionType": "thermostats",
                "selectionMatch": self.thermostats[index]['identifier']},
            "thermostat": {"settings": {"humidity": int(humidity)}}}
    return self.make_request(body, "set humidity level")
Enable/disable the Alexa mic (only for Ecobee 4). Values: True, False.
def set_mic_mode(self, index, mic_enabled):
    '''Enable/disable Alexa mic (only for Ecobee 4)

    Values: True, False
    '''
    audio = {'microphoneEnabled': mic_enabled}
    body = {'selection': {
                'selectionType': 'thermostats',
                'selectionMatch': self.thermostats[index]['identifier']},
            'thermostat': {'audio': audio}}
    return self.make_request(body, 'set mic mode')
Enable/disable Smart Home/Away and Follow Me modes. Values: True, False.
def set_occupancy_modes(self, index, auto_away=None, follow_me=None):
    '''Enable/disable Smart Home/Away and Follow Me modes

    Values: True, False (None leaves the setting untouched server-side)
    '''
    settings = {'autoAway': auto_away,
                'followMeComfort': follow_me}
    body = {'selection': {
                'selectionType': 'thermostats',
                'selectionMatch': self.thermostats[index]['identifier']},
            'thermostat': {'settings': settings}}
    return self.make_request(body, 'set occupancy modes')
Enable/disable daylight savings. Values: True, False.
def set_dst_mode(self, index, dst):
    '''Enable/disable daylight savings

    Values: True, False
    '''
    location = {'isDaylightSaving': dst}
    body = {'selection': {
                'selectionType': 'thermostats',
                'selectionMatch': self.thermostats[index]['identifier']},
            'thermostat': {'location': location}}
    return self.make_request(body, 'set dst mode')
.
def future_dt_str(dt, td):
    """Return ``dt + td`` formatted with ``DT_PRINT_FORMAT``.

    :param dt: a ``datetime`` object
    :param td: offset in seconds; int, float, or a numeric string
    :returns: formatted string for the future point in time
    """
    # Accept string input (e.g. values parsed from DHCP options).
    if isinstance(td, str):
        td = float(td)
    td = timedelta(seconds=td)
    future_dt = dt + td
    return future_dt.strftime(DT_PRINT_FORMAT)
Generate the delay in seconds in which the DISCOVER will be sent.
def gen_delay_selecting():
    """Generate the delay in seconds in which the DISCOVER will be sent.

    [:rfc:`2131#section-4.4.1`]::

        The client SHOULD wait a random time between one and ten seconds to
        desynchronize the use of DHCP at startup.

    """
    # NOTE(review): randint(0, MAX_DELAY_SELECTING) can yield 0, while the
    # RFC suggests a minimum of one second — confirm MAX_DELAY_SELECTING
    # and the intended lower bound.
    delay = float(random.randint(0, MAX_DELAY_SELECTING))
    logger.debug('Delay to enter in SELECTING %s.', delay)
    logger.debug('SELECTING will happen on %s',
                 future_dt_str(nowutc(), delay))
    return delay
Generate the time in seconds in which DHCPDISCOVER will be retransmitted.
def gen_timeout_resend(attempts):
    """Generate the time in seconds in which DHCPDISCOVER will be retransmitted.

    [:rfc:`2131#section-3.1`]::

        might retransmit the
        DHCPREQUEST message four times, for a total delay of 60 seconds

    [:rfc:`2131#section-4.1`]::

        For example, in a 10Mb/sec Ethernet
        internetwork, the delay before the first retransmission SHOULD be 4
        seconds randomized by the value of a uniform random number chosen
        from the range -1 to +1.  Clients with clocks that provide resolution
        granularity of less than one second may choose a non-integer
        randomization value.  The delay before the next retransmission SHOULD
        be 8 seconds randomized by the value of a uniform number chosen from
        the range -1 to +1.  The retransmission delay SHOULD be doubled with
        subsequent retransmissions up to a maximum of 64 seconds.

    """
    # Exponential backoff: 4, 8, 16, ... seconds, with +/-1 s of fuzz.
    timeout = 2 ** (attempts + 1) + random.uniform(-1, +1)
    logger.debug('next timeout resending will happen on %s',
                 future_dt_str(nowutc(), timeout))
    return timeout
Generate time in seconds to retransmit DHCPREQUEST.
def gen_timeout_request_renew(lease):
    """Generate time in seconds to retransmit DHCPREQUEST.

    [:rfc:`2131#section-4.4.5`]::

        In both RENEWING and REBINDING states,
        if the client receives no response to its DHCPREQUEST
        message, the client SHOULD wait one-half of the remaining
        time until T2 (in RENEWING state) and one-half of the
        remaining lease time (in REBINDING state), down to a minimum
        of 60 seconds, before retransmitting the DHCPREQUEST message.

    """
    # Half (assuming RENEW_PERC == 0.5 -- TODO confirm) of the remaining
    # time until T2, floored at 60 seconds per the RFC.
    time_left = (lease.rebinding_time - lease.renewing_time) * RENEW_PERC
    if time_left < 60:
        time_left = 60
    logger.debug('Next request in renew will happen on %s',
                 future_dt_str(nowutc(), time_left))
    return time_left
.
def gen_timeout_request_rebind(lease):
    """Generate time in seconds to retransmit DHCPREQUEST in REBINDING.

    Analogous to :func:`gen_timeout_request_renew`, but based on the
    remaining lease time after T2, floored at 60 seconds
    ([:rfc:`2131#section-4.4.5`]).
    """
    time_left = (lease.lease_time - lease.rebinding_time) * RENEW_PERC
    if time_left < 60:
        time_left = 60
    logger.debug('Next request on rebinding will happen on %s',
                 future_dt_str(nowutc(), time_left))
    return time_left
Generate RENEWING time.
def gen_renewing_time(lease_time, elapsed=0):
    """Generate RENEWING time (T1).

    [:rfc:`2131#section-4.4.5`]::

        T1
        defaults to (0.5 * duration_of_lease).  T2 defaults to (0.875 *
        duration_of_lease).  Times T1 and T2 SHOULD be chosen with some
        random "fuzz" around a fixed value, to avoid synchronization of
        client reacquisition.

    :param lease_time: lease duration in seconds (int or numeric str)
    :param elapsed: seconds already elapsed, subtracted from T1
    """
    renewing_time = int(lease_time) * RENEW_PERC - elapsed
    # FIXME:80 [:rfc:`2131#section-4.4.5`]: the chosen "fuzz" could fingerprint
    # the implementation
    # NOTE: here using same "fuzz" as systemd?
    # Fuzz is drawn from +/- the T1..T2 interval length.
    range_fuzz = int(lease_time) * REBIND_PERC - renewing_time
    logger.debug('rebinding fuzz range %s', range_fuzz)
    fuzz = random.uniform(-(range_fuzz),
                          +(range_fuzz))
    renewing_time += fuzz
    logger.debug('Renewing time %s.', renewing_time)
    return renewing_time
.
def gen_rebinding_time(lease_time, elapsed=0):
    """Generate REBINDING time (T2) with random fuzz.

    T2 defaults to ``REBIND_PERC * lease_time`` (cf.
    :func:`gen_renewing_time` and [:rfc:`2131#section-4.4.5`]).

    :param lease_time: lease duration in seconds (int or numeric str)
    :param elapsed: seconds already elapsed, subtracted from T2
    """
    rebinding_time = int(lease_time) * REBIND_PERC - elapsed
    # FIXME:90 [:rfc:`2131#section-4.4.5`]: the chosen "fuzz" could fingerprint
    # the implementation
    # NOTE: here using same "fuzz" as systemd?
    # Fuzz is drawn from +/- the T2..lease-end interval length.
    range_fuzz = int(lease_time) - rebinding_time
    logger.debug('rebinding fuzz range %s', range_fuzz)
    fuzz = random.uniform(-(range_fuzz),
                          +(range_fuzz))
    rebinding_time += fuzz
    logger.debug('Rebinding time %s.', rebinding_time)
    return rebinding_time
Return the self object attributes not inherited as dict.
def dict_self(self):
    """Return the self object attributes not inherited as dict.

    Only attributes whose names appear in ``FSM_ATTRS`` are included.
    """
    return {k: v for k, v in self.__dict__.items() if k in FSM_ATTRS}
Reset object attributes when state is INIT.
def reset(self, iface=None, client_mac=None, xid=None, scriptfile=None):
    """Reset object attributes when state is INIT.

    :param iface: network interface; defaults to scapy's ``conf.iface``
    :param client_mac: MAC address; derived from the interface if None
    :param xid: transaction id for the DHCP exchange
    :param scriptfile: optional hook script run on state changes
    """
    logger.debug('Reseting attributes.')
    if iface is None:
        iface = conf.iface
    if client_mac is None:
        # scapy for python 3 returns byte, not tuple
        tempmac = get_if_raw_hwaddr(iface)
        if isinstance(tempmac, tuple) and len(tempmac) == 2:
            mac = tempmac[1]
        else:
            mac = tempmac
        client_mac = str2mac(mac)
    self.client = DHCPCAP(iface=iface, client_mac=client_mac, xid=xid)
    if scriptfile is not None:
        self.script = ClientScript(scriptfile)
    else:
        self.script = None
    # Per-exchange bookkeeping: retry counters and collected offers.
    self.time_sent_request = None
    self.discover_attempts = 0
    self.request_attempts = 0
    self.current_state = STATE_PREINIT
    self.offers = list()
Workaround to get timeout in the ATMT. timeout class method.
def get_timeout(self, state, function):
    """Workaround to get timeout in the ATMT.timeout class method.

    :param state: numeric state constant (translated via STATES2NAMES)
    :param function: ATMT timeout callback whose timeout value is wanted
    :returns: the timeout in seconds, or None if not found
    """
    state = STATES2NAMES[state]
    for timeout_fn_t in self.timeout[state]:
        # Entries are (timeout, function) tuples; match on the ATMT
        # condition name of the callback.
        if timeout_fn_t[1] is not None and \
                timeout_fn_t[1].atmt_condname == function.atmt_condname:
            logger.debug('Timeout for state %s, function %s, is %s',
                         state, function.atmt_condname, timeout_fn_t[0])
            return timeout_fn_t[0]
    return None
Workaround to change timeout values in the ATMT. timeout class method.
def set_timeout(self, state, function, newtimeout):
    """
    Workaround to change timeout values in the ATMT.timeout class method.

    self.timeout format is::

        {'STATE': [
            (TIMEOUT0, <function foo>),
            (TIMEOUT1, <function bar>)),
            (None, None)
            ],
        }

    :param state: numeric state constant (translated via STATES2NAMES)
    :param function: ATMT timeout callback to update
    :param newtimeout: new timeout in seconds
    """
    state = STATES2NAMES[state]
    for timeout_fn_t in self.timeout[state]:
        # access the function name
        if timeout_fn_t[1] is not None and \
                timeout_fn_t[1].atmt_condname == function.atmt_condname:
            # convert the tuple to a list to make it mutable
            timeout_l = list(timeout_fn_t)
            # modify the timeout
            timeout_l[0] = newtimeout
            # set the new timeoute to self.timeout
            i = self.timeout[state].index(timeout_fn_t)
            self.timeout[state][i] = tuple(timeout_l)
            logger.debug('Set state %s, function %s, to timeout %s',
                         state, function.atmt_condname, newtimeout)
Send discover.
def send_discover(self):
    """Send discover.

    Sends a DHCPDISCOVER and, while under the retry limit, bumps the
    attempt counter and doubles the SELECTING timeout accordingly.
    """
    assert self.client
    assert self.current_state == STATE_INIT or \
        self.current_state == STATE_SELECTING
    pkt = self.client.gen_discover()
    sendp(pkt)
    # FIXME:20 check that this is correct,: all or only discover?
    if self.discover_attempts < MAX_ATTEMPTS_DISCOVER:
        self.discover_attempts += 1
    # Exponential backoff based on the number of attempts so far.
    timeout = gen_timeout_resend(self.discover_attempts)
    self.set_timeout(self.current_state,
                     self.timeout_selecting,
                     timeout)
Select an offer from the offers received.
def select_offer(self):
    """Select an offer from the offers received.

    [:rfc:`2131#section-4.2`]::

        DHCP clients are free to use any strategy in selecting a DHCP
        server among those from which the client receives a DHCPOFFER.

    [:rfc:`2131#section-4.4.1`]::

        The time
        over which the client collects messages and the mechanism used to
        select one DHCPOFFER are implementation dependent.

    Nor [:rfc:`7844`] nor [:rfc:`2131`] specify the algorithm.
    Here, currently the first offer is selected.

    .. todo::
        - Check other implementations algorithm to select offer.

    """
    logger.debug('Selecting offer.')
    # Simplest possible policy: take the first offer collected.
    pkt = self.offers[0]
    self.client.handle_offer(pkt)
Send request.
def send_request(self):
    """Send request.

    [:rfc:`2131#section-3.1`]::

        a client retransmitting as described in section 4.1 might retransmit
        the DHCPREQUEST message four times, for a total delay of 60 seconds

    .. todo::
        - The maximum number of retransmitted REQUESTs is per state or in
          total?
        - Are the retransmitted REQUESTs independent to the retransmitted
          DISCOVERs?

    """
    assert self.client
    # BOUND renewals go unicast to the known server; otherwise broadcast.
    if self.current_state == STATE_BOUND:
        pkt = self.client.gen_request_unicast()
    else:
        pkt = self.client.gen_request()
    sendp(pkt)
    logger.debug('Modifying FSM obj, setting time_sent_request.')
    self.time_sent_request = nowutc()
    logger.info('DHCPREQUEST of %s on %s to %s port %s',
                self.client.iface, self.client.client_ip,
                self.client.server_ip, self.client.server_port)
    # NOTE: see previous TODO, maybe the MAX_ATTEMPTS_REQUEST needs to be
    # calculated per state.
    if self.request_attempts < MAX_ATTEMPTS_REQUEST:
        # FIX: was ``self.request_attempts *= 2``, which leaves the counter
        # stuck at its initial value of 0 (set in ``reset()``), so the retry
        # limit was never reached.  Increment like ``discover_attempts``;
        # the timeout itself already doubles inside gen_timeout_resend().
        self.request_attempts += 1
        logger.debug('Increased request attempts to %s',
                     self.request_attempts)
    if self.current_state == STATE_RENEWING:
        timeout_renewing = gen_timeout_request_renew(self.client.lease)
        self.set_timeout(self.current_state,
                         self.timeout_request_renewing,
                         timeout_renewing)
    elif self.current_state == STATE_REBINDING:
        timeout_rebinding = gen_timeout_request_rebind(self.client.lease)
        self.set_timeout(self.current_state,
                         self.timeout_request_rebinding,
                         timeout_rebinding)
    else:
        timeout_requesting = \
            gen_timeout_resend(self.request_attempts)
        self.set_timeout(self.current_state,
                         self.timeout_requesting,
                         timeout_requesting)
Set renewal rebinding times.
def set_timers(self):
    """Set renewal, rebinding times.

    Arms the T1 (renewal) and T2 (rebinding) ATMT timeouts from the
    values stored on the current lease.
    """
    logger.debug('setting timeouts')
    self.set_timeout(self.current_state,
                     self.renewing_time_expires,
                     self.client.lease.renewal_time)
    self.set_timeout(self.current_state,
                     self.rebinding_time_expires,
                     self.client.lease.rebinding_time)
Process a received ACK packet.
def process_received_ack(self, pkt):
    """Process a received ACK packet.

    Returns True when ``pkt`` is a DHCPACK and was handled, False
    otherwise.

    Not specified in [:rfc:`7844`].

    Probe the offered IP in [:rfc:`2131#section-2.2.`]::

        the allocating
        server SHOULD probe the reused address before allocating the address,
        e.g., with an ICMP echo request, and the client SHOULD probe the
        newly received address, e.g., with ARP.
        The client SHOULD broadcast an ARP reply to announce the client's new
        IP address and clear any outdated ARP cache entries in hosts on the
        client's subnet.

    It is also not specified in [:rfc:`7844`] nor [:rfc:`2131`] how to
    check that the offered IP is valid.

    .. todo::
        - Check that nor ``dhclient`` nor ``systemd-networkd`` send an ARP.
        - Check how other implementations check that the ACK paremeters
          are valid, ie, if the ACK fields match the fields in the OFFER.
        - Check to which state the client should go back to when the
          offered parameters are not valid.

    """
    if isack(pkt):
        try:
            self.event = self.client.handle_ack(pkt,
                                                self.time_sent_request)
        except AddrFormatError as err:
            # Server handed back an unparsable address; restart selection.
            logger.error(err)
            # NOTE: see previous TODO, maybe should go back to other state.
            raise self.SELECTING()
        # NOTE: see previous TODO, not checking address with ARP.
        logger.info('DHCPACK of %s from %s' %
                    (self.client.client_ip, self.client.server_ip))
        return True
    return False
Process a received NAK packet.
def process_received_nak(self, pkt):
    """Process a received NAK packet.

    Returns True when ``pkt`` is a DHCPNAK (logged), False otherwise.
    """
    if isnak(pkt):
        logger.info('DHCPNAK of %s from %s',
                    self.client.client_ip, self.client.server_ip)
        return True
    return False
INIT state.
def INIT(self):
    """INIT state.

    [:rfc:`2131#section-4.4.1`]::

        The client SHOULD wait a random time between one and ten seconds to
        desynchronize the use of DHCP at startup

    .. todo::
        - The initial delay is implemented, but probably is not in other
          implementations. Check what other implementations do.

    """
    # NOTE: in case INIT is reached from other state, initialize attributes
    # reset all variables.
    logger.debug('In state: INIT')
    # FIX: compare state constants by value (`!=`) rather than identity
    # (`is not`); identity on ints only works by accident of CPython's
    # small-integer caching.
    if self.current_state != STATE_PREINIT:
        self.reset()
    self.current_state = STATE_INIT
    # NOTE: see previous TODO, maybe this is not needed.
    if self.delay_selecting:
        if self.delay_before_selecting is None:
            delay_before_selecting = gen_delay_selecting()
        else:
            delay_before_selecting = self.delay_before_selecting
    else:
        delay_before_selecting = 0
    self.set_timeout(self.current_state,
                     self.timeout_delay_before_selecting,
                     delay_before_selecting)
    if self.timeout_select is not None:
        self.set_timeout(STATE_SELECTING,
                         self.timeout_selecting,
                         self.timeout_select)
BOUND state.
def BOUND(self):
    """BOUND state.

    Logs the transition, records the lease and applies the network
    configuration, either via the user-supplied script or directly
    with ``set_net``.
    """
    logger.debug('In state: BOUND')
    logger.info('(%s) state changed %s -> bound', self.client.iface,
                STATES2NAMES[self.current_state])
    self.current_state = STATE_BOUND
    self.client.lease.info_lease()
    if self.script is not None:
        self.script.script_init(self.client.lease, self.current_state)
        self.script.script_go()
    else:
        try:
            set_net(self.client.lease)
        except Exception as e:
            # Best-effort: log and stay BOUND even if applying the
            # configuration fails.
            logger.error('Can not set IP', exc_info=True)
RENEWING state.
def RENEWING(self):
    """RENEWING state.

    Runs the hook script when configured, otherwise re-applies the
    network configuration directly.
    """
    logger.debug('In state: RENEWING')
    self.current_state = STATE_RENEWING
    if self.script is not None:
        self.script.script_init(self.client.lease, self.current_state)
        self.script.script_go()
    else:
        set_net(self.client.lease)
REBINDING state.
def REBINDING(self):
    """REBINDING state.

    Runs the hook script when configured, otherwise re-applies the
    network configuration directly.
    """
    logger.debug('In state: REBINDING')
    self.current_state = STATE_REBINDING
    if self.script is not None:
        self.script.script_init(self.client.lease, self.current_state)
        self.script.script_go()
    else:
        set_net(self.client.lease)
END state.
def END(self):
    """END state.

    Final state: notify the hook script (or apply the network
    configuration) one last time.
    """
    logger.debug('In state: END')
    self.current_state = STATE_END
    if self.script is not None:
        self.script.script_init(self.client.lease, self.current_state)
        self.script.script_go()
    else:
        set_net(self.client.lease)
    return
ERROR state.
def ERROR(self):
    """ERROR state.

    Notifies the hook script, then restarts the FSM at INIT.
    """
    logger.debug('In state: ERROR')
    self.current_state = STATE_ERROR
    if self.script is not None:
        self.script.script_init(self.client.lease, self.current_state)
        self.script.script_go()
    # NOTE(review): unlike the other states, set_net is called even when a
    # script ran (no `else`) — confirm whether that is intentional.
    set_net(self.client.lease)
    raise self.INIT()
Timeout of selecting on SELECTING state.
def timeout_selecting(self):
    """Timeout of selecting on SELECTING state.

    Not specified in [:rfc:`7844`].

    See comments in :func:`dhcpcapfsm.DHCPCAPFSM.timeout_request`.

    """
    logger.debug('C2.1: T In %s, timeout receiving response to select.',
                 self.current_state)
    # Enough offers collected: move on to REQUESTING.
    if len(self.offers) >= MAX_OFFERS_COLLECTED:
        logger.debug('C2.2: T Maximum number of offers reached, '
                     'raise REQUESTING.')
        raise self.REQUESTING()
    # Discover retries exhausted: proceed with whatever offers we have,
    # or fail if there are none.
    if self.discover_attempts >= MAX_ATTEMPTS_DISCOVER:
        logger.debug('C2.3: T Maximum number of discover retries is %s'
                     ' and already sent %s.',
                     MAX_ATTEMPTS_DISCOVER, self.discover_attempts)
        if len(self.offers) <= 0:
            logger.debug('C2.4: T. But no OFFERS where received, '
                         'raise ERROR.')
            raise self.ERROR()
        logger.debug('C2.4: F. But there is some OFFERS, '
                     'raise REQUESTING.')
        raise self.REQUESTING()
    logger.debug('C2.2: F. Still not received all OFFERS, but not '
                 'max # attemps reached, raise SELECTING.')
    raise self.SELECTING()
Timeout requesting in REQUESTING state.
def timeout_requesting(self):
    """Timeout requesting in REQUESTING state.

    Not specified in [:rfc:`7844`].

    [:rfc:`2131#section-3.1`]::

        might retransmit the DHCPREQUEST message four times, for a total
        delay of 60 seconds

    """
    logger.debug("C3.2: T. In %s, timeout receiving response to request, ",
                 self.current_state)
    if self.discover_requests >= MAX_ATTEMPTS_REQUEST:
        # FIX: was `self.disover_requests` (typo); logger arguments are
        # evaluated eagerly, so hitting this branch raised AttributeError.
        logger.debug('C2.3: T. Maximum number %s of REQUESTs '
                     'reached, already sent %s, raise ERROR.',
                     MAX_ATTEMPTS_REQUEST, self.discover_requests)
        raise self.ERROR()
    logger.debug("C2.3: F. Maximum number of REQUESTs retries not reached,"
                 "raise REQUESTING.")
    raise self.REQUESTING()
Timeout of renewing on RENEWING state.
def timeout_request_renewing(self):
    """Timeout of renewing on RENEWING state.

    Same comments as in :func:`dhcpcapfsm.DHCPCAPFSM.timeout_requesting`.
    """
    logger.debug("C5.2:T In %s, timeout receiving response to request.",
                 self.current_state)
    if self.request_attempts >= MAX_ATTEMPTS_REQUEST:
        # FIX: was `self.disover_requests` (typo), which raised
        # AttributeError when this branch was hit; log the counter that
        # the condition above actually checks.
        logger.debug('C2.3: T Maximum number %s of REQUESTs '
                     'reached, already sent %s, wait to rebinding time.',
                     MAX_ATTEMPTS_REQUEST, self.request_attempts)
        # raise self.ERROR()
    # NOTE(review): even when the maximum is reached, control falls
    # through to re-raise RENEWING (ERROR is deliberately commented out,
    # "wait to rebinding time") -- confirm this retry-forever behavior.
    logger.debug("C2.3: F. Maximum number of REQUESTs retries not reached,"
                 "raise RENEWING.")
    raise self.RENEWING()
Timeout of request rebinding on REBINDING state.
def timeout_request_rebinding(self):
    """Timeout of request rebinding on REBINDING state.

    Same comments as in :func:`dhcpcapfsm.DHCPCAPFSM.timeout_requesting`.
    """
    logger.debug("C6.2:T In %s, timeout receiving response to request.",
                 self.current_state)
    if self.request_attempts >= MAX_ATTEMPTS_REQUEST:
        # FIX: was `self.disover_requests` (typo), which raised
        # AttributeError when this branch was hit; log the counter that
        # the condition above actually checks.
        logger.debug('C.2.3: T. Maximum number %s of REQUESTs '
                     'reached, already sent %s, wait lease time expires.',
                     MAX_ATTEMPTS_REQUEST, self.request_attempts)
        # raise self.ERROR()
    logger.debug("C2.3: F. Maximum number of REQUESTs retries not reached,"
                 "raise REBINDING.")
    raise self.REBINDING()
Receive offer on SELECTING state.
def receive_offer(self, pkt):
    """Handle a packet received while in SELECTING state.

    Collects DHCP offers; once enough are gathered, selects one and
    transitions to REQUESTING, otherwise keeps SELECTING.
    """
    logger.debug("C2. Received OFFER?, in SELECTING state.")
    if not isoffer(pkt):
        # Not an offer: stay in the current state (condition not matched).
        return
    logger.debug("C2: T, OFFER received")
    self.offers.append(pkt)
    if len(self.offers) < MAX_OFFERS_COLLECTED:
        logger.debug("C2.5: F, raise SELECTING.")
        raise self.SELECTING()
    logger.debug("C2.5: T, raise REQUESTING.")
    self.select_offer()
    raise self.REQUESTING()
Receive ACK in REQUESTING state.
def receive_ack_requesting(self, pkt):
    """Handle ACK reception while in REQUESTING state."""
    logger.debug("C3. Received ACK?, in REQUESTING state.")
    if not self.process_received_ack(pkt):
        return
    logger.debug("C3: T. Received ACK, in REQUESTING state, "
                 "raise BOUND.")
    raise self.BOUND()
Receive NAK in REQUESTING state.
def receive_nak_requesting(self, pkt):
    """Handle NAK reception while in REQUESTING state."""
    logger.debug("C3.1. Received NAK?, in REQUESTING state.")
    if not self.process_received_nak(pkt):
        return
    logger.debug("C3.1: T. Received NAK, in REQUESTING state, "
                 "raise INIT.")
    raise self.INIT()
Receive ACK in RENEWING state.
def receive_ack_renewing(self, pkt):
    """Handle ACK reception while in RENEWING state."""
    logger.debug("C3. Received ACK?, in RENEWING state.")
    if not self.process_received_ack(pkt):
        return
    logger.debug("C3: T. Received ACK, in RENEWING state, "
                 "raise BOUND.")
    raise self.BOUND()
Receive NAK in RENEWING state.
def receive_nak_renewing(self, pkt):
    """Handle NAK reception while in RENEWING state."""
    logger.debug("C3.1. Received NAK?, in RENEWING state.")
    if not self.process_received_nak(pkt):
        return
    logger.debug("C3.1: T. Received NAK, in RENEWING state, "
                 " raise INIT.")
    raise self.INIT()
Receive ACK in REBINDING state.
def receive_ack_rebinding(self, pkt):
    """Handle ACK reception while in REBINDING state."""
    logger.debug("C3. Received ACK?, in REBINDING state.")
    if not self.process_received_ack(pkt):
        return
    logger.debug("C3: T. Received ACK, in REBINDING state, "
                 "raise BOUND.")
    raise self.BOUND()
Receive NAK in REBINDING state.
def receive_nak_rebinding(self, pkt):
    """Receive NAK in REBINDING state."""
    # FIX: the debug messages wrongly said "RENEWING" in this REBINDING
    # handler (copy-paste error), which made traces misleading.
    logger.debug("C3.1. Received NAK?, in REBINDING state.")
    if self.process_received_nak(pkt):
        logger.debug("C3.1: T. Received NAK, in REBINDING state, "
                     "raise INIT.")
        raise self.INIT()
Action on renewing on RENEWING state.
def on_renewing(self):
    """Action on renewing on RENEWING state.

    The lease is not re-recorded; its network values are sanitized, its
    times are reset from the request timestamp, and the timers restart.
    """
    lease = self.client.lease
    lease.sanitize_net_values()
    lease.set_times(self.time_sent_request)
    self.set_timers()
Return whether a packet is a DHCP offer.
def isoffer(packet):
    """Return True when *packet* carries a DHCP offer message."""
    if DHCP not in packet:
        return False
    # The message type is the value of the first DHCP option; it may be
    # a numeric code (mapped through DHCPTypes) or the literal string.
    msg_type = packet[DHCP].options[0][1]
    if DHCPTypes.get(msg_type) == 'offer' or msg_type == "offer":
        logger.debug('Packet is Offer.')
        return True
    return False
Assign a value; remove the parameter if the value is None.
def set(self, name, value):
    """Assign *value* to parameter *name*; remove the parameter if it is None.

    Returns a new clone; the original querystring is not mutated.
    """
    clone = self._clone()
    # FIX: the old check `VERSION[0] <= 1 and VERSION[1] <= 4` compared
    # components independently and mis-handled pre-1.x versions; compare
    # the (major, minor) tuple instead. On Django <= 1.4, falsy values
    # are coerced to None (and thus removed).
    if django.VERSION[:2] <= (1, 4):
        value = value or None
    # Drop every existing pair with this name, then re-add if a value remains.
    clone._qsl = [(q, v) for (q, v) in self._qsl if q != name]
    if value is not None:
        clone._qsl.append((name, value))
    return clone
Append a value to multiple value parameter.
def add(self, name, value):
    """Append a value to a multiple-value parameter.

    Any identical (name, value) pair is removed first so the pair
    appears exactly once, at the end. Returns a new clone.
    """
    clone = self._clone()
    pair = (name, value)
    clone._qsl = [entry for entry in self._qsl
                  if not (entry[0] == name and entry[1] == value)]
    clone._qsl.append(pair)
    return clone
Remove a value from multiple value parameter.
def remove(self, name, value):
    """Remove a value from a multiple-value parameter.

    The value is compared via its string form. Returns a new clone.
    """
    clone = self._clone()
    target = (name, str(value))
    clone._qsl = [pair for pair in self._qsl if pair != target]
    return clone