Columns: sentence1 (string, lengths 52 to 3.87M), sentence2 (string, lengths 1 to 47.2k), label (stringclasses, 1 value: "entailment").
def _declare_consumer(self, consumer, nowait=False):
    """Declare consumer so messages can be received from it using
    :meth:`iterconsume`."""
    if consumer.queue not in self._open_consumers:
        # Use the ConsumerSet's callback by default, but if the child
        # consumer has its own callback, honor it.
        callback = (consumer._receive_callback if consumer.callbacks
                    else self._receive_callback)
        self.backend.declare_consumer(queue=consumer.queue,
                                      no_ack=consumer.no_ack,
                                      nowait=nowait,
                                      callback=callback,
                                      consumer_tag=consumer.consumer_tag)
        self._open_consumers[consumer.queue] = consumer.consumer_tag
Declare consumer so messages can be received from it using :meth:`iterconsume`.
entailment
def consume(self):
    """Declare consumers."""
    head = self.consumers[:-1]
    tail = self.consumers[-1]
    for consumer in head:
        self._declare_consumer(consumer, nowait=True)
    self._declare_consumer(tail, nowait=False)
Declare consumers.
entailment
def iterconsume(self, limit=None):
    """Cycle between all consumers in consume mode.

    See :meth:`Consumer.iterconsume`.
    """
    self.consume()
    return self.backend.consume(limit=limit)
Cycle between all consumers in consume mode. See :meth:`Consumer.iterconsume`.
entailment
def cancel(self):
    """Cancel a running :meth:`iterconsume` session."""
    for consumer_tag in self._open_consumers.values():
        try:
            self.backend.cancel(consumer_tag)
        except KeyError:
            pass
    self._open_consumers.clear()
Cancel a running :meth:`iterconsume` session.
entailment
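A minimal lifecycle sketch for the three ConsumerSet methods above (declare via iterconsume, iterate, cancel). The constructor signature, the from_dict queue format, and register_callback are assumptions based on carrot-style APIs, not confirmed by this source:

    # Hypothetical wiring; adjust names to your broker setup.
    consumer_set = ConsumerSet(connection, from_dict={
        "feed": {"exchange": "feeds", "routing_key": "importer"},
    })
    consumer_set.register_callback(handle_message)  # assumed callback hook
    try:
        # iterconsume() declares all consumers, then yields as messages
        # arrive; limit bounds the number of messages consumed.
        for _ in consumer_set.iterconsume(limit=10):
            pass
    finally:
        consumer_set.cancel()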
def convert_md_to_rst(source, destination=None, backup_dir=None):
    """Try to convert the source, an .md (markdown) file, to an .rst
    (reStructuredText) file at the destination.

    If the destination isn't provided, it defaults to the source path
    with its extension changed to .rst. If the destination file already
    exists, it will be overwritten. In the event of an error, the
    destination file will be left untouched.
    """
    # Doing this in the function instead of at the module level ensures
    # the error occurs when the function is called, rather than when the
    # module is evaluated.
    try:
        import pypandoc
    except ImportError:
        # Don't give up right away; first try to install the python module.
        os.system("pip install pypandoc")
        import pypandoc

    # Set our destination path to a default, if necessary.
    destination = destination or (os.path.splitext(source)[0] + '.rst')

    # Likewise for the backup directory.
    backup_dir = backup_dir or os.path.join(os.path.dirname(destination), 'bak')
    bak_name = (os.path.basename(destination) +
                time.strftime('.%Y%m%d%H%M%S.bak'))
    bak_path = os.path.join(backup_dir, bak_name)

    # If there's already a file at the destination path, move it out of
    # the way, but don't delete it.
    if os.path.isfile(destination):
        if not os.path.isdir(os.path.dirname(bak_path)):
            os.mkdir(os.path.dirname(bak_path))
        os.rename(destination, bak_path)

    try:
        # Try to convert the file.
        pypandoc.convert(source, 'rst', format='md', outputfile=destination)
    except:
        # If for any reason the conversion fails, try to put things back
        # like we found them.
        if os.path.isfile(destination):
            os.remove(destination)
        if os.path.isfile(bak_path):
            os.rename(bak_path, destination)
        raise

Try to convert the source, an .md (markdown) file, to an .rst (reStructuredText) file at the destination. If the destination isn't provided, it defaults to the source path with its extension changed to .rst. If the destination file already exists, it will be overwritten. In the event of an error, the destination file will be left untouched.
entailment
def build_readme(base_path=None):
    """Call the conversion routine on README.md to generate README.rst.

    Why do all this? Because PyPI requires reStructuredText, but markdown
    is friendlier to work with and is nicer for GitHub.
    """
    if base_path:
        path = os.path.join(base_path, 'README.md')
    else:
        path = 'README.md'
    convert_md_to_rst(path)
    print("Successfully converted README.md to README.rst")

Call the conversion routine on README.md to generate README.rst. Why do all this? Because PyPI requires reStructuredText, but markdown is friendlier to work with and is nicer for GitHub.
entailment
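A helper like build_readme is typically invoked from a packaging script so README.rst is regenerated just before a release. The setup.py hook below is a sketch under that assumption, not part of the source:

    # setup.py (sketch)
    import sys

    if 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
        # Regenerate README.rst from README.md before packaging.
        build_readme()

    with open('README.rst') as f:
        long_description = f.read()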
def sense(self):
    """Return a situation, encoded as a bit string, which represents the
    observable state of the environment.

    Usage:
        situation = scenario.sense()
        assert isinstance(situation, BitString)

    Arguments: None
    Return: The current situation.
    """
    self.current_situation = bitstrings.BitString([
        random.randrange(2)
        for _ in range(self.address_size + (1 << self.address_size))
    ])
    return self.current_situation

Return a situation, encoded as a bit string, which represents the observable state of the environment.

Usage:
    situation = scenario.sense()
    assert isinstance(situation, BitString)

Arguments: None
Return: The current situation.
entailment
def execute(self, action):
    """Execute the indicated action within the environment and return
    the resulting immediate reward dictated by the reward program.

    Usage:
        immediate_reward = scenario.execute(selected_action)

    Arguments:
        action: The action to be executed within the current situation.
    Return:
        A float, the reward received for the action that was executed,
        or None if no reward is offered.
    """
    assert action in self.possible_actions
    self.remaining_cycles -= 1
    index = int(bitstrings.BitString(
        self.current_situation[:self.address_size]
    ))
    bit = self.current_situation[self.address_size + index]
    return action == bit

Execute the indicated action within the environment and return the resulting immediate reward dictated by the reward program.

Usage:
    immediate_reward = scenario.execute(selected_action)

Arguments:
    action: The action to be executed within the current situation.
Return:
    A float, the reward received for the action that was executed, or None if no reward is offered.
entailment
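The sense/execute pair above composes into the standard scenario loop. A random-baseline sketch is shown below; the MUXProblem construction and the more() method are assumptions drawn from the other scenario methods in this section, since their definitions are not shown here:

    import random

    scenario = MUXProblem()  # hypothetical construction
    correct = trials = 0
    while scenario.more():   # assumed, by analogy with reset()'s usage note
        situation = scenario.sense()
        action = random.choice(list(scenario.get_possible_actions()))
        reward = scenario.execute(action)  # True/False for right/wrong bit
        correct += bool(reward)
        trials += 1
    print("random baseline accuracy:", correct / max(trials, 1))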
def reset(self):
    """Reset the scenario, starting it over for a new run.

    Usage:
        if not scenario.more():
            scenario.reset()

    Arguments: None
    Return: None
    """
    self.remaining_cycles = self.initial_training_cycles
    self.needle_index = random.randrange(self.input_size)

Reset the scenario, starting it over for a new run.

Usage:
    if not scenario.more():
        scenario.reset()

Arguments: None
Return: None
entailment
def sense(self):
    """Return a situation, encoded as a bit string, which represents the
    observable state of the environment.

    Usage:
        situation = scenario.sense()
        assert isinstance(situation, BitString)

    Arguments: None
    Return: The current situation.
    """
    haystack = bitstrings.BitString.random(self.input_size)
    self.needle_value = haystack[self.needle_index]
    return haystack

Return a situation, encoded as a bit string, which represents the observable state of the environment.

Usage:
    situation = scenario.sense()
    assert isinstance(situation, BitString)

Arguments: None
Return: The current situation.
entailment
def execute(self, action):
    """Execute the indicated action within the environment and return
    the resulting immediate reward dictated by the reward program.

    Usage:
        immediate_reward = scenario.execute(selected_action)

    Arguments:
        action: The action to be executed within the current situation.
    Return:
        A float, the reward received for the action that was executed,
        or None if no reward is offered.
    """
    assert action in self.possible_actions
    self.remaining_cycles -= 1
    return action == self.needle_value

Execute the indicated action within the environment and return the resulting immediate reward dictated by the reward program.

Usage:
    immediate_reward = scenario.execute(selected_action)

Arguments:
    action: The action to be executed within the current situation.
Return:
    A float, the reward received for the action that was executed, or None if no reward is offered.
entailment
def get_possible_actions(self):
    """Return a sequence containing the possible actions that can be
    executed within the environment.

    Usage:
        possible_actions = scenario.get_possible_actions()

    Arguments: None
    Return:
        A sequence containing the possible actions which can be executed
        within this scenario.
    """
    possible_actions = self.wrapped.get_possible_actions()

    if len(possible_actions) <= 20:
        # Try to ensure that the possible actions are unique. Also, put
        # them into a list so we can iterate over them safely before
        # returning them; this avoids accidentally exhausting an
        # iterator, if the wrapped class happens to return one.
        try:
            possible_actions = list(set(possible_actions))
        except TypeError:
            possible_actions = list(possible_actions)

        try:
            possible_actions.sort()
        except TypeError:
            pass

        self.logger.info('Possible actions:')
        for action in possible_actions:
            self.logger.info('    %s', action)
    else:
        self.logger.info("%d possible actions.", len(possible_actions))

    return possible_actions

Return a sequence containing the possible actions that can be executed within the environment.

Usage:
    possible_actions = scenario.get_possible_actions()

Arguments: None
Return:
    A sequence containing the possible actions which can be executed within this scenario.
entailment
def sense(self):
    """Return a situation, encoded as a bit string, which represents the
    observable state of the environment.

    Usage:
        situation = scenario.sense()
        assert isinstance(situation, BitString)

    Arguments: None
    Return: The current situation.
    """
    situation = self.wrapped.sense()
    self.logger.debug('Situation: %s', situation)
    return situation

Return a situation, encoded as a bit string, which represents the observable state of the environment.

Usage:
    situation = scenario.sense()
    assert isinstance(situation, BitString)

Arguments: None
Return: The current situation.
entailment
def execute(self, action):
    """Execute the indicated action within the environment and return
    the resulting immediate reward dictated by the reward program.

    Usage:
        immediate_reward = scenario.execute(selected_action)

    Arguments:
        action: The action to be executed within the current situation.
    Return:
        A float, the reward received for the action that was executed,
        or None if no reward is offered.
    """
    self.logger.debug('Executing action: %s', action)

    reward = self.wrapped.execute(action)
    if reward:
        self.total_reward += reward
    self.steps += 1

    self.logger.debug('Reward received on this step: %.5f', reward or 0)
    self.logger.debug('Average reward per step: %.5f',
                      self.total_reward / self.steps)

    return reward

Execute the indicated action within the environment and return the resulting immediate reward dictated by the reward program.

Usage:
    immediate_reward = scenario.execute(selected_action)

Arguments:
    action: The action to be executed within the current situation.
Return:
    A float, the reward received for the action that was executed, or None if no reward is offered.
entailment
def more(self):
    """Return a Boolean indicating whether additional actions may be
    executed, per the reward program.

    Usage:
        while scenario.more():
            situation = scenario.sense()
            selected_action = choice(possible_actions)
            reward = scenario.execute(selected_action)

    Arguments: None
    Return:
        A bool indicating whether additional situations remain in the
        current run.
    """
    more = self.wrapped.more()

    if not self.steps % 100:
        self.logger.info('Steps completed: %d', self.steps)
        self.logger.info('Average reward per step: %.5f',
                         self.total_reward / (self.steps or 1))
    if not more:
        self.logger.info('Run completed.')
        self.logger.info('Total steps: %d', self.steps)
        self.logger.info('Total reward received: %.5f', self.total_reward)
        self.logger.info('Average reward per step: %.5f',
                         self.total_reward / (self.steps or 1))

    return more

Return a Boolean indicating whether additional actions may be executed, per the reward program.

Usage:
    while scenario.more():
        situation = scenario.sense()
        selected_action = choice(possible_actions)
        reward = scenario.execute(selected_action)

Arguments: None
Return:
    A bool indicating whether additional situations remain in the current run.
entailment
def execute(self, action):
    """Execute the indicated action within the environment and return
    the resulting immediate reward dictated by the reward program.

    Usage:
        immediate_reward = scenario.execute(selected_action)

    Arguments:
        action: The action to be executed within the current situation.
    Return:
        A float, the reward received for the action that was executed,
        or None if no reward is offered.
    """
    reward = self.reward_function(action, self.classifications[self.steps])
    self.total_reward += reward
    self.steps += 1
    return reward

Execute the indicated action within the environment and return the resulting immediate reward dictated by the reward program.

Usage:
    immediate_reward = scenario.execute(selected_action)

Arguments:
    action: The action to be executed within the current situation.
Return:
    A float, the reward received for the action that was executed, or None if no reward is offered.
entailment
def get_classifications(self):
    """Return the classifications made by the algorithm for this
    scenario.

    Usage:
        model.run(scenario, learn=False)
        classifications = scenario.get_classifications()

    Arguments: None
    Return:
        An indexable sequence containing the classifications made by the
        model for each situation, in the same order as the original
        situations themselves appear.
    """
    if bitstrings.using_numpy():
        return numpy.array(self.classifications)
    else:
        return self.classifications

Return the classifications made by the algorithm for this scenario.

Usage:
    model.run(scenario, learn=False)
    classifications = scenario.get_classifications()

Arguments: None
Return:
    An indexable sequence containing the classifications made by the model for each situation, in the same order as the original situations themselves appear.
entailment
def new_model(self, scenario):
    """Create and return a new classifier set initialized for handling
    the given scenario.

    Usage:
        scenario = MUXProblem()
        model = algorithm.new_model(scenario)
        model.run(scenario, learn=True)

    Arguments:
        scenario: A Scenario instance.
    Return:
        A new, untrained classifier set, suited for the given scenario.
    """
    assert isinstance(scenario, scenarios.Scenario)
    return ClassifierSet(self, scenario.get_possible_actions())

Create and return a new classifier set initialized for handling the given scenario.

Usage:
    scenario = MUXProblem()
    model = algorithm.new_model(scenario)
    model.run(scenario, learn=True)

Arguments:
    scenario: A Scenario instance.
Return:
    A new, untrained classifier set, suited for the given scenario.
entailment
def run(self, scenario):
    """Run the algorithm, utilizing a classifier set to choose the most
    appropriate action for each situation produced by the scenario.
    Improve the situation/action mapping on each reward cycle to maximize
    reward. Return the classifier set that was created.

    Usage:
        scenario = MUXProblem()
        model = algorithm.run(scenario)

    Arguments:
        scenario: A Scenario instance.
    Return:
        A new classifier set, trained on the given scenario.
    """
    assert isinstance(scenario, scenarios.Scenario)
    model = self.new_model(scenario)
    model.run(scenario, learn=True)
    return model

Run the algorithm, utilizing a classifier set to choose the most appropriate action for each situation produced by the scenario. Improve the situation/action mapping on each reward cycle to maximize reward. Return the classifier set that was created.

Usage:
    scenario = MUXProblem()
    model = algorithm.run(scenario)

Arguments:
    scenario: A Scenario instance.
Return:
    A new classifier set, trained on the given scenario.
entailment
def _compute_prediction(self):
    """Compute the combined prediction and prediction weight for this
    action set.

    The combined prediction is the weighted average of the individual
    predictions of the classifiers. The combined prediction weight is
    the sum of the individual prediction weights of the classifiers.

    Usage:
        Do not call this method directly. Use the prediction and/or
        prediction_weight properties instead.

    Arguments: None
    Return: None
    """
    total_weight = 0
    total_prediction = 0
    for rule in self._rules.values():
        total_weight += rule.prediction_weight
        total_prediction += rule.prediction * rule.prediction_weight
    self._prediction = total_prediction / (total_weight or 1)
    self._prediction_weight = total_weight

Compute the combined prediction and prediction weight for this action set. The combined prediction is the weighted average of the individual predictions of the classifiers. The combined prediction weight is the sum of the individual prediction weights of the classifiers.

Usage:
    Do not call this method directly. Use the prediction and/or prediction_weight properties instead.

Arguments: None
Return: None
entailment
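Numerically, the combined prediction is just a weighted mean. As a worked example (values invented for illustration): three rules predicting 10, 20, and 40 with prediction weights 0.5, 0.3, and 0.2 give a combined prediction of (10*0.5 + 20*0.3 + 40*0.2) / (0.5 + 0.3 + 0.2) = 19.0 and a combined prediction weight of 1.0. The `or 1` guard only matters for an empty action set, where it avoids a zero division and leaves the prediction at 0.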
def best_prediction(self):
    """The highest value from among the predictions made by the action
    sets in this match set."""
    if self._best_prediction is None and self._action_sets:
        self._best_prediction = max(
            action_set.prediction
            for action_set in self._action_sets.values()
        )
    return self._best_prediction
The highest value from among the predictions made by the action sets in this match set.
entailment
def best_actions(self):
    """A tuple containing the actions whose action sets have the best
    prediction."""
    if self._best_actions is None:
        best_prediction = self.best_prediction
        self._best_actions = tuple(
            action
            for action, action_set in self._action_sets.items()
            if action_set.prediction == best_prediction
        )
    return self._best_actions
A tuple containing the actions whose action sets have the best prediction.
entailment
def select_action(self):
    """Select an action according to the action selection strategy of
    the associated algorithm. If an action has already been selected,
    raise a ValueError instead.

    Usage:
        if match_set.selected_action is None:
            match_set.select_action()

    Arguments: None
    Return:
        The action that was selected by the action selection strategy.
    """
    if self._selected_action is not None:
        raise ValueError("The action has already been selected.")
    strategy = self._algorithm.action_selection_strategy
    self._selected_action = strategy(self)
    return self._selected_action

Select an action according to the action selection strategy of the associated algorithm. If an action has already been selected, raise a ValueError instead.

Usage:
    if match_set.selected_action is None:
        match_set.select_action()

Arguments: None
Return:
    The action that was selected by the action selection strategy.
entailment
def _set_selected_action(self, action):
    """Setter method for the selected_action property."""
    assert action in self._action_sets
    if self._selected_action is not None:
        raise ValueError("The action has already been selected.")
    self._selected_action = action
Setter method for the selected_action property.
entailment
def _set_payoff(self, payoff):
    """Setter method for the payoff property."""
    if self._selected_action is None:
        raise ValueError("The action has not been selected yet.")
    if self._closed:
        raise ValueError("The payoff for this match set has already "
                         "been applied.")
    self._payoff = float(payoff)
Setter method for the payoff property.
entailment
def pay(self, predecessor):
    """If the predecessor is not None, gives the appropriate amount of
    payoff to the predecessor in payment for its contribution to this
    match set's expected future payoff. The predecessor argument should
    be either None or a MatchSet instance whose selected action led
    directly to this match set's situation.

    Usage:
        match_set = model.match(situation)
        match_set.pay(previous_match_set)

    Arguments:
        predecessor: The MatchSet instance which was produced by the
            same classifier set in response to the immediately preceding
            situation, or None if this is the first situation in the
            scenario.
    Return: None
    """
    assert predecessor is None or isinstance(predecessor, MatchSet)

    if predecessor is not None:
        expectation = self._algorithm.get_future_expectation(self)
        predecessor.payoff += expectation

If the predecessor is not None, gives the appropriate amount of payoff to the predecessor in payment for its contribution to this match set's expected future payoff. The predecessor argument should be either None or a MatchSet instance whose selected action led directly to this match set's situation.

Usage:
    match_set = model.match(situation)
    match_set.pay(previous_match_set)

Arguments:
    predecessor: The MatchSet instance which was produced by the same classifier set in response to the immediately preceding situation, or None if this is the first situation in the scenario.
Return: None
entailment
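The source does not show get_future_expectation, but in XCS-style temporal-difference learning the future expectation is typically a discount factor applied to the best prediction available in the current match set. A sketch under that assumption:

    def get_future_expectation(self, match_set):
        # Hypothetical XCS-style implementation: the discounted best
        # prediction of the successor match set. discount_factor (gamma)
        # is assumed to be a parameter of the algorithm.
        return self.discount_factor * match_set.best_prediction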
def apply_payoff(self):
    """Apply the payoff that has been accumulated from immediate reward
    and/or payments from successor match sets. Attempting to call this
    method before an action has been selected, or after it has already
    been called for the same match set, will result in a ValueError.

    Usage:
        match_set.select_action()
        match_set.payoff = reward
        match_set.apply_payoff()

    Arguments: None
    Return: None
    """
    if self._selected_action is None:
        raise ValueError("The action has not been selected yet.")
    if self._closed:
        raise ValueError("The payoff for this match set has already "
                         "been applied.")
    self._algorithm.distribute_payoff(self)
    self._payoff = 0
    self._algorithm.update(self)
    self._closed = True

Apply the payoff that has been accumulated from immediate reward and/or payments from successor match sets. Attempting to call this method before an action has been selected, or after it has already been called for the same match set, will result in a ValueError.

Usage:
    match_set.select_action()
    match_set.payoff = reward
    match_set.apply_payoff()

Arguments: None
Return: None
entailment
def match(self, situation):
    """Accept a situation (input) and return a MatchSet containing the
    classifier rules whose conditions match the situation. If
    appropriate per the algorithm managing this classifier set, create
    new rules to ensure sufficient coverage of the possible actions.

    Usage:
        match_set = model.match(situation)

    Arguments:
        situation: The situation for which a match set is desired.
    Return:
        A MatchSet instance for the given situation, drawn from the
        classifier rules in this classifier set.
    """
    # Find the conditions that match against the current situation, and
    # group them according to which action(s) they recommend.
    by_action = {}
    for condition, actions in self._population.items():
        if not condition(situation):
            continue
        for action, rule in actions.items():
            if action in by_action:
                by_action[action][condition] = rule
            else:
                by_action[action] = {condition: rule}

    # Construct the match set.
    match_set = MatchSet(self, situation, by_action)

    # If an insufficient number of actions are recommended, create some
    # new rules (condition/action pairs) until there are enough actions
    # being recommended.
    if self._algorithm.covering_is_required(match_set):
        # Ask the algorithm to provide a new classifier rule to add to
        # the population.
        rule = self._algorithm.cover(match_set)

        # Ensure that the condition provided by the algorithm does
        # indeed match the situation. If not, there is a bug in the
        # algorithm.
        assert rule.condition(situation)

        # Add the new classifier, getting back a list of the rule(s)
        # which had to be removed to make room for it.
        replaced = self.add(rule)

        # Remove the rules that were removed from the population from
        # the action set, as well. Note that they may not appear in the
        # action set, in which case nothing is done.
        for replaced_rule in replaced:
            action = replaced_rule.action
            condition = replaced_rule.condition
            if action in by_action and condition in by_action[action]:
                del by_action[action][condition]
                if not by_action[action]:
                    del by_action[action]

        # Add the new classifier to the action set. This is done after
        # the replaced rules are removed, just in case the algorithm
        # provided us with a rule that was already present and was
        # displaced.
        if rule.action not in by_action:
            by_action[rule.action] = {}
        by_action[rule.action][rule.condition] = rule

        # Reconstruct the match set with the modifications we just made.
        match_set = MatchSet(self, situation, by_action)

    # Return the newly created match set.
    return match_set

Accept a situation (input) and return a MatchSet containing the classifier rules whose conditions match the situation. If appropriate per the algorithm managing this classifier set, create new rules to ensure sufficient coverage of the possible actions.

Usage:
    match_set = model.match(situation)

Arguments:
    situation: The situation for which a match set is desired.
Return:
    A MatchSet instance for the given situation, drawn from the classifier rules in this classifier set.
entailment
def add(self, rule):
    """Add a new classifier rule to the classifier set. Return a list
    containing zero or more rules that were deleted from the classifier
    by the algorithm in order to make room for the new rule. The rule
    argument should be a ClassifierRule instance.

    The behavior of this method depends on whether the rule already
    exists in the classifier set. When a rule is already present, the
    rule's numerosity is added to that of the version of the rule
    already present in the population. Otherwise, the new rule is
    captured. Note that this means that for rules already present in
    the classifier set, the metadata of the existing rule is not
    overwritten by that of the one passed in as an argument.

    Usage:
        displaced_rules = model.add(rule)

    Arguments:
        rule: A ClassifierRule instance which is to be added to this
            classifier set.
    Return:
        A possibly empty list of ClassifierRule instances which were
        removed altogether from the classifier set (as opposed to simply
        having their numerosities decremented) in order to make room for
        the newly added rule.
    """
    assert isinstance(rule, ClassifierRule)

    condition = rule.condition
    action = rule.action

    # If the rule already exists in the population, then we virtually
    # add the rule by incrementing the existing rule's numerosity. This
    # prevents redundancy in the rule set. Otherwise we capture the new
    # rule.
    if condition not in self._population:
        self._population[condition] = {}
    if action in self._population[condition]:
        existing_rule = self._population[condition][action]
        existing_rule.numerosity += rule.numerosity
    else:
        self._population[condition][action] = rule

    # Any time we add a rule, we need to call this to keep the
    # population size under control.
    return self._algorithm.prune(self)

Add a new classifier rule to the classifier set. Return a list containing zero or more rules that were deleted from the classifier by the algorithm in order to make room for the new rule. The rule argument should be a ClassifierRule instance.

The behavior of this method depends on whether the rule already exists in the classifier set. When a rule is already present, the rule's numerosity is added to that of the version of the rule already present in the population. Otherwise, the new rule is captured. Note that this means that for rules already present in the classifier set, the metadata of the existing rule is not overwritten by that of the one passed in as an argument.

Usage:
    displaced_rules = model.add(rule)

Arguments:
    rule: A ClassifierRule instance which is to be added to this classifier set.
Return:
    A possibly empty list of ClassifierRule instances which were removed altogether from the classifier set (as opposed to simply having their numerosities decremented) in order to make room for the newly added rule.
entailment
def discard(self, rule, count=1):
    """Remove one or more instances of a rule from the classifier set.
    Return a Boolean indicating whether the rule's numerosity dropped
    to zero. (If the rule's numerosity was already zero, do nothing and
    return False.)

    Usage:
        if rule in model and model.discard(rule, count=3):
            print("Rule numerosity dropped to zero.")

    Arguments:
        rule: A ClassifierRule instance whose numerosity is to be
            decremented.
        count: An int, the size of the decrement to the rule's
            numerosity; default is 1.
    Return:
        A bool indicating whether the rule was removed altogether from
        the classifier set, as opposed to simply having its numerosity
        decremented.
    """
    assert isinstance(rule, ClassifierRule)
    assert isinstance(count, int) and count >= 0

    rule = self.get(rule)
    if rule is None:
        return False

    # Only actually remove the rule if its numerosity drops below 1.
    rule.numerosity -= count
    if rule.numerosity <= 0:
        # Ensure that if there is still a reference to this rule
        # elsewhere, its numerosity is still well-defined.
        rule.numerosity = 0

        del self._population[rule.condition][rule.action]
        if not self._population[rule.condition]:
            del self._population[rule.condition]
        return True

    return False

Remove one or more instances of a rule from the classifier set. Return a Boolean indicating whether the rule's numerosity dropped to zero. (If the rule's numerosity was already zero, do nothing and return False.)

Usage:
    if rule in model and model.discard(rule, count=3):
        print("Rule numerosity dropped to zero.")

Arguments:
    rule: A ClassifierRule instance whose numerosity is to be decremented.
    count: An int, the size of the decrement to the rule's numerosity; default is 1.
Return:
    A bool indicating whether the rule was removed altogether from the classifier set, as opposed to simply having its numerosity decremented.
entailment
def get(self, rule, default=None):
    """Return the existing version of the given rule. If the rule is
    not present in the classifier set, return the default. If no
    default was given, use None. This is useful for eliminating
    duplicate copies of rules.

    Usage:
        unique_rule = model.get(possible_duplicate, possible_duplicate)

    Arguments:
        rule: The ClassifierRule instance which may be a duplicate of
            another already contained in the classifier set.
        default: The value returned if the rule is not a duplicate of
            another already contained in the classifier set.
    Return:
        If the rule is a duplicate of another already contained in the
        classifier set, the existing one is returned. Otherwise, the
        value of default is returned.
    """
    assert isinstance(rule, ClassifierRule)

    if (rule.condition not in self._population or
            rule.action not in self._population[rule.condition]):
        return default
    return self._population[rule.condition][rule.action]

Return the existing version of the given rule. If the rule is not present in the classifier set, return the default. If no default was given, use None. This is useful for eliminating duplicate copies of rules.

Usage:
    unique_rule = model.get(possible_duplicate, possible_duplicate)

Arguments:
    rule: The ClassifierRule instance which may be a duplicate of another already contained in the classifier set.
    default: The value returned if the rule is not a duplicate of another already contained in the classifier set.
Return:
    If the rule is a duplicate of another already contained in the classifier set, the existing one is returned. Otherwise, the value of default is returned.
entailment
def run(self, scenario, learn=True):
    """Run the algorithm, utilizing the classifier set to choose the
    most appropriate action for each situation produced by the scenario.
    If learn is True, improve the situation/action mapping to maximize
    reward. Otherwise, ignore any reward received.

    Usage:
        model.run(scenario, learn=True)

    Arguments:
        scenario: A Scenario instance which this classifier set is to
            interact with.
        learn: A bool indicating whether the classifier set should
            attempt to optimize its performance based on reward received
            for each action, as opposed to simply using what it has
            already learned from previous runs and ignoring reward
            received; default is True.
    Return: None
    """
    assert isinstance(scenario, scenarios.Scenario)

    previous_match_set = None

    # Repeat until the scenario has run its course.
    while scenario.more():
        # Gather information about the current state of the environment.
        situation = scenario.sense()

        # Determine which rules match the current situation.
        match_set = self.match(situation)

        # Select the best action for the current situation (or a random
        # one, if we are on an exploration step).
        match_set.select_action()

        # Perform the selected action and find out what the received
        # reward was.
        reward = scenario.execute(match_set.selected_action)

        # If the scenario is dynamic, don't immediately apply the
        # reward; instead, wait until the next iteration and factor in
        # not only the reward that was received on the previous step,
        # but the (discounted) reward that is expected going forward
        # given the resulting situation observed after the action was
        # taken. This is a classic feature of temporal difference (TD)
        # algorithms, which acts to stitch together a general picture
        # of the future expected reward without actually waiting the
        # full duration to find out what it will be.
        if learn:
            # Ensure we are not trying to learn in a non-learning
            # scenario.
            assert reward is not None

            if scenario.is_dynamic:
                if previous_match_set is not None:
                    match_set.pay(previous_match_set)
                    previous_match_set.apply_payoff()
                match_set.payoff = reward

                # Remember the current reward and match set for the
                # next iteration.
                previous_match_set = match_set
            else:
                match_set.payoff = reward
                match_set.apply_payoff()

    # This serves to tie off the final stitch. The last action taken
    # gets only the immediate reward; there is no future reward
    # expected.
    if learn and previous_match_set is not None:
        previous_match_set.apply_payoff()

Run the algorithm, utilizing the classifier set to choose the most appropriate action for each situation produced by the scenario. If learn is True, improve the situation/action mapping to maximize reward. Otherwise, ignore any reward received.

Usage:
    model.run(scenario, learn=True)

Arguments:
    scenario: A Scenario instance which this classifier set is to interact with.
    learn: A bool indicating whether the classifier set should attempt to optimize its performance based on reward received for each action, as opposed to simply using what it has already learned from previous runs and ignoring reward received; default is True.
Return: None
entailment
def ls(system, user, local, include_missing):
    """List configuration files detected (and/or examined paths)."""
    # Default action is to list *all* auto-detected files.
    if not (system or user or local):
        system = user = local = True
    for path in get_configfile_paths(system=system, user=user, local=local,
                                     only_existing=not include_missing):
        click.echo(path)
List configuration files detected (and/or examined paths).
entailment
def inspect(config_file, profile):
    """Inspect existing configuration/profile."""
    try:
        section = load_profile_from_files(
            [config_file] if config_file else None, profile)
        click.echo("Configuration file: {}".format(
            config_file if config_file else "auto-detected"))
        click.echo("Profile: {}".format(
            profile if profile else "auto-detected"))
        click.echo("---")
        for key, val in section.items():
            click.echo("{} = {}".format(key, val))
    except (ValueError, ConfigFileReadError, ConfigFileParseError) as e:
        click.echo(e)
Inspect existing configuration/profile.
entailment
def create(config_file, profile):
    """Create and/or update cloud client configuration file."""
    # Determine the config file path.
    if config_file:
        click.echo("Using configuration file: {}".format(config_file))
    else:
        # Path not given; try to detect, or use the default, but allow
        # the user to override.
        config_file = get_configfile_path()
        if config_file:
            click.echo("Found existing configuration file: {}".format(config_file))
        else:
            config_file = get_default_configfile_path()
            click.echo("Configuration file not found; the default location is: {}".format(config_file))
        config_file = default_text_input("Configuration file path", config_file)
        config_file = os.path.expanduser(config_file)

    # Create the config_file path if needed.
    config_base = os.path.dirname(config_file)
    if config_base and not os.path.exists(config_base):
        if click.confirm("Configuration file path does not exist. Create it?",
                         abort=True):
            try:
                os.makedirs(config_base)
            except Exception as e:
                click.echo("Error creating configuration path: {}".format(e))
                return 1

    # Try loading the existing config, or fall back to defaults.
    try:
        config = load_config_from_files([config_file])
    except Exception:
        config = get_default_config()

    # Determine the profile.
    if profile:
        click.echo("Using profile: {}".format(profile))
    else:
        existing = config.sections()
        if existing:
            profiles = 'create new or choose from: {}'.format(', '.join(existing))
            default_profile = ''
        else:
            profiles = 'create new'
            default_profile = 'prod'
        profile = default_text_input("Profile (%s)" % profiles,
                                     default_profile, optional=False)

    if not config.has_section(profile):
        config.add_section(profile)

    # Fill out the profile variables, pairing each variable with a prompt
    # (a 'Proxy URL' prompt is included so the proxy variable is not
    # silently skipped by zip()).
    variables = 'endpoint token client solver proxy'.split()
    prompts = ['API endpoint URL', 'Authentication token',
               'Default client class (qpu or sw)', 'Default solver',
               'Proxy URL']
    for var, prompt in zip(variables, prompts):
        default_val = config.get(profile, var, fallback=None)
        val = default_text_input(prompt, default_val)
        if val:
            val = os.path.expandvars(val)
        if val != default_val:
            config.set(profile, var, val)

    try:
        with open(config_file, 'w') as fp:
            config.write(fp)
    except Exception as e:
        click.echo("Error writing to configuration file: {}".format(e))
        return 2

    click.echo("Configuration saved.")
    return 0
Create and/or update cloud client configuration file.
entailment
def _ping(config_file, profile, solver_def, request_timeout, polling_timeout, output):
    """Helper method for the ping command that uses `output()` for info
    output and raises `CLIError()` on handled errors.

    This function is invariant to output format and/or error signaling
    mechanism.
    """
    config = dict(config_file=config_file, profile=profile, solver=solver_def)
    if request_timeout is not None:
        config.update(request_timeout=request_timeout)
    if polling_timeout is not None:
        config.update(polling_timeout=polling_timeout)

    try:
        client = Client.from_config(**config)
    except Exception as e:
        raise CLIError("Invalid configuration: {}".format(e), code=1)
    if config_file:
        output("Using configuration file: {config_file}", config_file=config_file)
    if profile:
        output("Using profile: {profile}", profile=profile)
    output("Using endpoint: {endpoint}", endpoint=client.endpoint)

    t0 = timer()
    try:
        solver = client.get_solver()
    except SolverAuthenticationError:
        raise CLIError("Authentication error. Check credentials in your "
                       "configuration file.", 2)
    except SolverNotFoundError:
        raise CLIError("Solver not available.", 6)
    except (InvalidAPIResponseError, UnsupportedSolverError):
        raise CLIError("Invalid or unexpected API response.", 3)
    except RequestTimeout:
        raise CLIError("API connection timed out.", 4)
    except requests.exceptions.SSLError as e:
        # We need to handle `ssl.SSLError` wrapped in several exceptions,
        # with differences between py2/3; grepping the message is the
        # easiest way.
        if 'CERTIFICATE_VERIFY_FAILED' in str(e):
            raise CLIError(
                "Certificate verification failed. Please check that your API endpoint "
                "is correct. If you are connecting to a private or third-party D-Wave "
                "system that uses self-signed certificate(s), please see "
                "https://support.dwavesys.com/hc/en-us/community/posts/360018930954.", 5)
        raise CLIError("Unexpected SSL error while fetching solver: {!r}".format(e), 5)
    except Exception as e:
        raise CLIError("Unexpected error while fetching solver: {!r}".format(e), 5)
    t1 = timer()
    output("Using solver: {solver_id}", solver_id=solver.id)

    future = None
    try:
        future = solver.sample_ising({0: 1}, {})
        timing = future.timing
    except RequestTimeout:
        raise CLIError("API connection timed out.", 8)
    except PollingTimeout:
        raise CLIError("Polling timeout exceeded.", 9)
    except Exception as e:
        raise CLIError("Sampling error: {!r}".format(e), 10)
    finally:
        # Guard against sample_ising itself failing, in which case there
        # is no problem ID to report.
        if future is not None:
            output("Submitted problem ID: {problem_id}", problem_id=future.id)
    t2 = timer()

    output("\nWall clock time:")
    output(" * Solver definition fetch: {wallclock_solver_definition:.3f} ms",
           wallclock_solver_definition=(t1 - t0) * 1000.0)
    output(" * Problem submit and results fetch: {wallclock_sampling:.3f} ms",
           wallclock_sampling=(t2 - t1) * 1000.0)
    output(" * Total: {wallclock_total:.3f} ms",
           wallclock_total=(t2 - t0) * 1000.0)

    if timing:
        output("\nQPU timing:")
        for component, duration in timing.items():
            output(" * %(name)s = {%(name)s} us" % {"name": component},
                   **{component: duration})
    else:
        output("\nQPU timing data not available.")
Helper method for the ping command that uses `output()` for info output and raises `CLIError()` on handled errors. This function is invariant to output format and/or error signaling mechanism.
entailment
def ping(config_file, profile, solver_def, json_output, request_timeout, polling_timeout):
    """Ping the QPU by submitting a single-qubit problem."""
    now = utcnow()
    info = dict(datetime=now.isoformat(),
                timestamp=datetime_to_timestamp(now),
                code=0)

    def output(fmt, **kwargs):
        info.update(kwargs)
        if not json_output:
            click.echo(fmt.format(**kwargs))

    def flush():
        if json_output:
            click.echo(json.dumps(info))

    try:
        _ping(config_file, profile, solver_def,
              request_timeout, polling_timeout, output)
    except CLIError as error:
        output("Error: {error} (code: {code})", error=str(error), code=error.code)
        sys.exit(error.code)
    except Exception as error:
        output("Unhandled error: {error}", error=str(error))
        sys.exit(127)
    finally:
        flush()
Ping the QPU by submitting a single-qubit problem.
entailment
def solvers(config_file, profile, solver_def, list_solvers):
    """Get solver details.

    Unless a solver name/id is specified, fetch and display details for
    all online solvers available on the configured endpoint.
    """
    with Client.from_config(
            config_file=config_file, profile=profile, solver=solver_def) as client:

        try:
            solvers = client.get_solvers(**client.default_solver)
        except SolverNotFoundError:
            click.echo("Solver(s) {} not found.".format(solver_def))
            return 1

        if list_solvers:
            for solver in solvers:
                click.echo(solver.id)
            return

        # ~YAML output
        for solver in solvers:
            click.echo("Solver: {}".format(solver.id))
            click.echo("  Parameters:")
            for name, val in sorted(solver.parameters.items()):
                click.echo("    {}: {}".format(name, strtrunc(val) if val else '?'))
            solver.properties.pop('parameters', None)
            click.echo("  Properties:")
            for name, val in sorted(solver.properties.items()):
                click.echo("    {}: {}".format(name, strtrunc(val)))
            click.echo("  Derived properties:")
            for name in sorted(solver.derived_properties):
                click.echo("    {}: {}".format(name, strtrunc(getattr(solver, name))))
            click.echo()

Get solver details. Unless a solver name/id is specified, fetch and display details for all online solvers available on the configured endpoint.
entailment
def sample(config_file, profile, solver_def, biases, couplings, random_problem,
           num_reads, verbose):
    """Submit Ising-formulated problem and return samples."""
    # TODO: de-dup wrt ping

    def echo(s, maxlen=100):
        click.echo(s if verbose else strtrunc(s, maxlen))

    try:
        client = Client.from_config(
            config_file=config_file, profile=profile, solver=solver_def)
    except Exception as e:
        click.echo("Invalid configuration: {}".format(e))
        return 1
    if config_file:
        echo("Using configuration file: {}".format(config_file))
    if profile:
        echo("Using profile: {}".format(profile))
    echo("Using endpoint: {}".format(client.endpoint))

    try:
        solver = client.get_solver()
    except SolverAuthenticationError:
        click.echo("Authentication error. Check credentials in your configuration file.")
        return 1
    except (InvalidAPIResponseError, UnsupportedSolverError):
        click.echo("Invalid or unexpected API response.")
        return 2
    except SolverNotFoundError:
        click.echo("Solver with the specified features does not exist.")
        return 3
    echo("Using solver: {}".format(solver.id))

    if random_problem:
        linear, quadratic = generate_random_ising_problem(solver)
    else:
        try:
            linear = ast.literal_eval(biases) if biases else []
        except Exception as e:
            # Bail out on unparsable input; continuing would use an
            # undefined `linear`.
            click.echo("Invalid biases: {}".format(e))
            return 5
        try:
            quadratic = ast.literal_eval(couplings) if couplings else {}
        except Exception as e:
            click.echo("Invalid couplings: {}".format(e))
            return 5

    echo("Using qubit biases: {!r}".format(linear))
    echo("Using qubit couplings: {!r}".format(quadratic))
    echo("Number of samples: {}".format(num_reads))

    try:
        result = solver.sample_ising(linear, quadratic, num_reads=num_reads).result()
    except Exception as e:
        click.echo(e)
        return 4

    if verbose:
        click.echo("Result: {!r}".format(result))

    echo("Samples: {!r}".format(result['samples']))
    echo("Occurrences: {!r}".format(result['occurrences']))
    echo("Energies: {!r}".format(result['energies']))
Submit Ising-formulated problem and return samples.
entailment
def get_input_callback(samplerate, params, num_samples=256):
    """Return a function that produces samples of a sine.

    Parameters
    ----------
    samplerate : float
        The sample rate.
    params : dict
        Parameters for FM generation.
    num_samples : int, optional
        Number of samples to be generated on each call.
    """
    amplitude = params['mod_amplitude']
    frequency = params['mod_frequency']

    def producer():
        """Generate samples.

        Yields
        ------
        samples : ndarray
            A number of samples (`num_samples`) of the sine.
        """
        start_time = 0
        while True:
            time = start_time + np.arange(num_samples) / samplerate
            start_time += num_samples / samplerate
            output = amplitude * np.cos(2 * np.pi * frequency * time)
            yield output

    return lambda p=producer(): next(p)

Return a function that produces samples of a sine.

Parameters
----------
samplerate : float
    The sample rate.
params : dict
    Parameters for FM generation.
num_samples : int, optional
    Number of samples to be generated on each call.
entailment
def get_playback_callback(resampler, samplerate, params):
    """Return a sound playback callback.

    Parameters
    ----------
    resampler
        The resampler from which samples are read.
    samplerate : float
        The sample rate.
    params : dict
        Parameters for FM generation.
    """

    def callback(outdata, frames, time, _):
        """Playback callback.

        Read samples from the resampler and modulate them onto a carrier
        frequency.
        """
        last_fmphase = getattr(callback, 'last_fmphase', 0)
        df = params['fm_gain'] * resampler.read(frames)
        df = np.pad(df, (0, frames - len(df)), mode='constant')
        t = time.outputBufferDacTime + np.arange(frames) / samplerate
        phase = 2 * np.pi * params['carrier_frequency'] * t
        fmphase = last_fmphase + 2 * np.pi * np.cumsum(df) / samplerate
        outdata[:, 0] = params['output_volume'] * np.cos(phase + fmphase)
        callback.last_fmphase = fmphase[-1]

    return callback

Return a sound playback callback.

Parameters
----------
resampler
    The resampler from which samples are read.
samplerate : float
    The sample rate.
params : dict
    Parameters for FM generation.
entailment
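The callback implements FM by integrating the instantaneous frequency deviation into a running phase. In discrete time, with deviation df[n] and sample rate fs, the modulation phase accumulates as

    fmphase[n] = fmphase[n-1] + 2*pi*df[n]/fs

which is exactly what last_fmphase + 2*pi*np.cumsum(df)/samplerate computes. Stashing the final phase value on the callback object keeps the waveform continuous across successive audio blocks.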
def main(source_samplerate, target_samplerate, params, converter_type):
    """Set up the resampling and audio output callbacks and start playback."""
    from time import sleep

    ratio = target_samplerate / source_samplerate

    with sr.CallbackResampler(get_input_callback(source_samplerate, params),
                              ratio, converter_type) as resampler, \
         sd.OutputStream(channels=1, samplerate=target_samplerate,
                         callback=get_playback_callback(
                             resampler, target_samplerate, params)):
        print("Playing back... Ctrl+C to stop.")
        try:
            while True:
                sleep(1)
        except KeyboardInterrupt:
            print("Aborting.")

Set up the resampling and audio output callbacks and start playback.
entailment
def max_num_reads(self, **params):
    """Returns the maximum number of reads for the given solver
    parameters.

    Args:
        **params: Parameters for the sampling method. Relevant to
            num_reads: annealing_time, readout_thermalization,
            num_reads, programming_thermalization.

    Returns:
        int: The maximum number of reads.
    """
    # Dev note: in the future it would be good to have a way of doing
    # this server-side, as we are duplicating logic here.

    properties = self.properties

    if self.software or not params:
        # Software solvers don't use any of the above parameters.
        return properties['num_reads_range'][1]

    # QPU
    _, duration = properties['problem_run_duration_range']

    annealing_time = params.get('annealing_time',
                                properties['default_annealing_time'])
    readout_thermalization = params.get(
        'readout_thermalization',
        properties['default_readout_thermalization'])
    programming_thermalization = params.get(
        'programming_thermalization',
        properties['default_programming_thermalization'])

    return min(properties['num_reads_range'][1],
               int((duration - programming_thermalization)
                   / (annealing_time + readout_thermalization)))

Returns the maximum number of reads for the given solver parameters.

Args:
    **params: Parameters for the sampling method. Relevant to num_reads: annealing_time, readout_thermalization, num_reads, programming_thermalization.

Returns:
    int: The maximum number of reads.
entailment
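A worked example of the QPU bound (numbers invented for illustration): with a problem run duration limit of 1,000,000 us, programming_thermalization of 1,000 us, annealing_time of 20 us, and readout_thermalization of 0 us, the cap is int((1000000 - 1000) / (20 + 0)) = 49950 reads, further clamped by num_reads_range. Explicit parameters override the solver defaults:

    max_reads = solver.max_num_reads(annealing_time=100)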
def sample_ising(self, linear, quadratic, **params):
    """Sample from the specified Ising model.

    Args:
        linear (list/dict): Linear terms of the model (h).
        quadratic (dict of (int, int):float): Quadratic terms of the
            model (J).
        **params: Parameters for the sampling method, specified per solver.

    Returns:
        :obj:`Future`

    Examples:
        This example creates a client using the local system's default
        D-Wave Cloud Client configuration file, which is configured to
        access a D-Wave 2000Q QPU, submits a simple :term:`Ising` problem
        (opposite linear biases on two coupled qubits), and samples 5
        times.

        >>> from dwave.cloud import Client
        >>> with Client.from_config() as client:  # doctest: +SKIP
        ...     solver = client.get_solver()
        ...     u, v = next(iter(solver.edges))
        ...     computation = solver.sample_ising({u: -1, v: 1}, {}, num_reads=5)
        ...     for i in range(5):
        ...         print(computation.samples[i][u], computation.samples[i][v])
        ...
        (1, -1)
        (1, -1)
        (1, -1)
        (1, -1)
        (1, -1)
    """
    # Our linear and quadratic objective terms are already separated in
    # an Ising model, so we can just directly call `_sample`.
    return self._sample('ising', linear, quadratic, params)

Sample from the specified Ising model.

Args:
    linear (list/dict): Linear terms of the model (h).
    quadratic (dict of (int, int):float): Quadratic terms of the model (J).
    **params: Parameters for the sampling method, specified per solver.

Returns:
    :obj:`Future`

Examples:
    This example creates a client using the local system's default D-Wave Cloud Client configuration file, which is configured to access a D-Wave 2000Q QPU, submits a simple :term:`Ising` problem (opposite linear biases on two coupled qubits), and samples 5 times.

    >>> from dwave.cloud import Client
    >>> with Client.from_config() as client:  # doctest: +SKIP
    ...     solver = client.get_solver()
    ...     u, v = next(iter(solver.edges))
    ...     computation = solver.sample_ising({u: -1, v: 1}, {}, num_reads=5)
    ...     for i in range(5):
    ...         print(computation.samples[i][u], computation.samples[i][v])
    ...
    (1, -1)
    (1, -1)
    (1, -1)
    (1, -1)
    (1, -1)
entailment
def sample_qubo(self, qubo, **params):
    """Sample from the specified QUBO.

    Args:
        qubo (dict of (int, int):float): Coefficients of a quadratic
            unconstrained binary optimization (QUBO) model.
        **params: Parameters for the sampling method, specified per solver.

    Returns:
        :obj:`Future`

    Examples:
        This example creates a client using the local system's default
        D-Wave Cloud Client configuration file, which is configured to
        access a D-Wave 2000Q QPU, submits a :term:`QUBO` problem (a
        Boolean NOT gate represented by a penalty model), and samples 5
        times.

        >>> from dwave.cloud import Client
        >>> with Client.from_config() as client:  # doctest: +SKIP
        ...     solver = client.get_solver()
        ...     u, v = next(iter(solver.edges))
        ...     Q = {(u, u): -1, (u, v): 0, (v, u): 2, (v, v): -1}
        ...     computation = solver.sample_qubo(Q, num_reads=5)
        ...     for i in range(5):
        ...         print(computation.samples[i][u], computation.samples[i][v])
        ...
        (0, 1)
        (1, 0)
        (1, 0)
        (0, 1)
        (1, 0)
    """
    # In a QUBO, the linear and quadratic terms of the objective are
    # mixed into a single matrix. For the sake of encoding, we separate
    # them before calling `_sample`.
    linear = {i1: v for (i1, i2), v in uniform_iterator(qubo) if i1 == i2}
    quadratic = {(i1, i2): v
                 for (i1, i2), v in uniform_iterator(qubo) if i1 != i2}
    return self._sample('qubo', linear, quadratic, params)

Sample from the specified QUBO.

Args:
    qubo (dict of (int, int):float): Coefficients of a quadratic unconstrained binary optimization (QUBO) model.
    **params: Parameters for the sampling method, specified per solver.

Returns:
    :obj:`Future`

Examples:
    This example creates a client using the local system's default D-Wave Cloud Client configuration file, which is configured to access a D-Wave 2000Q QPU, submits a :term:`QUBO` problem (a Boolean NOT gate represented by a penalty model), and samples 5 times.

    >>> from dwave.cloud import Client
    >>> with Client.from_config() as client:  # doctest: +SKIP
    ...     solver = client.get_solver()
    ...     u, v = next(iter(solver.edges))
    ...     Q = {(u, u): -1, (u, v): 0, (v, u): 2, (v, v): -1}
    ...     computation = solver.sample_qubo(Q, num_reads=5)
    ...     for i in range(5):
    ...         print(computation.samples[i][u], computation.samples[i][v])
    ...
    (0, 1)
    (1, 0)
    (1, 0)
    (0, 1)
    (1, 0)
entailment
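The diagonal/off-diagonal split performed above can be checked by hand with the NOT-gate QUBO from the docstring. A stand-alone sketch of the same separation logic, using plain dict comprehensions in place of uniform_iterator:

    Q = {(0, 0): -1, (0, 4): 0, (4, 0): 2, (4, 4): -1}
    linear = {i: v for (i, j), v in Q.items() if i == j}
    quadratic = {(i, j): v for (i, j), v in Q.items() if i != j}
    assert linear == {0: -1, 4: -1}
    assert quadratic == {(0, 4): 0, (4, 0): 2}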
def _sample(self, type_, linear, quadratic, params):
    """Internal method for both sample_ising and sample_qubo.

    Args:
        linear (list/dict): Linear terms of the model.
        quadratic (dict of (int, int):float): Quadratic terms of the model.
        params (dict): Parameters for the sampling method, specified
            per solver.

    Returns:
        :obj:`Future`
    """
    # Check the problem.
    if not self.check_problem(linear, quadratic):
        raise ValueError("Problem graph incompatible with solver.")

    # Mix the new parameters with the default parameters.
    combined_params = dict(self._params)
    combined_params.update(params)

    # Check the parameters before submitting.
    for key in combined_params:
        if key not in self.parameters and not key.startswith('x_'):
            raise KeyError("{} is not a parameter of this solver.".format(key))

    # Transform some of the parameters in-place.
    self._format_params(type_, combined_params)

    body = json.dumps({
        'solver': self.id,
        'data': encode_bqm_as_qp(self, linear, quadratic),
        'type': type_,
        'params': combined_params
    })
    _LOGGER.trace("Encoded sample request: %s", body)

    future = Future(solver=self, id_=None, return_matrix=self.return_matrix,
                    submission_data=(type_, linear, quadratic, params))

    _LOGGER.debug("Submitting new problem to: %s", self.id)
    self.client._submit(body, future)

    return future

Internal method for both sample_ising and sample_qubo.

Args:
    linear (list/dict): Linear terms of the model.
    quadratic (dict of (int, int):float): Quadratic terms of the model.
    params (dict): Parameters for the sampling method, specified per solver.

Returns:
    :obj:`Future`
entailment
def _format_params(self, type_, params):
    """Reformat some of the parameters for sapi."""
    if 'initial_state' in params:
        # NB: at this moment the error raised when initial_state does
        # not match lin/quad (in active qubits) is not very informative,
        # but there is also no clean way to check here that they match,
        # because lin can be either a list or a dict. In the future it
        # would be good to check.
        initial_state = params['initial_state']
        if isinstance(initial_state, Mapping):
            initial_state_list = [3] * self.properties['num_qubits']
            low = -1 if type_ == 'ising' else 0
            for v, val in initial_state.items():
                if val == 3:
                    continue
                if val <= 0:
                    initial_state_list[v] = low
                else:
                    initial_state_list[v] = 1
            params['initial_state'] = initial_state_list
Reformat some of the parameters for sapi.
entailment
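A small worked example of the reformatting, assuming a hypothetical 5-qubit solver: for an ising-type problem, an initial_state mapping of {0: 1, 2: -1} becomes the list [1, 3, -1, 3, 3], since unmentioned qubits keep the "no value" marker 3, non-positive values map to the low state (-1 for ising, 0 for qubo), and positive values map to 1.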
def check_problem(self, linear, quadratic):
    """Test if an Ising model matches the graph provided by the solver.

    Args:
        linear (list/dict): Linear terms of the model (h).
        quadratic (dict of (int, int):float): Quadratic terms of the
            model (J).

    Returns:
        boolean

    Examples:
        This example creates a client using the local system's default
        D-Wave Cloud Client configuration file, which is configured to
        access a D-Wave 2000Q QPU, and tests a simple :term:`Ising`
        model for two target embeddings (that is, representations of
        the model's graph by coupled qubits on the QPU's sparsely
        connected graph), where only the second is valid.

        >>> from dwave.cloud import Client
        >>> print((0, 1) in solver.edges)  # doctest: +SKIP
        False
        >>> print((0, 4) in solver.edges)  # doctest: +SKIP
        True
        >>> with Client.from_config() as client:  # doctest: +SKIP
        ...     solver = client.get_solver()
        ...     print(solver.check_problem({0: -1, 1: 1}, {(0, 1): 0.5}))
        ...     print(solver.check_problem({0: -1, 4: 1}, {(0, 4): 0.5}))
        ...
        False
        True
    """
    for key, value in uniform_iterator(linear):
        if value != 0 and key not in self.nodes:
            return False
    for key, value in uniform_iterator(quadratic):
        if value != 0 and tuple(key) not in self.edges:
            return False
    return True

Test if an Ising model matches the graph provided by the solver.

Args:
    linear (list/dict): Linear terms of the model (h).
    quadratic (dict of (int, int):float): Quadratic terms of the model (J).

Returns:
    boolean

Examples:
    This example creates a client using the local system's default D-Wave Cloud Client configuration file, which is configured to access a D-Wave 2000Q QPU, and tests a simple :term:`Ising` model for two target embeddings (that is, representations of the model's graph by coupled qubits on the QPU's sparsely connected graph), where only the second is valid.

    >>> from dwave.cloud import Client
    >>> print((0, 1) in solver.edges)  # doctest: +SKIP
    False
    >>> print((0, 4) in solver.edges)  # doctest: +SKIP
    True
    >>> with Client.from_config() as client:  # doctest: +SKIP
    ...     solver = client.get_solver()
    ...     print(solver.check_problem({0: -1, 1: 1}, {(0, 1): 0.5}))
    ...     print(solver.check_problem({0: -1, 4: 1}, {(0, 4): 0.5}))
    ...
    False
    True
entailment
def _retrieve_problem(self, id_):
    """Resume polling for a problem previously submitted.

    Args:
        id_: Identification of the query.

    Returns:
        :obj:`Future`
    """
    future = Future(self, id_, self.return_matrix, None)
    self.client._poll(future)
    return future

Resume polling for a problem previously submitted.

Args:
    id_: Identification of the query.

Returns:
    :obj:`Future`
entailment
def _get_converter_type(identifier): """Return the converter type for `identifier`.""" if isinstance(identifier, str): return ConverterType[identifier] if isinstance(identifier, ConverterType): return identifier return ConverterType(identifier)
Return the converter type for `identifier`.
entailment
def resample(input_data, ratio, converter_type='sinc_best', verbose=False): """Resample the signal in `input_data` at once. Parameters ---------- input_data : ndarray Input data. A single channel is provided as a 1D array of `num_frames` length. Input data with several channels is represented as a 2D array of shape (`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data` is converted to 32-bit float and C (row-major) memory order. ratio : float Conversion ratio = output sample rate / input sample rate. converter_type : ConverterType, str, or int Sample rate converter. verbose : bool If `True`, print additional information about the conversion. Returns ------- output_data : ndarray Resampled input data. Note ---- If samples are to be processed in chunks, `Resampler` and `CallbackResampler` will provide better results and allow for variable conversion ratios. """ from samplerate.lowlevel import src_simple from samplerate.exceptions import ResamplingError input_data = np.require(input_data, requirements='C', dtype=np.float32) if input_data.ndim == 2: num_frames, channels = input_data.shape output_shape = (int(num_frames * ratio), channels) elif input_data.ndim == 1: num_frames, channels = input_data.size, 1 output_shape = (int(num_frames * ratio), ) else: raise ValueError('rank > 2 not supported') output_data = np.empty(output_shape, dtype=np.float32) converter_type = _get_converter_type(converter_type) (error, input_frames_used, output_frames_gen) \ = src_simple(input_data, output_data, ratio, converter_type.value, channels) if error != 0: raise ResamplingError(error) if verbose: info = ('samplerate info:\n' '{} input frames used\n' '{} output frames generated\n' .format(input_frames_used, output_frames_gen)) print(info) return (output_data[:output_frames_gen, :] if channels > 1 else output_data[:output_frames_gen])
Resample the signal in `input_data` at once. Parameters ---------- input_data : ndarray Input data. A single channel is provided as a 1D array of `num_frames` length. Input data with several channels is represented as a 2D array of shape (`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data` is converted to 32-bit float and C (row-major) memory order. ratio : float Conversion ratio = output sample rate / input sample rate. converter_type : ConverterType, str, or int Sample rate converter. verbose : bool If `True`, print additional information about the conversion. Returns ------- output_data : ndarray Resampled input data. Note ---- If samples are to be processed in chunks, `Resampler` and `CallbackResampler` will provide better results and allow for variable conversion ratios.
entailment
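A minimal usage sketch of the batch `resample` API above. The sample rates and the 440 Hz test tone are illustrative assumptions; the snippet presumes the `samplerate` package (with `libsamplerate`) and `numpy` are installed.

import numpy as np
import samplerate  # assumes python-samplerate with libsamplerate is available

sr_in, sr_out = 44100, 48000
t = np.arange(sr_in) / float(sr_in)      # one second of sample times
mono = np.sin(2 * np.pi * 440.0 * t)     # 440 Hz sine, shape (num_frames,)

ratio = sr_out / float(sr_in)            # output rate / input rate
out = samplerate.resample(mono, ratio, 'sinc_best')
print(out.shape)                         # roughly (48000,)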
def set_ratio(self, new_ratio): """Set a new conversion ratio immediately.""" from samplerate.lowlevel import src_set_ratio return src_set_ratio(self._state, new_ratio)
Set a new conversion ratio immediately.
entailment
def process(self, input_data, ratio, end_of_input=False, verbose=False):
        """Resample the signal in `input_data`.

        Parameters
        ----------
        input_data : ndarray
            Input data. A single channel is provided as a 1D array of `num_frames` length.
            Input data with several channels is represented as a 2D array of shape
            (`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
            is converted to 32-bit float and C (row-major) memory order.
        ratio : float
            Conversion ratio = output sample rate / input sample rate.
        end_of_input : bool
            Set to `True` if no more data is available, or to `False` otherwise.
        verbose : bool
            If `True`, print additional information about the conversion.

        Returns
        -------
        output_data : ndarray
            Resampled input data.
        """
        from samplerate.lowlevel import src_process
        from samplerate.exceptions import ResamplingError

        input_data = np.require(input_data, requirements='C', dtype=np.float32)
        if input_data.ndim == 2:
            num_frames, channels = input_data.shape
            output_shape = (int(num_frames * ratio), channels)
        elif input_data.ndim == 1:
            num_frames, channels = input_data.size, 1
            output_shape = (int(num_frames * ratio), )
        else:
            raise ValueError('rank > 2 not supported')

        if channels != self._channels:
            raise ValueError('Invalid number of channels in input data.')

        output_data = np.empty(output_shape, dtype=np.float32)

        (error, input_frames_used, output_frames_gen) = src_process(
            self._state, input_data, output_data, ratio, end_of_input)

        if error != 0:
            raise ResamplingError(error)

        if verbose:
            info = ('samplerate info:\n'
                    '{} input frames used\n'
                    '{} output frames generated\n'
                    .format(input_frames_used, output_frames_gen))
            print(info)

        return (output_data[:output_frames_gen, :]
                if channels > 1 else output_data[:output_frames_gen])
Resample the signal in `input_data`. Parameters ---------- input_data : ndarray Input data. A single channel is provided as a 1D array of `num_frames` length. Input data with several channels is represented as a 2D array of shape (`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data` is converted to 32-bit float and C (row-major) memory order. ratio : float Conversion ratio = output sample rate / input sample rate. end_of_input : bool Set to `True` if no more data is available, or to `False` otherwise. verbose : bool If `True`, print additional information about the conversion. Returns ------- output_data : ndarray Resampled input data.
entailment
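A hedged sketch of chunked conversion with `process`. The converter, chunk count, and random test signal are assumptions, and `Resampler` is presumed to be constructed as `Resampler(converter_type, channels=...)`.

import numpy as np
import samplerate

resampler = samplerate.Resampler('sinc_fastest', channels=1)
ratio = 48000 / 44100.0
chunks = np.array_split(np.random.randn(44100).astype(np.float32), 10)

out = []
for i, chunk in enumerate(chunks):
    end = (i == len(chunks) - 1)  # flag the final chunk as end of input
    out.append(resampler.process(chunk, ratio, end_of_input=end))
resampled = np.concatenate(out)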
def _create(self): """Create new callback resampler.""" from samplerate.lowlevel import ffi, src_callback_new, src_delete from samplerate.exceptions import ResamplingError state, handle, error = src_callback_new( self._callback, self._converter_type.value, self._channels) if error != 0: raise ResamplingError(error) self._state = ffi.gc(state, src_delete) self._handle = handle
Create new callback resampler.
entailment
def set_starting_ratio(self, ratio): """ Set the starting conversion ratio for the next `read` call. """ from samplerate.lowlevel import src_set_ratio if self._state is None: self._create() src_set_ratio(self._state, ratio) self.ratio = ratio
Set the starting conversion ratio for the next `read` call.
entailment
def reset(self): """Reset state.""" from samplerate.lowlevel import src_reset if self._state is None: self._create() src_reset(self._state)
Reset state.
entailment
def read(self, num_frames): """Read a number of frames from the resampler. Parameters ---------- num_frames : int Number of frames to read. Returns ------- output_data : ndarray Resampled frames as a (`num_output_frames`, `num_channels`) or (`num_output_frames`,) array. Note that this may return fewer frames than requested, for example when no more input is available. """ from samplerate.lowlevel import src_callback_read, src_error from samplerate.exceptions import ResamplingError if self._state is None: self._create() if self._channels > 1: output_shape = (num_frames, self._channels) elif self._channels == 1: output_shape = (num_frames, ) output_data = np.empty(output_shape, dtype=np.float32) ret = src_callback_read(self._state, self._ratio, num_frames, output_data) if ret == 0: error = src_error(self._state) if error: raise ResamplingError(error) return (output_data[:ret, :] if self._channels > 1 else output_data[:ret])
Read a number of frames from the resampler. Parameters ---------- num_frames : int Number of frames to read. Returns ------- output_data : ndarray Resampled frames as a (`num_output_frames`, `num_channels`) or (`num_output_frames`,) array. Note that this may return fewer frames than requested, for example when no more input is available.
entailment
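A hedged sketch of pull-style conversion with `read`. The `CallbackResampler(callback, ratio, converter_type, channels=...)` constructor is assumed from the surrounding code (`_create` passes a callback, converter type, and channel count), and returning `None` from the callback is presumed to signal end of input.

import numpy as np
import samplerate

blocks = iter(np.array_split(np.zeros(8192, dtype=np.float32), 8))

def producer():
    # Return the next input block, or None when no more data is available.
    return next(blocks, None)

resampler = samplerate.CallbackResampler(producer, 48000 / 44100.0,
                                         'sinc_fastest', channels=1)
frames = resampler.read(4096)  # may return fewer frames than requested
print(frames.shape)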
def get_variance(seq): """ Batch variance calculation. """ m = get_mean(seq) return sum((v-m)**2 for v in seq)/float(len(seq))
Batch variance calculation.
entailment
def mean_absolute_error(seq, correct): """ Batch mean absolute error calculation. """ assert len(seq) == len(correct) diffs = [abs(a-b) for a, b in zip(seq, correct)] return sum(diffs)/float(len(diffs))
Batch mean absolute error calculation.
entailment
def normalize(seq): """ Scales each number in the sequence so that the sum of all numbers equals 1. """ s = float(sum(seq)) return [v/s for v in seq]
Scales each number in the sequence so that the sum of all numbers equals 1.
entailment
def normcdf(x, mu, sigma):
    """
    Describes the probability that a real-valued random variable X with a given
    probability distribution will be found at a value less than or equal to x
    in a normal distribution.

    http://en.wikipedia.org/wiki/Cumulative_distribution_function
    """
    t = x-mu
    y = 0.5*erfcc(-t/(sigma*math.sqrt(2.0)))
    if y > 1.0:
        y = 1.0
    return y
Describes the probability that a real-valued random variable X with a given probability distribution will be found at a value less than or equal to x in a normal distribution. http://en.wikipedia.org/wiki/Cumulative_distribution_function
entailment
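A standalone numeric check of the formula above, substituting the standard library's `math.erfc` for the module's `erfcc` helper (presumably an erfc approximation defined elsewhere):

import math

def std_normcdf(x, mu, sigma):
    # Same closed form as above: 0.5 * erfc(-(x - mu) / (sigma * sqrt(2)))
    return 0.5 * math.erfc(-(x - mu) / (sigma * math.sqrt(2.0)))

print(std_normcdf(0.0, 0.0, 1.0))             # 0.5: half the mass lies at or below the mean
print(round(std_normcdf(1.96, 0.0, 1.0), 3))  # ~0.975 for the standard normal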
def normpdf(x, mu, sigma): """ Describes the relative likelihood that a real-valued random variable X will take on a given value. http://en.wikipedia.org/wiki/Probability_density_function """ u = (x-mu)/abs(sigma) y = (1/(math.sqrt(2*pi)*abs(sigma)))*math.exp(-u*u/2) return y
Describes the relative likelihood that a real-valued random variable X will take on a given value. http://en.wikipedia.org/wiki/Probability_density_function
entailment
def entropy(data, class_attr=None, method=DEFAULT_DISCRETE_METRIC):
    """
    Calculates the entropy of the class attribute in the given data set.

    Parameters:
    data<dict|list> := if dict, treated as value counts of the given attribute name
        if list, treated as a raw list from which the value counts will be generated
    class_attr<string> := the name of the class attribute
    """
    assert (class_attr is None and isinstance(data, dict)) \
        or (class_attr is not None and isinstance(data, list))

    if isinstance(data, dict):
        counts = data
    else:
        counts = defaultdict(float) # {attr:count}
        for record in data:
            # Note: A missing attribute is treated like an attribute with a value
            # of None, representing the attribute is "irrelevant".
            counts[record.get(class_attr)] += 1.0

    len_data = float(sum(cnt for _, cnt in iteritems(counts)))
    n = max(2, len(counts))
    total = float(sum(counts.values()))
    assert total, "There must be at least one non-zero count."

    if method == ENTROPY1:
        return -sum((count/len_data)*math.log(count/len_data, n)
            for count in itervalues(counts) if count)
    elif method == ENTROPY2:
        return -sum((count/len_data)*math.log(count/len_data, n)
            for count in itervalues(counts) if count) - ((len(counts)-1)/float(total))
    elif method == ENTROPY3:
        return -sum((count/len_data)*math.log(count/len_data, n)
            for count in itervalues(counts) if count) - 100*((len(counts)-1)/float(total))
    else:
        raise Exception("Unknown entropy method %s." % method)
Calculates the entropy of the class attribute in the given data set. Parameters: data<dict|list> := if dict, treated as value counts of the given attribute name if list, treated as a raw list from which the value counts will be generated class_attr<string> := the name of the class attribute
entailment
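A worked example of the ENTROPY1 branch on explicit value counts; the 9-to-5 split is a classic toy class distribution:

import math

counts = {'yes': 9, 'no': 5}
total = float(sum(counts.values()))
n = max(2, len(counts))          # log base, as chosen in entropy() above
h = -sum((c / total) * math.log(c / total, n) for c in counts.values() if c)
print(round(h, 4))               # 0.9403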
def entropy_variance(data, class_attr=None,
    method=DEFAULT_CONTINUOUS_METRIC):
    """
    Calculates the variance of a continuous class attribute, to be used as an
    entropy metric.
    """
    assert method in CONTINUOUS_METRICS, "Unknown entropy variance metric: %s" % (method,)
    assert (class_attr is None and isinstance(data, dict)) \
        or (class_attr is not None and isinstance(data, list))
    if isinstance(data, dict):
        lst = data
    else:
        lst = [record.get(class_attr) for record in data]
    return get_variance(lst)
Calculates the variance of a continuous class attribute, to be used as an entropy metric.
entailment
def get_gain(data, attr, class_attr,
    method=DEFAULT_DISCRETE_METRIC,
    only_sub=0, prefer_fewer_values=False, entropy_func=None):
    """
    Calculates the information gain (reduction in entropy) that would
    result by splitting the data on the chosen attribute (attr).

    Parameters:

    prefer_fewer_values := Weights the gain by the count of the attribute's
        unique values. If multiple attributes have the same gain, but one has
        slightly fewer attributes, this will cause the one with fewer
        attributes to be preferred.
    """
    entropy_func = entropy_func or entropy
    val_freq = defaultdict(float)
    subset_entropy = 0.0

    # Calculate the frequency of each of the values in the target attribute
    for record in data:
        val_freq[record.get(attr)] += 1.0

    # Calculate the sum of the entropy for each subset of records weighted
    # by their probability of occurring in the training set.
    total_freq = sum(val_freq.values())
    for val in val_freq.keys():
        val_prob = val_freq[val] / total_freq
        data_subset = [record for record in data if record.get(attr) == val]
        e = entropy_func(data_subset, class_attr, method=method)
        subset_entropy += val_prob * e

    if only_sub:
        return subset_entropy

    # Subtract the entropy of the chosen attribute from the entropy of the
    # whole data set with respect to the target attribute (and return it)
    main_entropy = entropy_func(data, class_attr, method=method)

    # Prefer gains on attributes with fewer values.
    if prefer_fewer_values:
        return ((main_entropy - subset_entropy), 1./len(val_freq))
    else:
        return (main_entropy - subset_entropy)
Calculates the information gain (reduction in entropy) that would result by splitting the data on the chosen attribute (attr). Parameters: prefer_fewer_values := Weights the gain by the count of the attribute's unique values. If multiple attributes have the same gain, but one has slightly fewer attributes, this will cause the one with fewer attributes to be preferred.
entailment
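A self-contained illustration of the gain computation (main entropy minus the weighted subset entropies). The toy records are assumptions, and the helper mirrors entropy() rather than importing it:

import math

def h(labels):
    counts = {}
    for label in labels:
        counts[label] = counts.get(label, 0) + 1
    total = float(len(labels))
    n = max(2, len(counts))
    return -sum((c / total) * math.log(c / total, n) for c in counts.values())

data = [
    {'outlook': 'sunny', 'play': 'no'},
    {'outlook': 'sunny', 'play': 'no'},
    {'outlook': 'rain', 'play': 'yes'},
    {'outlook': 'rain', 'play': 'yes'},
]
main_entropy = h([r['play'] for r in data])
subset_entropy = 0.0
for val in ('sunny', 'rain'):
    subset = [r['play'] for r in data if r['outlook'] == val]
    subset_entropy += (len(subset) / float(len(data))) * h(subset)
print(main_entropy - subset_entropy)   # 1.0: the split separates the classes perfectly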
def majority_value(data, class_attr): """ Creates a list of all values in the target attribute for each record in the data list object, and returns the value that appears in this list the most frequently. """ if is_continuous(data[0][class_attr]): return CDist(seq=[record[class_attr] for record in data]) else: return most_frequent([record[class_attr] for record in data])
Creates a list of all values in the target attribute for each record in the data list object, and returns the value that appears in this list the most frequently.
entailment
def most_frequent(lst):
    """
    Returns the item that appears most frequently in the given list.
    """
    highest_freq = 0
    most_freq = None
    for val in unique(lst):
        freq = lst.count(val)
        if freq > highest_freq:
            most_freq = val
            highest_freq = freq
    return most_freq
Returns the item that appears most frequently in the given list.
entailment
def unique(lst):
    """
    Returns a list made up of the unique values found in lst. i.e., it removes
    the redundant values in lst.
    """
    unique_lst = []
    # Cycle through the list and add each value to the unique list only once.
    for item in lst:
        if item not in unique_lst:
            unique_lst.append(item)
    # Return the list with all redundant values removed.
    return unique_lst
Returns a list made up of the unique values found in lst. i.e., it removes the redundant values in lst.
entailment
def choose_attribute(data, attributes, class_attr, fitness, method): """ Cycles through all the attributes and returns the attribute with the highest information gain (or lowest entropy). """ best = (-1e999999, None) for attr in attributes: if attr == class_attr: continue gain = fitness(data, attr, class_attr, method=method) best = max(best, (gain, attr)) return best[1]
Cycles through all the attributes and returns the attribute with the highest information gain (or lowest entropy).
entailment
def create_decision_tree(data, attributes, class_attr, fitness_func, wrapper, **kwargs):
    """
    Returns a new decision tree based on the examples given.
    """
    assert class_attr not in attributes

    data = list(data) if isinstance(data, Data) else data
    if wrapper.is_continuous_class:
        stop_value = CDist(seq=[r[class_attr] for r in data])
        # For a continuous class case, stop if all the remaining records have
        # a variance below the given threshold.
        stop = wrapper.leaf_threshold is not None \
            and stop_value.variance <= wrapper.leaf_threshold
    else:
        stop_value = DDist(seq=[r[class_attr] for r in data])
        # For a discrete class, stop if all remaining records have the same
        # classification.
        stop = len(stop_value.counts) <= 1

    if not data or len(attributes) <= 0:
        # If the dataset is empty or the attributes list is empty, return the
        # default value. The target attribute is not in the attributes list, so
        # we need not subtract 1 to account for the target attribute.
        if wrapper:
            wrapper.leaf_count += 1
        return stop_value
    elif stop:
        # If all the records in the dataset have the same classification,
        # return that classification.
        if wrapper:
            wrapper.leaf_count += 1
        return stop_value
    else:
        # Choose the next best attribute to best classify our data
        best = choose_attribute(
            data,
            attributes,
            class_attr,
            fitness_func,
            method=wrapper.metric)

        # Create a new decision tree/node with the best attribute.
        node = Node(tree=wrapper, attr_name=best)
        node.n += len(data)

        # Create a new decision tree/sub-node for each of the values in the
        # best attribute field
        for val in get_values(data, best):
            # Create a subtree for the current value under the "best" field
            subtree = create_decision_tree(
                [r for r in data if r[best] == val],
                [attr for attr in attributes if attr != best],
                class_attr,
                fitness_func,
                split_attr=best,
                split_val=val,
                wrapper=wrapper)

            # Add the new subtree to our new node.
            if isinstance(subtree, Node):
                node._branches[val] = subtree
            elif isinstance(subtree, (CDist, DDist)):
                node.set_leaf_dist(attr_value=val, dist=subtree)
            else:
                raise Exception("Unknown subtree type: %s" % (type(subtree),))

        return node
Returns a new decision tree based on the examples given.
entailment
def add(self, k, count=1): """ Increments the count for the given element. """ self.counts[k] += count self.total += count
Increments the count for the given element.
entailment
def best(self): """ Returns the element with the highest probability. """ b = (-1e999999, None) for k, c in iteritems(self.counts): b = max(b, (c, k)) return b[1]
Returns the element with the highest probability.
entailment
def probs(self): """ Returns a list of probabilities for all elements in the form [(value1,prob1),(value2,prob2),...]. """ return [ (k, self.counts[k]/float(self.total)) for k in iterkeys(self.counts) ]
Returns a list of probabilities for all elements in the form [(value1,prob1),(value2,prob2),...].
entailment
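A minimal sketch of the DDist interface built from the methods above; the empty constructor (and `best` being a property) are assumptions:

from collections import defaultdict

class DDist(object):
    # Sketch only: the real class likely also accepts an optional seq argument.
    def __init__(self):
        self.counts = defaultdict(int)
        self.total = 0
    def add(self, k, count=1):
        self.counts[k] += count
        self.total += count
    @property
    def best(self):
        return max(self.counts.items(), key=lambda kv: kv[1])[0]
    def probs(self):
        return [(k, c / float(self.total)) for k, c in self.counts.items()]

d = DDist()
for label in ['a', 'a', 'b']:
    d.add(label)
print(d.best)     # 'a'
print(d.probs())  # [('a', 0.666...), ('b', 0.333...)]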
def update(self, dist): """ Adds the given distribution's counts to the current distribution. """ assert isinstance(dist, DDist) for k, c in iteritems(dist.counts): self.counts[k] += c self.total += dist.total
Adds the given distribution's counts to the current distribution.
entailment
def probability_lt(self, x): """ Returns the probability of a random variable being less than the given value. """ if self.mean is None: return return normdist(x=x, mu=self.mean, sigma=self.standard_deviation)
Returns the probability of a random variable being less than the given value.
entailment
def probability_in(self, a, b): """ Returns the probability of a random variable falling between the given values. """ if self.mean is None: return p1 = normdist(x=a, mu=self.mean, sigma=self.standard_deviation) p2 = normdist(x=b, mu=self.mean, sigma=self.standard_deviation) return abs(p1 - p2)
Returns the probability of a random variable falling between the given values.
entailment
def probability_gt(self, x): """ Returns the probability of a random variable being greater than the given value. """ if self.mean is None: return p = normdist(x=x, mu=self.mean, sigma=self.standard_deviation) return 1-p
Returns the probability of a random variable being greater than the given value.
entailment
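A standalone numeric check of the three probability helpers, using an erfc-based normal CDF in place of the module's normdist (assumed equivalent):

import math

def cdf(x, mu, sigma):
    # Standard normal CDF generalized to mean mu and std dev sigma.
    return 0.5 * math.erfc(-(x - mu) / (sigma * math.sqrt(2.0)))

mu, sd = 10.0, 2.0
p_lt = cdf(9.0, mu, sd)                           # P(X < 9)
p_in = abs(cdf(8.0, mu, sd) - cdf(12.0, mu, sd))  # P(8 < X < 12), i.e. +/- 1 sigma
p_gt = 1 - cdf(11.0, mu, sd)                      # P(X > 11)
print(round(p_lt, 3), round(p_in, 3), round(p_gt, 3))  # ~0.309 0.683 0.309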
def copy_no_data(self): """ Returns a copy of the object without any data. """ return type(self)( [], order=list(self.header_modes), types=self.header_types.copy(), modes=self.header_modes.copy())
Returns a copy of the object without any data.
entailment
def is_valid(self, name, value): """ Returns true if the given value matches the type for the given name according to the schema. Returns false otherwise. """ if name not in self.header_types: return False t = self.header_types[name] if t == ATTR_TYPE_DISCRETE: return isinstance(value, int) elif t == ATTR_TYPE_CONTINUOUS: return isinstance(value, (float, Decimal)) return True
Returns true if the given value matches the type for the given name according to the schema. Returns false otherwise.
entailment
def _read_header(self):
    """
    When a CSV file is given, extracts header information from the file.
    Otherwise, this header data must be explicitly given when the object
    is instantiated.
    """
    if not self.filename or self.header_types:
        return
    rows = csv.reader(open(self.filename))
    header = next(rows)
    self.header_types = {} # {attr_name:type}
    self._class_attr_name = None
    self.header_order = [] # [attr_name,...]
    for el in header:
        matches = ATTR_HEADER_PATTERN.findall(el)
        assert matches, "Invalid header element: %s" % (el,)
        el_name, el_type, el_mode = matches[0]
        el_name = el_name.strip()
        self.header_order.append(el_name)
        self.header_types[el_name] = el_type
        if el_mode == ATTR_MODE_CLASS:
            assert self._class_attr_name is None, \
                "Multiple class attributes are not supported."
            self._class_attr_name = el_name
        else:
            assert self.header_types[el_name] != ATTR_TYPE_CONTINUOUS, \
                "Non-class continuous attributes are not supported."
    assert self._class_attr_name, "A class attribute must be specified."
When a CSV file is given, extracts header information from the file. Otherwise, this header data must be explicitly given when the object is instantiated.
entailment
def validate_row(self, row): """ Ensure each element in the row matches the schema. """ clean_row = {} if isinstance(row, (tuple, list)): assert self.header_order, "No attribute order specified." assert len(row) == len(self.header_order), \ "Row length does not match header length." itr = zip(self.header_order, row) else: assert isinstance(row, dict) itr = iteritems(row) for el_name, el_value in itr: if self.header_types[el_name] == ATTR_TYPE_DISCRETE: clean_row[el_name] = int(el_value) elif self.header_types[el_name] == ATTR_TYPE_CONTINUOUS: clean_row[el_name] = float(el_value) else: clean_row[el_name] = el_value return clean_row
Ensure each element in the row matches the schema.
entailment
def split(self, ratio=0.5, leave_one_out=False): """ Returns two Data instances, containing the data randomly split between the two according to the given ratio. The first instance will contain the ratio of data specified. The second instance will contain the remaining ratio of data. If leave_one_out is True, the ratio will be ignored and the first instance will contain exactly one record for each class label, and the second instance will contain all remaining data. """ a_labels = set() a = self.copy_no_data() b = self.copy_no_data() for row in self: if leave_one_out and not self.is_continuous_class: label = row[self.class_attribute_name] if label not in a_labels: a_labels.add(label) a.data.append(row) else: b.data.append(row) elif not a: a.data.append(row) elif not b: b.data.append(row) elif random.random() <= ratio: a.data.append(row) else: b.data.append(row) return a, b
Returns two Data instances, containing the data randomly split between the two according to the given ratio. The first instance will contain the ratio of data specified. The second instance will contain the remaining ratio of data. If leave_one_out is True, the ratio will be ignored and the first instance will contain exactly one record for each class label, and the second instance will contain all remaining data.
entailment
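A hedged usage sketch of `split`; the file name is hypothetical and the `Data` constructor is assumed to accept a CSV path whose header encodes attribute names, types, and the class flag (as parsed by `_read_header` above).

data = Data('weather.csv')                   # hypothetical CSV path
train, test = data.split(ratio=0.8)          # ~80% of rows land in train, the rest in test
seed, rest = data.split(leave_one_out=True)  # exactly one record per class label in seed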
def _get_attribute_value_for_node(self, record):
    """
    Gets the closest value for the current node's attribute matching the
    given record.
    """
    # Abort if this node has not yet split on an attribute.
    if self.attr_name is None:
        return

    # Otherwise, lookup the attribute value for this node in the
    # given record.
    attr = self.attr_name
    attr_value = record[attr]
    attr_values = self.get_values(attr)
    if attr_value in attr_values:
        return attr_value
    else:
        # The value of the attribute in the given record does not directly
        # map to any previously known values, so apply a missing value
        # policy.
        policy = self.tree.missing_value_policy.get(attr)
        assert policy, \
            "No missing value policy specified for attribute %s." % (attr,)
        if policy == USE_NEAREST:
            # Use the value that the tree has seen that has the smallest
            # Euclidean distance to the actual value.
            assert self.tree.data.header_types[attr] \
                in (ATTR_TYPE_DISCRETE, ATTR_TYPE_CONTINUOUS), \
                "The use-nearest policy is invalid for nominal types."
            nearest = (1e999999, None)
            for _value in attr_values:
                nearest = min(
                    nearest,
                    (abs(_value - attr_value), _value))
            _, nearest_value = nearest
            return nearest_value
        else:
            raise Exception("Unknown missing value policy: %s" % (policy,))
Gets the closest value for the current node's attribute matching the given record.
entailment
def get_values(self, attr_name): """ Retrieves the unique set of values seen for the given attribute at this node. """ ret = list(self._attr_value_cdist[attr_name].keys()) \ + list(self._attr_value_counts[attr_name].keys()) \ + list(self._branches.keys()) ret = set(ret) return ret
Retrieves the unique set of values seen for the given attribute at this node.
entailment
def get_best_splitting_attr(self): """ Returns the name of the attribute with the highest gain. """ best = (-1e999999, None) for attr in self.attributes: best = max(best, (self.get_gain(attr), attr)) best_gain, best_attr = best return best_attr
Returns the name of the attribute with the highest gain.
entailment
def get_entropy(self, attr_name=None, attr_value=None):
    """
    Calculates the entropy of a specific attribute/value combination.
    """
    is_con = self.tree.data.is_continuous_class
    if is_con:
        if attr_name is None:
            # Calculate variance of class attribute.
            var = self._class_cdist.variance
        else:
            # Calculate variance of the given attribute.
            var = self._attr_value_cdist[attr_name][attr_value].variance
        if self.tree.metric == VARIANCE1 or attr_name is None:
            return var
        elif self.tree.metric == VARIANCE2:
            unique_value_count = len(self._attr_value_counts[attr_name])
            attr_total = float(self._attr_value_count_totals[attr_name])
            return var*(unique_value_count/attr_total)
        else:
            raise Exception("Unknown continuous metric: %s" % (self.tree.metric,))
    else:
        if attr_name is None:
            # The total number of times this attr/value pair has been seen.
            total = float(self._class_ddist.total)
            # The total number of times each class value has been seen for
            # this attr/value pair.
            counts = self._class_ddist.counts
            # The total number of unique values seen for this attribute.
            unique_value_count = len(self._class_ddist.counts)
            # The total number of times this attribute has been seen.
            attr_total = total
        else:
            total = float(self._attr_value_counts[attr_name][attr_value])
            counts = self._attr_class_value_counts[attr_name][attr_value]
            unique_value_count = len(self._attr_value_counts[attr_name])
            attr_total = float(self._attr_value_count_totals[attr_name])
        assert total, "There must be at least one non-zero count."

        n = max(2, len(counts))
        if self._tree.metric == ENTROPY1:
            # Traditional entropy.
            return -sum(
                (count/total)*math.log(count/total, n)
                for count in itervalues(counts)
            )
        elif self._tree.metric == ENTROPY2:
            # Modified entropy that down-weights universally unique values.
            # e.g. If the number of unique attribute values equals the total
            # count of the attribute, then it has the maximum amount of unique
            # values.
            return -sum(
                (count/total)*math.log(count/total, n)
                for count in itervalues(counts)
            ) + (unique_value_count/attr_total)
        elif self._tree.metric == ENTROPY3:
            # Modified entropy that down-weights universally unique values
            # as well as features with large numbers of values.
            return -sum(
                (count/total)*math.log(count/total, n)
                for count in itervalues(counts)
            ) + 100*(unique_value_count/attr_total)
        else:
            raise Exception("Unknown discrete metric: %s" % (self._tree.metric,))
Calculates the entropy of a specific attribute/value combination.
entailment
def get_gain(self, attr_name): """ Calculates the information gain from splitting on the given attribute. """ subset_entropy = 0.0 for value in iterkeys(self._attr_value_counts[attr_name]): value_prob = self.get_value_prob(attr_name, value) e = self.get_entropy(attr_name, value) subset_entropy += value_prob * e return (self.main_entropy - subset_entropy)
Calculates the information gain from splitting on the given attribute.
entailment
def get_value_ddist(self, attr_name, attr_value): """ Returns the class value probability distribution of the given attribute value. """ assert not self.tree.data.is_continuous_class, \ "Discrete distributions are only maintained for " + \ "discrete class types." ddist = DDist() cls_counts = self._attr_class_value_counts[attr_name][attr_value] for cls_value, cls_count in iteritems(cls_counts): ddist.add(cls_value, count=cls_count) return ddist
Returns the class value probability distribution of the given attribute value.
entailment
def get_value_prob(self, attr_name, value): """ Returns the value probability of the given attribute at this node. """ if attr_name not in self._attr_value_count_totals: return n = self._attr_value_counts[attr_name][value] d = self._attr_value_count_totals[attr_name] return n/float(d)
Returns the value probability of the given attribute at this node.
entailment
def predict(self, record, depth=0): """ Returns the estimated value of the class attribute for the given record. """ # Check if we're ready to predict. if not self.ready_to_predict: raise NodeNotReadyToPredict # Lookup attribute value. attr_value = self._get_attribute_value_for_node(record) # Propagate decision to leaf node. if self.attr_name: if attr_value in self._branches: try: return self._branches[attr_value].predict(record, depth=depth+1) except NodeNotReadyToPredict: #TODO:allow re-raise if user doesn't want an intermediate prediction? pass # Otherwise make decision at current node. if self.attr_name: if self._tree.data.is_continuous_class: return self._attr_value_cdist[self.attr_name][attr_value].copy() else: # return self._class_ddist.copy() return self.get_value_ddist(self.attr_name, attr_value) elif self._tree.data.is_continuous_class: # Make decision at current node, which may be a true leaf node # or an incomplete branch in a tree currently being built. assert self._class_cdist is not None return self._class_cdist.copy() else: return self._class_ddist.copy()
Returns the estimated value of the class attribute for the given record.
entailment
def ready_to_split(self): """ Returns true if this node is ready to branch off additional nodes. Returns false otherwise. """ # Never split if we're a leaf that predicts adequately. threshold = self._tree.leaf_threshold if self._tree.data.is_continuous_class: var = self._class_cdist.variance if var is not None and threshold is not None \ and var <= threshold: return False else: best_prob = self._class_ddist.best_prob if best_prob is not None and threshold is not None \ and best_prob >= threshold: return False return self._tree.auto_grow \ and not self.attr_name \ and self.n >= self._tree.splitting_n
Returns true if this node is ready to branch off additional nodes. Returns false otherwise.
entailment
def set_leaf_dist(self, attr_value, dist): """ Sets the probability distribution at a leaf node. """ assert self.attr_name assert self.tree.data.is_valid(self.attr_name, attr_value), \ "Value %s is invalid for attribute %s." \ % (attr_value, self.attr_name) if self.is_continuous_class: assert isinstance(dist, CDist) assert self.attr_name self._attr_value_cdist[self.attr_name][attr_value] = dist.copy() # self.n += dist.count else: assert isinstance(dist, DDist) # {attr_name:{attr_value:count}} self._attr_value_counts[self.attr_name][attr_value] += 1 # {attr_name:total} self._attr_value_count_totals[self.attr_name] += 1 # {attr_name:{attr_value:{class_value:count}}} for cls_value, cls_count in iteritems(dist.counts): self._attr_class_value_counts[self.attr_name][attr_value] \ [cls_value] += cls_count
Sets the probability distribution at a leaf node.
entailment
def train(self, record): """ Incrementally update the statistics at this node. """ self.n += 1 class_attr = self.tree.data.class_attribute_name class_value = record[class_attr] # Update class statistics. is_con = self.tree.data.is_continuous_class if is_con: # For a continuous class. self._class_cdist += class_value else: # For a discrete class. self._class_ddist.add(class_value) # Update attribute statistics. for an, av in iteritems(record): if an == class_attr: continue self._attr_value_counts[an][av] += 1 self._attr_value_count_totals[an] += 1 if is_con: self._attr_value_cdist[an][av] += class_value else: self._attr_class_value_counts[an][av][class_value] += 1 # Decide if branch should split on an attribute. if self.ready_to_split: self.attr_name = self.get_best_splitting_attr() self.tree.leaf_count -= 1 for av in self._attr_value_counts[self.attr_name]: self._branches[av] = Node(tree=self.tree) self.tree.leaf_count += 1 # If we've split, then propagate the update to appropriate sub-branch. if self.attr_name: key = record[self.attr_name] del record[self.attr_name] self._branches[key].train(record)
Incrementally update the statistics at this node.
entailment
def build(cls, data, *args, **kwargs): """ Constructs a classification or regression tree in a single batch by analyzing the given data. """ assert isinstance(data, Data) if data.is_continuous_class: fitness_func = gain_variance else: fitness_func = get_gain t = cls(data=data, *args, **kwargs) t._data = data t.sample_count = len(data) t._tree = create_decision_tree( data=data, attributes=data.attribute_names, class_attr=data.class_attribute_name, fitness_func=fitness_func, wrapper=t, ) return t
Constructs a classification or regression tree in a single batch by analyzing the given data.
entailment
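A hedged end-to-end sketch; the class name `Tree`, the file name, the attribute values, and the wrapper's `predict` method are assumptions pieced together from the methods shown here.

data = Data('play_tennis.csv')           # hypothetical CSV with a flagged class attribute
tree = Tree.build(data)                  # batch construction via create_decision_tree
record = {'outlook': 1, 'humidity': 0}   # discrete attributes are ints per is_valid()
print(tree.predict(record))              # presumably a DDist (discrete) or CDist (continuous)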
def out_of_bag_mae(self): """ Returns the mean absolute error for predictions on the out-of-bag samples. """ if not self._out_of_bag_mae_clean: try: self._out_of_bag_mae = self.test(self.out_of_bag_samples) self._out_of_bag_mae_clean = True except NodeNotReadyToPredict: return return self._out_of_bag_mae.copy()
Returns the mean absolute error for predictions on the out-of-bag samples.
entailment
def out_of_bag_samples(self): """ Returns the out-of-bag samples list, inside a wrapper to keep track of modifications. """ #TODO:replace with more a generic pass-through wrapper? class O(object): def __init__(self, tree): self.tree = tree def __len__(self): return len(self.tree._out_of_bag_samples) def append(self, v): self.tree._out_of_bag_mae_clean = False return self.tree._out_of_bag_samples.append(v) def pop(self, v): self.tree._out_of_bag_mae_clean = False return self.tree._out_of_bag_samples.pop(v) def __iter__(self): for _ in self.tree._out_of_bag_samples: yield _ return O(self)
Returns the out-of-bag samples list, inside a wrapper to keep track of modifications.
entailment
def set_missing_value_policy(self, policy, target_attr_name=None): """ Sets the behavior for one or all attributes to use when traversing the tree using a query vector and it encounters a branch that does not exist. """ assert policy in MISSING_VALUE_POLICIES, \ "Unknown policy: %s" % (policy,) for attr_name in self.data.attribute_names: if target_attr_name is not None and target_attr_name != attr_name: continue self.missing_value_policy[attr_name] = policy
Sets the behavior for one or all attributes to use when traversing the tree using a query vector and it encounters a branch that does not exist.
entailment
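A short hedged sketch of the policy setter; USE_NEAREST is the only policy named in the code above, and the attribute name is hypothetical.

tree.set_missing_value_policy(USE_NEAREST)                              # apply to every attribute
tree.set_missing_value_policy(USE_NEAREST, target_attr_name='outlook')  # or to a single attribute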
def train(self, record): """ Incrementally updates the tree with the given sample record. """ assert self.data.class_attribute_name in record, \ "The class attribute must be present in the record." record = record.copy() self.sample_count += 1 self.tree.train(record)
Incrementally updates the tree with the given sample record.
entailment
def _fell_trees(self): """ Removes trees from the forest according to the specified fell method. """ if callable(self.fell_method): for tree in self.fell_method(list(self.trees)): self.trees.remove(tree)
Removes trees from the forest according to the specified fell method.
entailment
def _get_best_prediction(self, record, train=True): """ Gets the prediction from the tree with the lowest mean absolute error. """ if not self.trees: return best = (+1e999999, None) for tree in self.trees: best = min(best, (tree.mae.mean, tree)) _, best_tree = best prediction, tree_mae = best_tree.predict(record, train=train) return prediction.mean
Gets the prediction from the tree with the lowest mean absolute error.
entailment