| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
if len(lower_bounds) != solution_size or len(upper_bounds) != solution_size:
raise ValueError(
"Lower and upper bounds much have a length equal to the problem size."
)
return common.make_population(population_size, common.random_real_solution,
solution_size, lower_bounds, upper_bounds)
|
def _initial_population_gsa(population_size, solution_size, lower_bounds,
upper_bounds)
|
Create a random initial population of floating point values.
Args:
population_size: an integer representing the number of solutions in the population.
solution_size: the number of values in each solution.
lower_bounds: a list, each value is a lower bound for the corresponding
part of the solution.
upper_bounds: a list, each value is an upper bound for the corresponding
part of the solution.
Returns:
list; A list of random solutions.
| 3.546753
| 3.902716
| 0.908791
|
# Update the gravitational constant, and the best and worst of the population
# Calculate the mass and acceleration for each solution
# Update the velocity and position of each solution
population_size = len(population)
solution_size = len(population[0])
# In GSA paper, grav is G
grav = _next_grav_gsa(grav_initial, grav_reduction_rate, iteration,
max_iterations)
masses = _get_masses(fitnesses)
# Create bundled solution with position and mass for the K best calculation
# Also store index to later check if two solutions are the same
# Sorted by solution fitness (mass)
solutions = [{
'pos': pos,
'mass': mass,
'index': i
} for i, (pos, mass) in enumerate(zip(population, masses))]
solutions.sort(key=lambda x: x['mass'], reverse=True)
# Get the force on each solution
# Only the best K solutions apply force
# K linearly decreases to 1
num_best = int(population_size - (population_size - 1) *
(iteration / float(max_iterations)))
forces = []
for i in range(population_size):
force_vectors = []
for j in range(num_best):
# If it is not the same solution
if i != solutions[j]['index']:
force_vectors.append(
_gsa_force(grav, masses[i], solutions[j]['mass'],
population[i], solutions[j]['pos']))
forces.append(_gsa_total_force(force_vectors, solution_size))
# Get the acceleration of each solution
accelerations = []
for i in range(population_size):
accelerations.append(_gsa_acceleration(forces[i], masses[i]))
# Update the velocity of each solution
new_velocities = []
for i in range(population_size):
new_velocities.append(
_gsa_update_velocity(velocities[i], accelerations[i]))
# Create the new population
new_population = []
for i in range(population_size):
new_position = _gsa_update_position(population[i], new_velocities[i])
# Constrain to bounds
new_position = list(
numpy.clip(new_position, lower_bounds, upper_bounds))
new_population.append(new_position)
return new_population, new_velocities
|
def _new_population_gsa(population, fitnesses, velocities, lower_bounds,
upper_bounds, grav_initial, grav_reduction_rate,
iteration, max_iterations)
|
Generate a new population as given by GSA algorithm.
In GSA paper, grav_initial is G_i
| 3.139868
| 3.056599
| 1.027242
|
return grav_initial * math.exp(
-grav_reduction_rate * iteration / float(max_iterations))
|
def _next_grav_gsa(grav_initial, grav_reduction_rate, iteration,
max_iterations)
|
Calculate G as given by GSA algorithm.
In GSA paper, grav is G
| 3.547425
| 4.477486
| 0.79228
|
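For reference, the update in `_next_grav_gsa` above is the exponential decay of the gravitational constant from the GSA paper; writing `grav_initial` as $G_0$, `grav_reduction_rate` as $\alpha$, `iteration` as $t$, and `max_iterations` as $T$ (symbol names chosen here only for readability):

$$G(t) = G_0 \, e^{-\alpha t / T}$$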
# Obtain constants
best_fitness = max(fitnesses)
worst_fitness = min(fitnesses)
fitness_range = best_fitness - worst_fitness
# Calculate raw masses for each solution
raw_masses = []
for fitness in fitnesses:
# Epsilon is added to prevent divide by zero errors
raw_masses.append((fitness - worst_fitness) / (fitness_range + EPSILON)
+ EPSILON)
# Normalize to obtain final mass for each solution
total_mass = sum(raw_masses)
masses = []
for mass in raw_masses:
masses.append(mass / total_mass)
return masses
|
def _get_masses(fitnesses)
|
Convert fitnesses into masses, as given by GSA algorithm.
| 2.971659
| 2.901157
| 1.024301
|
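In the same notation, `_get_masses` above matches the mass normalization of the GSA paper; with $fit_i$ the fitness of solution $i$ and $\epsilon$ the small constant used to avoid division by zero:

$$m_i = \frac{fit_i - \min_j fit_j}{\max_j fit_j - \min_j fit_j + \epsilon} + \epsilon, \qquad M_i = \frac{m_i}{\sum_j m_j}$$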
position_diff = numpy.subtract(position_j, position_i)
distance = numpy.linalg.norm(position_diff)
# The first 3 terms give the magnitude of the force
# The last term is a vector that provides the direction
# Epsilon prevents divide by zero errors
return grav * (mass_i * mass_j) / (distance + EPSILON) * position_diff
|
def _gsa_force(grav, mass_i, mass_j, position_i, position_j)
|
Gives the force of solution j on solution i.
Variable names from the GSA paper are given in parentheses.
args:
grav: The gravitational constant. (G)
mass_i: The mass of solution i (derived from fitness). (M_i)
mass_j: The mass of solution j (derived from fitness). (M_j)
position_i: The position of solution i. (x_i)
position_j: The position of solution j. (x_j)
returns:
numpy.array; The force vector of solution j on solution i.
| 5.02235
| 5.428835
| 0.925125
|
if len(force_vectors) == 0:
return [0.0] * vector_length
# The GSA algorithm specifies that the total force in each dimension
# is a random sum of the individual forces in that dimension.
# For this reason we sum the dimensions individually instead of simply
# using vec_a+vec_b
total_force = [0.0] * vector_length
for force_vec in force_vectors:
for i in range(vector_length):
total_force[i] += random.uniform(0.0, 1.0) * force_vec[i]
return total_force
|
def _gsa_total_force(force_vectors, vector_length)
|
Return a randomly weighted sum of the force vectors.
args:
force_vectors: A list of force vectors on solution i.
returns:
numpy.array; The total force on solution i.
| 3.415038
| 3.677368
| 0.928664
|
# The GSA algorithm specifies that the new velocity for each dimension
# is a sum of a random fraction of its current velocity in that dimension,
# and its acceleration in that dimension
# For this reason we sum the dimensions individually instead of simply
# using vec_a+vec_b
new_velocity = []
for vel, acc in zip(velocity, acceleration):
new_velocity.append(random.uniform(0.0, 1.0) * vel + acc)
return new_velocity
|
def _gsa_update_velocity(velocity, acceleration)
|
Stochastically update velocity given acceleration.
In GSA paper, velocity is v_i, acceleration is a_i
| 5.922002
| 6.193501
| 0.956164
|
# Selection
# Create the population of parents that will be crossed and mutated.
intermediate_population = selection_function(population, fitnesses)
# Crossover
new_population = _crossover(intermediate_population, crossover_chance,
crossover_function)
# Mutation
# Mutates chromosomes in place
gaoperators.random_flip_mutate(new_population, mutation_chance)
# Return new population
return new_population
|
def _new_population_genalg(population,
fitnesses,
mutation_chance=0.02,
crossover_chance=0.7,
selection_function=gaoperators.tournament_selection,
crossover_function=gaoperators.one_point_crossover)
|
Perform all genetic algorithm operations on a population, and return a new population.
population must have an even number of chromosomes.
Args:
population: A list of binary lists, ex. [[0,1,1,0], [1,0,1,0]]
fitnesses: A list of fitnesses that correspond with chromosomes in the population,
ex. [1.2, 10.8]
mutation_chance: the chance that a bit will be flipped during mutation
crossover_chance: the chance that two parents will be crossed during crossover
selection_function: A function that will select parents for crossover and mutation
crossover_function: A function that will cross two parents
Returns:
list; A new population of chromosomes, that should be more fit.
| 3.960755
| 4.364309
| 0.907533
|
new_population = []
for i in range(0, len(population), 2): # For every other index
# Take parents from every set of 2 in the population
# Wrap index if out of range
try:
parents = (population[i], population[i + 1])
except IndexError:
parents = (population[i], population[0])
# If crossover takes place
if random.uniform(0.0, 1.0) <= crossover_chance:
# Add children to the new population
new_population.extend(crossover_operator(parents))
else:
new_population.extend(parents)
return new_population
|
def _crossover(population, crossover_chance, crossover_operator)
|
Perform crossover on a population, return the new crossed-over population.
| 3.172264
| 3.110704
| 1.01979
|
return [
random.uniform(lower_bounds[i], upper_bounds[i])
for i in range(solution_size)
]
|
def random_real_solution(solution_size, lower_bounds, upper_bounds)
|
Make a list of random real numbers between lower and upper bounds.
| 2.183162
| 2.046851
| 1.066596
|
return [
solution_generator(*args, **kwargs) for _ in range(population_size)
]
|
def make_population(population_size, solution_generator, *args, **kwargs)
|
Make a population with the supplied generator.
| 2.968879
| 2.822238
| 1.051959
|
# Optimization if diversity factor is disabled
if diversity_weight <= 0.0:
fitness_pop = zip(fitnesses,
population) # Zip for easy fitness comparison
# Get num_competitors random chromosomes, then add best to result,
# by taking max fitness and getting chromosome from tuple.
# Repeat until full.
return [
max(random.sample(fitness_pop, num_competitors))[1]
for _ in range(len(population))
]
else:
indices = range(len(population))
# Select tournament winners by either max fitness or diversity.
# The metric to check is randomly selected, weighted by diversity_weight.
# diversity_metric is calculated between the given solution,
# and the list of all currently selected solutions.
selected_solutions = []
# Select as many solutions as there are in the population
for _ in range(len(population)):
competitor_indices = random.sample(indices, num_competitors)
# Select by either fitness or diversity,
# Selected by weighted random selection
# NOTE: We assume fitness has a weight of 1.0
if random.uniform(0.0, 1.0) < (1.0 / (1.0 + diversity_weight)):
# Fitness
selected_solutions.append(
max(
zip([fitnesses[i] for i in competitor_indices],
[population[i] for i in competitor_indices]))[-1])
else:
# Diversity
# Break ties by fitness
selected_solutions.append(
max(
zip([
_diversity_metric(population[i], selected_solutions
) for i in competitor_indices
], [fitnesses[i] for i in competitor_indices],
[population[i] for i in competitor_indices]))[-1])
return selected_solutions
|
def tournament_selection(population,
fitnesses,
num_competitors=2,
diversity_weight=0.0)
|
Create a list of parents with tournament selection.
Args:
population: A list of solutions.
fitnesses: A list of fitness values corresponding to solutions in population.
num_competitors: Number of solutions to compare every round.
Best solution among competitors is selected.
diversity_weight: Weight of diversity metric.
Determines how frequently diversity is used to select tournament winners.
Note that fitness is given a weight of 1.0.
diversity_weight == 1.0 gives equal weight to diversity and fitness.
| 4.850502
| 4.71158
| 1.029485
|
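A minimal usage sketch for `tournament_selection` as defined above (the population and fitness values are made up for illustration, and the function is assumed to be in scope; with the default `diversity_weight=0.0` only fitness decides each tournament):

```python
import random

random.seed(0)  # repeatable illustration

population = [[0, 1, 1, 0], [1, 0, 1, 0], [1, 1, 1, 1], [0, 0, 0, 1]]
fitnesses = [1.2, 10.8, 7.5, 0.3]

# Each tournament draws num_competitors random solutions and keeps the fittest,
# so high-fitness chromosomes tend to appear several times among the parents.
parents = tournament_selection(population, fitnesses, num_competitors=2)
print(parents)  # a list of len(population) chromosomes drawn from population
```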
pop_size = len(population)
probabilities = _fitnesses_to_probabilities(fitnesses)
# Create selection list (for stochastic universal sampling)
selection_list = []
selection_spacing = 1.0 / pop_size
selection_start = random.uniform(0.0, selection_spacing)
for i in range(pop_size):
selection_list.append(selection_start + selection_spacing * i)
# Select intermediate population according to selection list
intermediate_population = []
for selection in selection_list:
for (i, probability) in enumerate(probabilities):
if probability >= selection:
intermediate_population.append(population[i])
break
random.shuffle(intermediate_population)
return intermediate_population
|
def stochastic_selection(population, fitnesses)
|
Create a list of parents with stochastic universal sampling.
| 2.829669
| 2.739012
| 1.033099
|
probabilities = _fitnesses_to_probabilities(fitnesses)
intermediate_population = []
for _ in range(len(population)):
# Choose a random individual
selection = random.uniform(0.0, 1.0)
# Iterate over probabilities list
for i, probability in enumerate(probabilities):
if probability >= selection: # First probability that is greater
intermediate_population.append(population[i])
break
return intermediate_population
|
def roulette_selection(population, fitnesses)
|
Create a list of parents with roulette selection.
| 3.579788
| 3.53331
| 1.013154
|
# Subtract min, making smallest value 0
min_val = min(vector)
vector = [v - min_val for v in vector]
# Divide by max, making largest value 1
max_val = float(max(vector))
try:
return [v / max_val for v in vector]
except ZeroDivisionError: # All values are the same
return [1.0] * len(vector)
|
def _rescale(vector)
|
Scale values in vector to the range [0, 1].
Args:
vector: A list of real values.
| 3.178751
| 3.218361
| 0.987692
|
# Edge case for empty population
# If there are no other solutions, the given solution has maximum diversity
if population == []:
return 1.0
return (
sum([_manhattan_distance(solution, other) for other in population])
# Normalize (assuming each value in solution is in range [0.0, 1.0])
# NOTE: len(solution) is maximum manhattan distance
/ (len(population) * len(solution)))
|
def _diversity_metric(solution, population)
|
Return diversity value for solution compared to given population.
Metric is sum of distance between solution and each solution in population,
normalized to [0.0, 1.0].
| 6.293969
| 5.772027
| 1.090426
|
if len(vec_a) != len(vec_b):
raise ValueError('len(vec_a) must equal len(vec_b)')
return sum(map(lambda a, b: abs(a - b), vec_a, vec_b))
|
def _manhattan_distance(vec_a, vec_b)
|
Return manhattan distance between two lists of numbers.
| 2.053229
| 1.800551
| 1.140334
|
# Do not allow negative fitness values
min_fitness = min(fitnesses)
if min_fitness < 0.0:
# Make smallest fitness value 0
fitnesses = map(lambda f: f - min_fitness, fitnesses)
fitness_sum = sum(fitnesses)
# Generate probabilities
# Creates a list of increasing values.
# The greater the gap between two values, the greater the probability.
# Ex. [0.1, 0.23, 0.56, 1.0]
prob_sum = 0.0
probabilities = []
for fitness in fitnesses:
if fitness < 0:
raise ValueError(
"Fitness cannot be negative, fitness = {}.".format(fitness))
prob_sum += (fitness / fitness_sum)
probabilities.append(prob_sum)
probabilities[-1] += 0.0001 # to compensate for rounding errors
return probabilities
|
def _fitnesses_to_probabilities(fitnesses)
|
Return a list of probabilities proportional to fitnesses.
| 3.42889
| 3.372561
| 1.016702
|
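A small hand-worked instance of `_fitnesses_to_probabilities` (assuming the function above is in scope): fitnesses `[1, 2, 3, 4]` sum to 10, so the cumulative list is approximately `[0.1, 0.3, 0.6, 1.0001]`; `roulette_selection` and `stochastic_selection` then pick the first entry that meets or exceeds a random draw in [0, 1]:

```python
fitnesses = [1.0, 2.0, 3.0, 4.0]
probabilities = _fitnesses_to_probabilities(fitnesses)
print(probabilities)  # approximately [0.1, 0.3, 0.6, 1.0001]
# A draw of 0.45 passes 0.3 but not 0.6, so the third solution would be selected.
```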
# The point that the chromosomes will be crossed at (see Ex. above)
crossover_point = random.randint(1, len(parents[0]) - 1)
return (_one_parent_crossover(parents[0], parents[1], crossover_point),
_one_parent_crossover(parents[1], parents[0], crossover_point))
|
def one_point_crossover(parents)
|
Perform one point crossover on two parent chromosomes.
Select a random position in the chromosome.
Take genes to the left from one parent and the rest from the other parent.
Ex. p1 = xxxxx, p2 = yyyyy, position = 2 (starting at 0), child = xxyyy
| 3.511214
| 3.928682
| 0.893738
|
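A small sketch of `one_point_crossover` on two 5-bit parents (assuming the crossover helpers above, including the `_one_parent_crossover` helper it calls, are in scope); with a crossover point of 2 the children follow the xxyyy example in the docstring:

```python
parents = ([0, 0, 0, 0, 0], [1, 1, 1, 1, 1])

# The crossover point is chosen randomly inside the function.
child_a, child_b = one_point_crossover(parents)
# e.g. with crossover_point == 2:
#   child_a == [0, 0, 1, 1, 1] and child_b == [1, 1, 0, 0, 0]
```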
chromosome_length = len(parents[0])
children = [[], []]
for i in range(chromosome_length):
selected_parent = random.randint(0, 1)
# Take from the selected parent, and add it to child 1
# Take from the other parent, and add it to child 2
children[0].append(parents[selected_parent][i])
children[1].append(parents[1 - selected_parent][i])
return children
|
def uniform_crossover(parents)
|
Perform uniform crossover on two parent chromosomes.
Randomly take genes from one parent or the other.
Ex. p1 = xxxxx, p2 = yyyyy, child = xyxxy
| 2.641672
| 2.639406
| 1.000858
|
for chromosome in population: # For every chromosome in the population
for i in range(len(chromosome)): # For every bit in the chromosome
# If mutation takes place
if random.uniform(0.0, 1.0) <= mutation_chance:
chromosome[i] = 1 - chromosome[i]
|
def random_flip_mutate(population, mutation_chance)
|
Mutate every chromosome in a population, list is modified in place.
Mutation occurs by randomly flipping bits (genes).
| 2.588896
| 2.637138
| 0.981707
|
item_indices = {}
for i, item in enumerate(list_):
try:
item_indices[item].append(i)
except KeyError: # First time seen
item_indices[item] = [i]
return item_indices
|
def _duplicates(list_)
|
Return dict mapping item -> indices.
| 2.661853
| 1.984396
| 1.341392
|
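For illustration, `_duplicates` maps each distinct (hashable) item to the list of indices at which it appears:

```python
print(_duplicates(['a', 'b', 'a', 'c', 'b']))
# {'a': [0, 2], 'b': [1, 4], 'c': [3]}  (key order may vary)
```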
# WARNING: meta_parameters is modified inline
locked_values = {}
if parameter_locks:
for name in parameter_locks:
# Store the current optimizer value
# and remove from our dictionary of parameters to optimize
locked_values[name] = getattr(optimizer, name)
meta_parameters.pop(name)
return locked_values
|
def _parse_parameter_locks(optimizer, meta_parameters, parameter_locks)
|
Synchronize meta_parameters and locked_values.
The union of these two sets will have all necessary parameters.
locked_values will have the parameters specified in parameter_locks.
| 7.59353
| 7.303898
| 1.039654
|
# WARNING: meta_parameters is modified inline
solution_size = 0
for _, parameters in meta_parameters.iteritems():
if parameters['type'] == 'discrete':
# Binary encoding of discrete values -> log_2 N
num_values = len(parameters['values'])
binary_size = helpers.binary_size(num_values)
elif parameters['type'] == 'int':
# Use enough bits to cover range from min to max
# + 1 to include max in range
int_range = parameters['max'] - parameters['min'] + 1
binary_size = helpers.binary_size(int_range)
elif parameters['type'] == 'float':
# Use enough bits to provide fine steps between min and max
float_range = parameters['max'] - parameters['min']
# * 1000 provides 1000 values between each natural number
binary_size = helpers.binary_size(float_range * 1000)
else:
raise ValueError('Parameter type "{}" does not match known values'.
format(parameters['type']))
# Store binary size with parameters for use in decode function
parameters['binary_size'] = binary_size
solution_size += binary_size
return solution_size
|
def _get_hyperparameter_solution_size(meta_parameters)
|
Determine size of binary encoding of parameters.
Also adds binary size information for each parameter.
| 3.99592
| 3.81376
| 1.047764
|
# Locked parameters are also returned by decode function, but are not
# based on solution
def decode(solution):
# Start with our stationary (locked) parameters
hyperparameters = copy.deepcopy(locked_values)
# Obtain moving hyperparameters from binary solution
index = 0
for name, parameters in meta_parameters.iteritems():
# Obtain binary for this hyperparameter
binary_size = parameters['binary_size']
binary = solution[index:index + binary_size]
index += binary_size # Just index to start of next hyperparameter
# Decode binary
if parameters['type'] == 'discrete':
i = helpers.binary_to_int(
binary, upper_bound=len(parameters['values']) - 1)
value = parameters['values'][i]
elif parameters['type'] == 'int':
value = helpers.binary_to_int(
binary,
lower_bound=parameters['min'],
upper_bound=parameters['max'])
elif parameters['type'] == 'float':
value = helpers.binary_to_float(
binary,
lower_bound=parameters['min'],
upper_bound=parameters['max'])
else:
raise ValueError(
'Parameter type "{}" does not match known values'.format(
parameters['type']))
# Store value
hyperparameters[name] = value
return hyperparameters
return decode
|
def _make_hyperparameter_decode_func(locked_values, meta_parameters)
|
Create a function that converts the binary solution to parameters.
| 3.610339
| 3.399155
| 1.062128
|
# Create the optimizer with parameters encoded in solution
optimizer = copy.deepcopy(_optimizer)
optimizer._set_hyperparameters(parameters)
optimizer.logging = False
# Preload fitness dictionary from master, and disable clearing dict
# NOTE: master_fitness_dict will be modified inline, and therefore,
# we do not need to take additional steps to update it
if _master_fitness_dict != None: # None means low memory mode
optimizer.clear_cache = False
optimizer._Optimizer__encoded_cache = _master_fitness_dict
# Because metaheuristics are stochastic, we run the optimizer multiple times,
# to obtain an average of performance
all_evaluation_runs = []
solutions_found = []
for _ in range(_runs):
for problem in _problems:
# Get performance for problem
optimizer.optimize(problem)
all_evaluation_runs.append(optimizer.fitness_runs)
if optimizer.solution_found:
solutions_found.append(1.0)
else:
solutions_found.append(0.0)
# Our main goal is to minimize the time the optimizer takes
fitness = 1.0 / helpers.avg(all_evaluation_runs)
# Optimizer is heavily penalized for missing solutions
# To avoid 0 fitness
fitness = fitness * helpers.avg(solutions_found)**2 + 1e-19
return fitness
|
def _meta_fitness_func(parameters,
_optimizer,
_problems,
_master_fitness_dict,
_runs=20)
|
Test a metaheuristic with parameters encoded in solution.
Our goal is to minimize the number of evaluation runs until a solution is found,
while maximizing the chance of finding a solution to the underlying problem.
NOTE: while meta optimization requires a 'known' solution, this solution
can be an estimate to provide the meta optimizer with a sense of progress.
| 6.964772
| 6.261477
| 1.112321
|
if fitness_function is None:
fitness_function = self._fitness_function
if decode_function is None:
decode_function = self._decode_function
if fitness_args is None:
fitness_args = self._fitness_args
if decode_args is None:
decode_args = self._decode_args
if fitness_kwargs is None:
fitness_kwargs = self._fitness_kwargs
if decode_kwargs is None:
decode_kwargs = self._decode_kwargs
return Problem(
fitness_function,
decode_function=decode_function,
fitness_args=fitness_args,
decode_args=decode_args,
fitness_kwargs=fitness_kwargs,
decode_kwargs=decode_kwargs)
|
def copy(self,
fitness_function=None,
decode_function=None,
fitness_args=None,
decode_args=None,
fitness_kwargs=None,
decode_kwargs=None)
|
Return a copy of this problem.
Optionally replace this problem's arguments with those passed in.
| 1.323544
| 1.279009
| 1.03482
|
return self._fitness_function(solution, *self._fitness_args,
**self._fitness_kwargs)
|
def get_fitness(self, solution)
|
Return fitness for the given solution.
| 4.854525
| 3.990703
| 1.216459
|
return self._decode_function(encoded_solution, *self._decode_args,
**self._decode_kwargs)
|
def decode_solution(self, encoded_solution)
|
Return solution from an encoded representation.
| 4.659289
| 4.257683
| 1.094325
|
if not isinstance(problem, Problem):
raise TypeError('problem must be an instance of Problem class')
# Prepare pool for multiprocessing
if n_processes > 0:
try:
pool = multiprocessing.Pool(processes=n_processes)
except NameError:
raise ImportError(
'pickle, dill, or multiprocessing library is not available.'
)
else:
pool = None
# Set first, in case the optimizer uses _max_iterations in initialization
self.__max_iterations = max_iterations
# Initialize algorithm
self._reset()
best_solution = {'solution': None, 'fitness': None}
population = self.initial_population()
try:
# Begin optimization loop
start = time.clock()
for self.iteration in itertools.count(1): # Infinite sequence of iterations
# Evaluate potential solutions
solutions, fitnesses, finished = self._get_fitnesses(
problem,
population,
cache_encoded=cache_encoded,
cache_solution=cache_solution,
pool=pool)
# If the best fitness from this iteration is better than
# the global best
best_index, best_fitness = max(
enumerate(fitnesses), key=operator.itemgetter(1))
if best_fitness > best_solution['fitness']:
# Store the new best solution
best_solution['fitness'] = best_fitness
best_solution['solution'] = solutions[best_index]
if logging_func:
logging_func(self.iteration, population, solutions,
fitnesses, best_solution['solution'],
best_solution['fitness'])
# Break if solution found
if finished:
self.solution_found = True
break
# Break if out of time
if time.clock() - start >= max_seconds:
break
# Break if out of iterations
if self.iteration >= max_iterations:
break
# Continue optimizing
population = self.next_population(population, fitnesses)
# Store best internally, before returning
self.best_solution = best_solution['solution']
self.best_fitness = best_solution['fitness']
finally:
# Clear caches
if clear_cache:
# Clear caches from memory
self.__encoded_cache = {}
self.__solution_cache = {}
# Reset encoded, and decoded key functions
self._get_encoded_key = self._get_encoded_key_type
self._get_solution_key = self._get_solution_key_type
# Clean up multiprocesses
try:
pool.terminate() # Kill outstanding work
pool.close() # Close child processes
except AttributeError:
# No pool
assert pool is None
return self.best_solution
|
def optimize(self, problem, max_iterations=100, max_seconds=float('inf'),
cache_encoded=True, cache_solution=False, clear_cache=True,
logging_func=_print_fitnesses,
n_processes=0)
|
Find the optimal inputs for a given fitness function.
Args:
problem: An instance of Problem. The problem to solve.
max_iterations: The number of iterations to optimize before stopping.
max_seconds: Maximum number of seconds to optimize for, before stopping.
Note that this condition is only checked once per iteration,
meaning optimization can take more than max_seconds,
especially if fitnesses take a long time to calculate.
cache_encoded: bool; Whether or not to cache fitness of encoded strings.
Encoded strings are produced directly by the optimizer.
If an encoded string is found in cache, it will not be decoded.
cache_solution: bool; Whether or not to cache fitness of decoded solutions.
Decoded solutions are provided by the problem's decode function.
If the problem does not provide a hash solution function,
various naive hashing methods will be attempted, including:
tuple, tuple(sorted(dict.items)), str.
clear_cache: bool; Whether or not to reset cache after optimization.
Disable if you want to run optimize multiple times on the same problem.
logging_func: func/None; Function taking:
iteration, population, solutions, fitnesses, best_solution, best_fitness
Called after every iteration.
Use for custom logging, or set to None to disable logging.
Note that best_solution and best_fitness are the best of all iterations so far.
n_processes: int; Number of processes to use for multiprocessing.
If <= 0, do not use multiprocessing.
Returns:
object; The best solution, after decoding.
| 3.339907
| 3.26942
| 1.02156
|
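A minimal end-to-end sketch of the `optimize` entry point, using `GenAlg` and `Problem` from this library; the decode and fitness functions are hypothetical, and the two-value `(fitness, solution_found)` return is an assumption about the expected fitness-function convention:

```python
from optimal import GenAlg, Problem

def decode_binary(binary):
    # Hypothetical decode: read the bit list as an unsigned integer.
    return sum(bit * 2**i for i, bit in enumerate(reversed(binary)))

def count_fitness(value):
    # Hypothetical fitness: larger decoded values are better.
    # Returning (fitness, solution_found) is assumed here; the exact
    # convention may differ between library versions.
    return float(value), value >= 255

problem = Problem(count_fitness, decode_function=decode_binary)
optimizer = GenAlg(8)  # 8-bit encoded solutions
best = optimizer.optimize(problem, max_iterations=50)
print(best)  # decoded best solution found within 50 iterations
```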
self.iteration = 0
self.fitness_runs = 0
self.best_solution = None
self.best_fitness = None
self.solution_found = False
|
def _reset_bookkeeping(self)
|
Reset bookkeeping parameters to initial values.
Call before beginning optimization.
| 5.019402
| 4.526923
| 1.108789
|
if keys is not None: # Otherwise, cannot hash items
# Remove duplicates first (use keys)
# Create mapping (dict) of key to list of indices
key_indices = _duplicates(keys).values()
else: # Cannot hash items
# Assume no duplicates
key_indices = [[i] for i in range(len(items))]
# Use only the first of duplicate indices in decoding
if pool is not None:
# Parallel map
results = pool.map(
functools.partial(_unpickle_run, pickle.dumps(func)),
[items[i[0]] for i in key_indices])
else:
results = map(func, [items[i[0]] for i in key_indices])
# Add bookkeeping
if bookkeeping_dict is not None:
bookkeeping_dict['key_indices'] = key_indices
# Combine duplicates back into list
all_results = [None] * len(items)
for indices, result in zip(key_indices, results):
for j, i in enumerate(indices):
# Avoid duplicate result objects in list,
# in case they are used in functions with side effects
if j > 0:
result = copy.deepcopy(result)
all_results[i] = result
return all_results
|
def _pmap(self, func, items, keys, pool, bookkeeping_dict=None)
|
Efficiently map func over all items.
Calls func only once for duplicate items.
Item duplicates are detected by their corresponding keys, unless keys is None.
Runs serially if pool is None, but still skips duplicates.
| 4.474423
| 4.400955
| 1.016694
|
for name, value in parameters.iteritems():
try:
getattr(self, name)
except AttributeError:
raise ValueError(
'Each parameter in parameters must be an attribute. '
'{} is not.'.format(name))
setattr(self, name, value)
|
def _set_hyperparameters(self, parameters)
|
Set internal optimization parameters.
| 3.731776
| 3.459814
| 1.078606
|
hyperparameters = {}
for key in self._hyperparameters:
hyperparameters[key] = getattr(self, key)
return hyperparameters
|
def _get_hyperparameters(self)
|
Get internal optimization parameters.
| 3.106309
| 2.69789
| 1.151385
|
if smoothing <= 0:
raise ValueError('smoothing must be > 0')
# problems supports either one or many problem instances
if isinstance(problems, collections.Iterable):
for problem in problems:
if not isinstance(problem, Problem):
raise TypeError(
'problem must be Problem instance or list of Problem instances'
)
elif isinstance(problems, Problem):
problems = [problems]
else:
raise TypeError(
'problem must be Problem instance or list of Problem instances'
)
# Copy to avoid permanent modification
meta_parameters = copy.deepcopy(self._hyperparameters)
# First, handle parameter locks, since it will modify our
# meta_parameters dict
locked_values = _parse_parameter_locks(self, meta_parameters,
parameter_locks)
# We need to know the size of our chromosome,
# based on the hyperparameters to optimize
solution_size = _get_hyperparameter_solution_size(meta_parameters)
# We also need to create a decode function to transform the binary solution
# into parameters for the metaheuristic
decode = _make_hyperparameter_decode_func(locked_values,
meta_parameters)
# A master fitness dictionary can be stored for use between calls
# to meta_fitness
if _low_memory:
master_fitness_dict = None
else:
master_fitness_dict = {}
additional_parameters = {
'_optimizer': self,
'_problems': problems,
'_runs': smoothing,
'_master_fitness_dict': master_fitness_dict,
}
META_FITNESS = Problem(
_meta_fitness_func,
decode_function=decode,
fitness_kwargs=additional_parameters)
if _meta_optimizer is None:
# Initialize default meta optimizer
# GenAlg is used because it supports both discrete and continuous
# attributes
from optimal import GenAlg
# Create metaheuristic with computed decode function and solution size
_meta_optimizer = GenAlg(solution_size)
else:
# Adjust supplied metaheuristic for this problem
_meta_optimizer._solution_size = solution_size
# Determine the best hyperparameters with a metaheuristic
best_parameters = _meta_optimizer.optimize(
META_FITNESS, max_iterations=max_iterations)
# Set the hyperparameters inline
self._set_hyperparameters(best_parameters)
# And return
return best_parameters
|
def optimize_hyperparameters(self,
problems,
parameter_locks=None,
smoothing=20,
max_iterations=100,
_meta_optimizer=None,
_low_memory=True)
|
Optimize hyperparameters for a given problem.
Args:
parameter_locks: a list of strings, each corresponding to a hyperparameter
that should not be optimized.
problems: Either a single problem, or a list of problem instances,
allowing optimization based on multiple similar problems.
smoothing: int; number of runs to average over for each set of hyperparameters.
max_iterations: The number of iterations to optimize before stopping.
_low_memory: disable performance enhancements to save memory
(they use a lot of memory otherwise).
| 5.038613
| 5.05813
| 0.996141
|
if not (isinstance(optimizers, collections.Iterable)
or isinstance(problems, collections.Iterable)):
raise TypeError('optimizers or problems must be iterable')
# If optimizers is not a list, repeat into list for each problem
if not isinstance(optimizers, collections.Iterable):
optimizers = [copy.deepcopy(optimizers) for _ in range(len(problems))]
# If problems is not a list, repeat into list for each optimizer
if not isinstance(problems, collections.Iterable):
problems = [copy.deepcopy(problems) for _ in range(len(optimizers))]
# If all_kwargs is not a list, repeat it into a list
if isinstance(all_kwargs, dict):
all_kwargs = [all_kwargs] * len(optimizers)
elif not isinstance(all_kwargs, collections.Iterable):
raise TypeError('all_kwargs must be dict or list of dict')
stats = {}
key_counts = {}
for optimizer, problem, kwargs in zip(optimizers, problems, all_kwargs):
# For nice human readable dictionaries, extract useful names from
# optimizer
class_name = optimizer.__class__.__name__
fitness_func_name = problem._fitness_function.__name__
key_name = '{} {}'.format(class_name, fitness_func_name)
# Keep track of how many optimizers of each class / fitness func
# for better keys in stats dict
try:
key_counts[key_name] += 1
except KeyError:
key_counts[key_name] = 1
# Foo 1, Foo 2, Bar 1, etc.
key = '{} {}'.format(key_name, key_counts[key_name])
print key + ': ',
# Finally, get the actual stats
stats[key] = benchmark(optimizer, problem, runs=runs, **kwargs)
print
return stats
|
def compare(optimizers, problems, runs=20, all_kwargs={})
|
Compare a set of optimizers.
Args:
optimizers: list/Optimizer; Either a list of optimizers to compare,
or a single optimizer to test on each problem.
problems: list/Problem; Either a problem instance or a list of problem instances,
one for each optimizer.
all_kwargs: dict/list<dict>; Either the Optimizer.optimize keyword arguments
for all optimizers, or a list of keyword arguments, one for each optimizer.
runs: int; How many times to run each optimizer (for smoothing).
Returns:
dict; mapping optimizer identifier to stats.
| 2.731803
| 2.673008
| 1.021996
|
stats = {'runs': []}
# Disable logging, to avoid spamming the user
# TODO: Maybe we shouldn't disable by default?
kwargs = copy.copy(kwargs)
kwargs['logging_func'] = None
# Determine effectiveness of metaheuristic over many runs
# The stochastic nature of metaheuristics makes this necessary
# for an accurate evaluation
for _ in range(runs):
optimizer.optimize(problem, **kwargs)
# Convert bool to number for mean and standard deviation calculations
if optimizer.solution_found:
finished_num = 1.0
else:
finished_num = 0.0
stats_ = {
'fitness': optimizer.best_fitness,
'fitness_runs': optimizer.fitness_runs,
'solution_found': finished_num
}
stats['runs'].append(stats_)
# Little progress 'bar'
print '.',
# Mean gives a good overall idea of the metaheuristic's effectiveness
# Standard deviation (SD) shows consistency of performance
_add_mean_sd_to_stats(stats)
return stats
|
def benchmark(optimizer, problem, runs=20, **kwargs)
|
Run an optimizer through a problem multiple times.
Args:
optimizer: Optimizer; The optimizer to benchmark.
problem: Problem; The problem to benchmark on.
runs: int > 0; Number of times that optimize is called on problem.
Returns:
dict; A dictionary of various statistics.
| 7.367486
| 7.717766
| 0.954614
|
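A hedged sketch of calling `benchmark` (the optimizer and problem here are placeholders); per-run stats are collected under 'runs', and `_add_mean_sd_to_stats` adds the 'mean' and 'standard_deviation' summaries that `aggregate` reads later:

```python
# `optimizer` and `problem` stand in for any Optimizer / Problem pair.
stats = benchmark(optimizer, problem, runs=20)
print(stats['mean'])                # averages of fitness, fitness_runs, solution_found
print(stats['standard_deviation'])  # consistency of the same quantities across runs
```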
aggregate_stats = {'means': [], 'standard_deviations': []}
for optimizer_key in all_stats:
# runs is the mean, for add_mean_sd function
mean_stats = copy.deepcopy(all_stats[optimizer_key]['mean'])
mean_stats['name'] = optimizer_key
aggregate_stats['means'].append(mean_stats)
# also keep track of standard deviations
sd_stats = copy.deepcopy(
all_stats[optimizer_key]['standard_deviation'])
sd_stats['name'] = optimizer_key
aggregate_stats['standard_deviations'].append(sd_stats)
_add_mean_sd_to_stats(aggregate_stats, 'means')
return aggregate_stats
|
def aggregate(all_stats)
|
Combine stats for multiple optimizers to obtain one mean and sd.
Useful for combining stats for the same optimizer class and multiple problems.
Args:
all_stats: dict; output from compare.
| 3.244045
| 3.230903
| 1.004067
|
num_runs = len(stats[key])
first = stats[key][0]
mean = {}
for stat_key in first:
# Skip non-numeric attributes
if isinstance(first[stat_key], numbers.Number):
mean[stat_key] = sum(run[stat_key]
for run in stats[key]) / float(num_runs)
return mean
|
def _mean_of_runs(stats, key='runs')
|
Obtain the mean of stats.
Args:
stats: dict; A set of stats, structured as above.
key: str; Optional key to determine where list of runs is found in stats
| 3.350061
| 3.747424
| 0.893964
|
num_runs = len(stats[key])
first = stats[key][0]
standard_deviation = {}
for stat_key in first:
# Skip non-numeric attributes
if isinstance(first[stat_key], numbers.Number):
standard_deviation[stat_key] = math.sqrt(
sum((run[stat_key] - mean[stat_key])**2
for run in stats[key]) / float(num_runs))
return standard_deviation
|
def _sd_of_runs(stats, mean, key='runs')
|
Obtain the standard deviation of stats.
Args:
stats: dict; A set of stats, structured as above.
mean: dict; Mean for each key in stats.
key: str; Optional key to determine where list of runs is found in stats
| 2.8613
| 3.146411
| 0.909385
|
return map(int,
numpy.random.random(probability_vec.size) <= probability_vec)
|
def _sample(probability_vec)
|
Return a random binary list, with the given probabilities.
| 8.048223
| 7.019969
| 1.146476
|
best_solution = max(zip(fitnesses, population))[1]
# Shift probabilities towards best solution
return _adjust(probability_vec, best_solution, adjust_rate)
|
def _adjust_probability_vec_best(population, fitnesses, probability_vec,
adjust_rate)
|
Shift probabilities towards the best solution.
| 6.726266
| 4.887636
| 1.37618
|
bits_to_mutate = numpy.random.random(probability_vec.size) <= mutation_chance
probability_vec[bits_to_mutate] = _adjust(
probability_vec[bits_to_mutate],
numpy.random.random(numpy.sum(bits_to_mutate)), mutation_adjust_rate)
|
def _mutate_probability_vec(probability_vec, mutation_chance, mutation_adjust_rate)
|
Randomly adjust probabilities.
WARNING: Modifies probability_vec argument.
| 2.833557
| 3.073611
| 0.921899
|
# Update probability vector
self._probability_vec = _adjust_probability_vec_best(
population, fitnesses, self._probability_vec, self._adjust_rate)
# Mutate probability vector
_mutate_probability_vec(self._probability_vec, self._mutation_chance,
self._mutation_adjust_rate)
# Return new samples
return [
_sample(self._probability_vec)
for _ in range(self._population_size)
]
|
def next_population(self, population, fitnesses)
|
Make a new population after each optimization iteration.
Args:
population: The current population of solutions.
fitnesses: The fitness associated with each solution in the population
Returns:
list; a list of solutions.
| 4.568827
| 4.944593
| 0.924005
|
# Get our benchmark stats
all_stats = benchmark.compare(optimizer, PROBLEMS, runs=100)
return benchmark.aggregate(all_stats)
|
def benchmark_multi(optimizer)
|
Benchmark an optimizer configuration on multiple functions.
| 13.309964
| 14.041458
| 0.947905
|
population = []
for _ in range(population_size):
solution = []
for probability in probabilities:
# probability of 1.0: always 1
# probability of 0.0: always 0
if random.uniform(0.0, 1.0) < probability:
solution.append(1)
else:
solution.append(0)
population.append(solution)
return population
|
def _sample(probabilities, population_size)
|
Return a random population, drawn with regard to a set of probabilities
| 2.313186
| 2.220696
| 1.041649
|
# 1.0 - abs(bit - p) gives probability of bit given p
return _prod([1.0 - abs(bit - p) for bit, p in zip(solution, pdf)])
|
def _chance(solution, pdf)
|
Return the chance of obtaining a solution from a pdf.
The probability of many independent weighted "coin flips" (one for each bit)
| 7.894166
| 8.208694
| 0.961684
|
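A hand-worked instance of `_chance` (assuming `_chance` and its `_prod` product helper are in scope): for solution `[1, 0, 1]` and pdf `[0.8, 0.3, 0.9]`, each bit contributes `1.0 - abs(bit - p)`, so the chance is 0.8 * 0.7 * 0.9 = 0.504:

```python
print(_chance([1, 0, 1], [0.8, 0.3, 0.9]))  # 0.504
```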
# Add the chance of obtaining a solution from the pdf
# when the fitness for that solution exceeds a threshold
value = 0.0
for solution, fitness in zip(population, fitnesses):
if fitness >= fitness_threshold:
# 1.0 + chance to avoid issues with chance of 0
value += math.log(1.0 + _chance(solution, pdf))
# The official equation states that value is now divided by len(fitnesses)
# however, this is unnecessary when we are only obtaining the best pdf,
# because every solution is of the same size
return value
|
def _pdf_value(pdf, population, fitnesses, fitness_threshold)
|
Give the value of a pdf.
This represents the likelihood of a pdf generating solutions
that exceed the threshold.
| 9.429694
| 9.535213
| 0.988934
|
# First we determine a fitness threshold based on a quantile of fitnesses
fitness_threshold = _get_quantile_cutoff(fitnesses, quantile)
# Then check all of our possible pdfs with a stochastic program
return _best_pdf(pdfs, population, fitnesses, fitness_threshold)
|
def _update_pdf(population, fitnesses, pdfs, quantile)
|
Find a better pdf, based on fitnesses.
| 8.363179
| 7.465424
| 1.120255
|
# Edge case for empty binary_list
if binary_list == []:
# With 0 bits, only one value can be represented,
# and we default to lower_bound
return lower_bound
# A little bit of math gets us a floating point
# number between upper and lower bound
# We look at the relative position of
# the integer corresponding to our binary list
# between the upper and lower bound,
# and offset that by lower bound
return ((
# Range between lower and upper bound
float(upper_bound - lower_bound)
# Divided by the maximum possible integer
/ (2**len(binary_list) - 1)
# Times the integer represented by the given binary
* binary_to_int(binary_list))
# Plus the lower bound
+ lower_bound)
|
def binary_to_float(binary_list, lower_bound, upper_bound)
|
Return a floating point number between lower and upper bounds, from binary.
Args:
binary_list: list<int>; List of 0s and 1s.
The number of bits in this list determine the number of possible
values between lower and upper bound.
Increase the size of binary_list for more precise floating points.
lower_bound: Minimum value for output, inclusive.
A binary list of 0s will have this value.
upper_bound: Maximum value for output, inclusive.
A binary list of 1s will have this value.
Returns:
float; A floating point number.
| 6.668513
| 6.580466
| 1.01338
|
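A couple of hand-checked values for `binary_to_float` (assuming the function above is in scope); with two bits there are four evenly spaced values between the bounds:

```python
print(binary_to_float([0, 0], 0.0, 1.0))  # 0.0, the lower bound
print(binary_to_float([0, 1], 0.0, 1.0))  # ~0.3333, i.e. 1/3 of the range
print(binary_to_float([1, 1], 0.0, 1.0))  # 1.0, the upper bound
```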
# Edge case for empty binary_list
if binary_list == []:
# With 0 bits, only one value can be represented,
# and we default to lower_bound
return lower_bound
else:
# The builtin int construction can take a base argument,
# but it requires a string,
# so we convert our binary list to a string
integer = int(''.join([str(bit) for bit in binary_list]), 2)
# Trim if over upper_bound
if (upper_bound is not None) and integer + lower_bound > upper_bound:
# Bounce back. Ex. w/ upper_bound = 2: [0, 1, 2, 2, 1, 0]
return upper_bound - (integer % (upper_bound - lower_bound + 1))
else:
# Not over upper_bound
return integer + lower_bound
|
def binary_to_int(binary_list, lower_bound=0, upper_bound=None)
|
Return the base 10 integer corresponding to a binary list.
The maximum value is limited both by the number of bits in binary_list
and by upper_bound, whichever is more restrictive.
Args:
binary_list: list<int>; List of 0s and 1s.
lower_bound: Minimum value for output, inclusive.
A binary list of 0s will have this value.
upper_bound: Maximum value for output, inclusive.
If greater than this bound, we "bounce back".
Ex. w/ upper_bound = 2: [0, 1, 2, 2, 1, 0]
Ex.
raw_integer = 11, upper_bound = 10, return = 10
raw_integer = 12, upper_bound = 10, return = 9
Returns:
int; Integer value of the binary input.
| 5.415735
| 4.110843
| 1.317427
|
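The bounce-back behaviour of `binary_to_int`, checked against the docstring's example (assuming the function above is in scope): `[1, 1, 0, 0]` is 12 in base 10, which exceeds `upper_bound=10` and wraps back down to 9:

```python
print(binary_to_int([1, 0, 1, 1]))                  # 11, within bounds
print(binary_to_int([1, 1, 0, 0], upper_bound=10))  # 9: 10 - (12 % 11)
```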
binary_list = map(int, format(integer, 'b'))
if size is None:
return binary_list
else:
if len(binary_list) > size:
# Too long, take only last n
return binary_list[len(binary_list)-size:]
elif size > len(binary_list):
# Too short, pad
return [0]*(size-len(binary_list)) + binary_list
else:
# Just right
return binary_list
|
def _int_to_binary(integer, size=None)
|
Return bit list representation of integer.
If size is given, binary string is padded with 0s, or clipped to the size.
| 2.635563
| 2.758444
| 0.955453
|
return common.make_population(self._population_size,
self._generate_solution)
|
def next_population(self, population, fitnesses)
|
Make a new population after each optimization iteration.
Args:
population: The current population of solutions.
fitnesses: The fitness associated with each solution in the population
Returns:
list; a list of solutions.
| 15.41732
| 19.243692
| 0.801162
|
return common.random_real_solution(
self._solution_size, self._lower_bounds, self._upper_bounds)
|
def _generate_solution(self)
|
Return a single random solution.
| 9.556029
| 6.266217
| 1.525008
|
return [self._next_solution() for _ in range(self._population_size)]
|
def next_population(self, population, fitnesses)
|
Make a new population after each optimization iteration.
Args:
population: The current population of solutions.
fitnesses: The fitness associated with each solution in the population
Returns:
list; a list of solutions.
| 5.939965
| 6.830631
| 0.869607
|
groups = self._groups or self.get_children_paths(self.root_path)
for group in groups:
node = Node(name=group, parent=self.root)
self.root.children.append(node)
self._init_sub_groups(node)
|
def _build_tree(self)
|
Build a full or a partial tree, depending on the groups/sub-groups specified.
| 4.866901
| 4.062959
| 1.197871
|
if self._sub_groups:
for sub_group in self._sub_groups:
for component in split_path_components(sub_group):
fp = os.path.join(parent.full_path, component)
if os.path.exists(fp):
node = Node(name=component, parent=parent)
parent.children.append(node)
else:
node = parent.create_cgroup(component)
parent = node
self._init_children(node)
else:
self._init_children(parent)
|
def _init_sub_groups(self, parent)
|
Initialise sub-groups, and create any that do not already exist.
| 3.456556
| 3.403509
| 1.015586
|
for dir_name in self.get_children_paths(parent.full_path):
child = Node(name=dir_name, parent=parent)
parent.children.append(child)
self._init_children(child)
|
def _init_children(self, parent)
|
Initialise each node's children - essentially build the tree.
| 3.587498
| 3.219149
| 1.114424
|
if self.parent:
return os.path.join(self.parent.full_path, self.name)
return self.name
|
def full_path(self)
|
Absolute system path to the node
| 3.36552
| 3.043421
| 1.105835
|
if self.parent:
try:
parent_path = self.parent.path.encode()
except AttributeError:
parent_path = self.parent.path
return os.path.join(parent_path, self.name)
return b"/"
|
def path(self)
|
Node's relative path from the root node
| 3.532244
| 3.086197
| 1.14453
|
if self.parent is None:
return self.NODE_ROOT
elif self.parent.node_type == self.NODE_ROOT:
return self.NODE_CONTROLLER_ROOT
elif b".slice" in self.name or b'.partition' in self.name:
return self.NODE_SLICE
elif b".scope" in self.name:
return self.NODE_SCOPE
else:
return self.NODE_CGROUP
|
def _get_node_type(self)
|
Returns the current node's type
| 3.979071
| 3.73784
| 1.064538
|
if self.node_type == self.NODE_CONTROLLER_ROOT and self.name in self.CONTROLLERS:
return self.name
elif self.parent:
return self.parent.controller_type
else:
return None
|
def _get_controller_type(self)
|
Returns the current node's controller type
| 5.547673
| 4.369713
| 1.269574
|
node = Node(name, parent=self)
if node in self.children:
raise RuntimeError('Node {} already exists under {}'.format(name, self.path))
name = name.encode()
fp = os.path.join(self.full_path, name)
os.mkdir(fp)
self.children.append(node)
return node
|
def create_cgroup(self, name)
|
Create a cgroup by name and attach it under this node.
| 3.855501
| 3.517298
| 1.096154
|
name = name.encode()
fp = os.path.join(self.full_path, name)
if os.path.exists(fp):
os.rmdir(fp)
node = Node(name, parent=self)
try:
self.children.remove(node)
except ValueError:
return
|
def delete_cgroup(self, name)
|
Delete a cgroup by name and detach it from this node.
Raises OSError if the cgroup is not empty.
| 3.613364
| 3.799039
| 0.951126
|
for child in list(self.children):  # iterate over a copy, since children may be removed
child.delete_empty_children()
try:
if os.path.exists(child.full_path):
os.rmdir(child.full_path)
except OSError: pass
else: self.children.remove(child)
|
def delete_empty_children(self)
|
Walk through the children of this node and delete any that are empty.
| 2.665764
| 2.436436
| 1.094124
|
if self.controllers.get(node.controller_type, None):
raise RuntimeError("Cannot add node {} to the node group. A node for {} group is already assigned".format(
node,
node.controller_type
))
self.nodes.append(node)
if node.controller:
self.controllers[node.controller_type] = node.controller
setattr(self, node.controller_type, node.controller)
|
def add_node(self, node)
|
Add a Node object to the group. Only one node per controller type is supported.
| 3.912154
| 3.779196
| 1.035182
|
tasks = set()
for node in walk_tree(self):
for ctrl in node.controllers.values():
tasks.update(ctrl.tasks)
return tasks
|
def group_tasks(self)
|
All tasks in the hierarchy, affected by this group.
| 6.295978
| 5.724887
| 1.099756
|
tasks = set()
for ctrl in self.controllers.values():
tasks.update(ctrl.tasks)
return tasks
|
def tasks(self)
|
Tasks in this exact group
| 4.848139
| 4.413859
| 1.09839
|
return os.path.join(self.node.full_path, filename)
|
def filepath(self, filename)
|
The full path to a file
| 9.058199
| 9.48872
| 0.954628
|
with open(self.filepath(filename)) as f:
return f.read().strip()
|
def get_property(self, filename)
|
Opens the file and reads the value
| 5.829428
| 5.596986
| 1.04153
|
with open(self.filepath(filename), "w") as f:
return f.write(str(value))
|
def set_property(self, filename, value)
|
Opens the file and writes the value
| 5.258518
| 4.852909
| 1.083581
|
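A hedged sketch of the property accessors above: `get_property` and `set_property` just read or write a file inside the node's cgroup directory (the `ctrl` object and the `memory.limit_in_bytes` filename are illustrative):

```python
# Assuming `ctrl` is a controller attached to a cgroup node:
limit = ctrl.get_property('memory.limit_in_bytes')            # read the current value
ctrl.set_property('memory.limit_in_bytes', 64 * 1024 * 1024)  # write a new value
```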
yield root
for child in root.children:
for el in walk_tree(child):
yield el
|
def walk_tree(root)
|
Pre-order depth-first
| 3.213095
| 3.448675
| 0.93169
|
for child in root.children:
for el in walk_up_tree(child):
yield el
yield root
|
def walk_up_tree(root)
|
Post-order depth-first
| 3.577699
| 3.630008
| 0.98559
|
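A self-contained illustration of the two traversal orders, using a minimal stand-in node that only has the `children` attribute `walk_tree` and `walk_up_tree` rely on:

```python
class FakeNode(object):
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)

root = FakeNode('root', [FakeNode('a'), FakeNode('b')])

print([n.name for n in walk_tree(root)])     # ['root', 'a', 'b']  (pre-order)
print([n.name for n in walk_up_tree(root)])  # ['a', 'b', 'root']  (post-order)
```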
stat = os.lstat(dev_path)
return os.major(stat.st_rdev), os.minor(stat.st_rdev)
|
def get_device_major_minor(dev_path)
|
Returns the device (major, minor) tuple for simplicity
:param dev_path: Path to the device
:return: (device major, device minor)
:rtype: (int, int)
| 2.662945
| 3.014637
| 0.883339
|
schema = Schema(self.__class__.SCHEMA)
resolver = RefResolver.from_schema(
schema,
store=REGISTRY,
)
validate(self, schema, resolver=resolver)
|
def validate(self)
|
Validate that this instance matches its schema.
| 6.503329
| 5.501337
| 1.182136
|
with closing(StringIO()) as fileobj:
self.dump(fileobj)
return fileobj.getvalue()
|
def dumps(self)
|
Dump this instance as YAML.
| 4.231208
| 3.853137
| 1.09812
|
with closing(StringIO(s)) as fileobj:
return cls.load(fileobj)
|
def loads(cls, s)
|
Load an instance of this class from YAML.
| 4.994169
| 5.13394
| 0.972775
|
schema = self.__class__.SCHEMA
# first try plain properties
plain_schema = schema.get("properties", {}).get(key)
if plain_schema is not None:
return plain_schema
# then try pattern properties
pattern_properties = schema.get("patternProperties", {})
for pattern, pattern_schema in pattern_properties.items():
if match(pattern, key):
return pattern_schema
# finally try additional properties (defaults to true per JSON Schema)
return schema.get("additionalProperties", True)
|
def property_schema(self, key)
|
Lookup the schema for a specific property.
| 3.254154
| 3.201818
| 1.016346
|
return type(class_name, (base,), dict(SCHEMA=schema))
|
def make(class_name, base, schema)
|
Create a new schema aware type.
| 4.791976
| 4.732973
| 1.012466
|
class_name = make_class_name(name)
cls = register(make(class_name, base, schema))
globals()[class_name] = cls
|
def make_definition(name, base, schema)
|
Create a new definition.
| 5.427961
| 5.639076
| 0.962562
|
definition_name = make_definition_name(cls.__name__)
REGISTRY[definition_name] = cls
return cls
|
def register(cls)
|
Register a class.
| 5.465171
| 5.475899
| 0.998041
|
if not isinstance(schema, dict) or "$ref" not in schema:
return None
ref = schema["$ref"]
return REGISTRY.get(ref)
|
def lookup(schema)
|
Lookup a class by property schema.
| 4.015959
| 3.563607
| 1.126937
|
if self._connected or self.is_alive():
raise ConnectionExistsError("Already connected")
# After starting the thread we wait for it to post us
# an event signifying that connection is established. This
# ensures that the caller only resumes when we are fully connected.
self.start()
with self._lock:
self._connect_cond.wait_for(lambda: self._connected)
|
def connect(self)
|
Connects to the lutron controller.
| 8.866474
| 8.655163
| 1.024414
|
_LOGGER.debug("Sending: %s" % cmd)
try:
self._telnet.write(cmd.encode('ascii') + b'\r\n')
except BrokenPipeError:
self._disconnect_locked()
|
def _send_locked(self, cmd)
|
Sends the specified command to the lutron controller.
Assumes self._lock is held.
| 4.433293
| 4.01655
| 1.103757
|
self._telnet = telnetlib.Telnet(self._host)
self._telnet.read_until(LutronConnection.USER_PROMPT)
self._telnet.write(self._user + b'\r\n')
self._telnet.read_until(LutronConnection.PW_PROMPT)
self._telnet.write(self._password + b'\r\n')
self._telnet.read_until(LutronConnection.PROMPT)
self._send_locked("#MONITORING,12,2")
self._send_locked("#MONITORING,255,2")
self._send_locked("#MONITORING,3,1")
self._send_locked("#MONITORING,4,1")
self._send_locked("#MONITORING,5,1")
self._send_locked("#MONITORING,6,1")
self._send_locked("#MONITORING,8,1")
|
def _do_login_locked(self)
|
Executes the login procedure (telnet) as well as setting up some
connection defaults like turning off the prompt, etc.
| 2.389072
| 2.252195
| 1.060775
|
self._connected = False
self._connect_cond.notify_all()
self._telnet = None
_LOGGER.warning("Disconnected")
|
def _disconnect_locked(self)
|
Closes the current connection. Assume self._lock is held.
| 8.313778
| 7.182977
| 1.157428
|
with self._lock:
if not self._connected:
_LOGGER.info("Connecting")
self._do_login_locked()
self._connected = True
self._connect_cond.notify_all()
_LOGGER.info("Connected")
|
def _maybe_reconnect(self)
|
Reconnects to the controller if we have been previously disconnected.
| 5.344122
| 4.925615
| 1.084966
|
_LOGGER.info("Started")
while True:
self._maybe_reconnect()
line = ''
try:
# If someone is sending a command, we can lose our connection so grab a
# copy beforehand. We don't need the lock because if the connection is
# open, we are the only ones that will read from telnet (the reconnect
# code runs synchronously in this loop).
t = self._telnet
if t is not None:
line = t.read_until(b"\n")
except EOFError:
try:
self._lock.acquire()
self._disconnect_locked()
continue
finally:
self._lock.release()
self._recv_cb(line.decode('ascii').rstrip())
|
def run(self)
|
Main thread function to maintain connection and receive remote status.
| 7.260301
| 6.823193
| 1.064062
|
import xml.etree.ElementTree as ET
root = ET.fromstring(self._xml_db_str)
# The structure is something like this:
# <Areas>
# <Area ...>
# <DeviceGroups ...>
# <Scenes ...>
# <ShadeGroups ...>
# <Outputs ...>
# <Areas ...>
# <Area ...>
# First area is useless, it's the top-level project area that defines the
# "house". It contains the real nested Areas tree, which is the one we want.
top_area = root.find('Areas').find('Area')
self.project_name = top_area.get('Name')
areas = top_area.find('Areas')
for area_xml in areas.getiterator('Area'):
area = self._parse_area(area_xml)
self.areas.append(area)
return True
|
def parse(self)
|
Main entrypoint into the parser. It interprets and creates all the
relevant Lutron objects and stuffs them into the appropriate hierarchy.
| 4.886105
| 4.757271
| 1.027082
|
area = Area(self._lutron,
name=area_xml.get('Name'),
integration_id=int(area_xml.get('IntegrationID')),
occupancy_group_id=area_xml.get('OccupancyGroupAssignedToID'))
for output_xml in area_xml.find('Outputs'):
output = self._parse_output(output_xml)
area.add_output(output)
# device group in our case means keypad
# device_group.get('Name') is the location of the keypad
for device_group in area_xml.find('DeviceGroups'):
if device_group.tag == 'DeviceGroup':
devs = device_group.find('Devices')
elif device_group.tag == 'Device':
devs = [device_group]
else:
_LOGGER.info("Unknown tag in DeviceGroups child %s" % devs)
devs = []
for device_xml in devs:
if device_xml.tag != 'Device':
continue
if device_xml.get('DeviceType') in (
'SEETOUCH_KEYPAD',
'SEETOUCH_TABLETOP_KEYPAD',
'PICO_KEYPAD',
'HYBRID_SEETOUCH_KEYPAD',
'MAIN_REPEATER'):
keypad = self._parse_keypad(device_xml)
area.add_keypad(keypad)
elif device_xml.get('DeviceType') == 'MOTION_SENSOR':
motion_sensor = self._parse_motion_sensor(device_xml)
area.add_sensor(motion_sensor)
#elif device_xml.get('DeviceType') == 'VISOR_CONTROL_RECEIVER':
return area
|
def _parse_area(self, area_xml)
|
Parses an Area tag, which is effectively a room, depending on how the
Lutron controller programming was done.
| 3.349988
| 3.203175
| 1.045834
|
output = Output(self._lutron,
name=output_xml.get('Name'),
watts=int(output_xml.get('Wattage')),
output_type=output_xml.get('OutputType'),
integration_id=int(output_xml.get('IntegrationID')))
return output
|
def _parse_output(self, output_xml)
|
Parses an output, which is generally a switch controlling a set of
lights/outlets, etc.
| 4.889591
| 4.354074
| 1.122992
|
keypad = Keypad(self._lutron,
name=keypad_xml.get('Name'),
integration_id=int(keypad_xml.get('IntegrationID')))
components = keypad_xml.find('Components')
if not components:
return keypad
for comp in components:
if comp.tag != 'Component':
continue
comp_type = comp.get('ComponentType')
if comp_type == 'BUTTON':
button = self._parse_button(keypad, comp)
keypad.add_button(button)
elif comp_type == 'LED':
led = self._parse_led(keypad, comp)
keypad.add_led(led)
return keypad
|
def _parse_keypad(self, keypad_xml)
|
Parses a keypad device (the Visor receiver is technically a keypad too).
| 2.301383
| 2.237399
| 1.028598
|
button_xml = component_xml.find('Button')
name = button_xml.get('Engraving')
button_type = button_xml.get('ButtonType')
direction = button_xml.get('Direction')
# Hybrid keypads have dimmer buttons which have no engravings.
if button_type == 'SingleSceneRaiseLower':
name = 'Dimmer ' + direction
if not name:
name = "Unknown Button"
button = Button(self._lutron, keypad,
name=name,
num=int(component_xml.get('ComponentNumber')),
button_type=button_type,
direction=direction)
return button
|
def _parse_button(self, keypad, component_xml)
|
Parses a button device that is part of a keypad.
| 5.065372
| 4.748192
| 1.0668
|
component_num = int(component_xml.get('ComponentNumber'))
led_num = component_num - 80
led = Led(self._lutron, keypad,
name=('LED %d' % led_num),
led_num=led_num,
component_num=component_num)
return led
|
def _parse_led(self, keypad, component_xml)
|
Parses an LED device that is part of a keypad.
| 4.646671
| 4.364254
| 1.064712
|
return MotionSensor(self._lutron,
name=sensor_xml.get('Name'),
integration_id=int(sensor_xml.get('IntegrationID')))
|
def _parse_motion_sensor(self, sensor_xml)
|
Parses a motion sensor object.
TODO: We don't actually do anything with these yet. There's a lot of info
that needs to be managed to do this right. We'd have to manage the occupancy
groups, what's assigned to them, and when they go (un)occupied. We'll handle
this later.
| 8.320045
| 7.985301
| 1.04192
|
if not isinstance(obj, LutronEntity):
raise InvalidSubscription("Subscription target not a LutronEntity")
_LOGGER.warning("DEPRECATED: Subscribing via Lutron.subscribe is obsolete. "
"Please use LutronEntity.subscribe")
if obj not in self._legacy_subscribers:
self._legacy_subscribers[obj] = handler
obj.subscribe(self._dispatch_legacy_subscriber, None)
|
def subscribe(self, obj, handler)
|
Subscribes to status updates of the requested object.
DEPRECATED
The handler will be invoked when the controller sends a notification
regarding changed state. The user can then further query the object for the
state itself.
| 6.22123
| 6.07162
| 1.024641
|
ids = self._ids.setdefault(cmd_type, {})
if obj.id in ids:
raise IntegrationIdExistsError
self._ids[cmd_type][obj.id] = obj
|
def register_id(self, cmd_type, obj)
|
Registers an object (through its integration id) to receive update
notifications. This is the core mechanism by which Output and Keypad objects get
notified when the controller sends status updates.
| 4.68467
| 3.938765
| 1.189376
|
if obj in self._legacy_subscribers:
self._legacy_subscribers[obj](obj)
|
def _dispatch_legacy_subscriber(self, obj, *args, **kwargs)
|
This dispatches the registered callback for 'obj'. This is only used
for legacy subscribers since new users should register with the target
object directly.
| 6.033593
| 4.840557
| 1.246467
|
if line == '':
return
# Only handle query response messages, which are also sent on remote status
# updates (e.g. user manually pressed a keypad button)
if line[0] != Lutron.OP_RESPONSE:
_LOGGER.debug("ignoring %s" % line)
return
parts = line[1:].split(',')
cmd_type = parts[0]
integration_id = int(parts[1])
args = parts[2:]
if cmd_type not in self._ids:
_LOGGER.info("Unknown cmd %s (%s)" % (cmd_type, line))
return
ids = self._ids[cmd_type]
if integration_id not in ids:
_LOGGER.warning("Unknown id %d (%s)" % (integration_id, line))
return
obj = ids[integration_id]
handled = obj.handle_update(args)
|
def _recv(self, line)
|
Invoked by the connection manager to process incoming data.
| 4.4438
| 4.38795
| 1.012728
|
out_cmd = ",".join(
(cmd, str(integration_id)) + tuple((str(x) for x in args)))
self._conn.send(op + out_cmd)
|
def send(self, op, cmd, integration_id, *args)
|
Formats and sends the requested command to the Lutron controller.
| 6.035439
| 5.822347
| 1.036599
|
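A hedged sketch of what `send` produces: the op code is prefixed, and the command, integration id, and arguments are comma-joined before being handed to the connection (the 'OUTPUT' command, id, and values are illustrative, mirroring the '#MONITORING,…' strings sent at login):

```python
# Assuming `lutron` is a connected Lutron instance:
lutron.send('#', 'OUTPUT', 17, 1, 75.0)
# builds and sends the string '#OUTPUT,17,1,75.0'
```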