Search is not available for this dataset
text
stringlengths
75
104k
def read_graph(filename):
    """!
    @brief Read graph from file in GRPR format.
    
    @param[in] filename (string): Path to file with graph in GRPR format.
    
    @return (graph) Graph that is read from file.
    
    """
    
    comments = ""
    space_descr = []
    data = []
    data_type = None
    
    map_data_repr = dict()  # Used as a temporary buffer only when input graph is represented by edges.
    
    # 'with' guarantees that the file is closed even when parsing raises
    # (the original implementation leaked the handle on exception).
    with open(filename, 'r') as file:
        for line in file:
            if line[0] == 'c' or line[0] == 'p':
                comments += line[1:]
            
            elif line[0] == 'r':  # coordinates of a node in 2-dimensional space
                node_coordinates = [float(val) for val in line[1:].split()]
                if len(node_coordinates) != 2:
                    raise NameError('Invalid format of space description for node (only 2-dimension space is supported)')
                
                # Reuse already parsed coordinates instead of parsing the line a second time.
                space_descr.append(node_coordinates)
            
            elif line[0] == 'm':  # row of an adjacency matrix
                if (data_type is not None) and (data_type != 'm'):
                    raise NameError('Invalid format of graph representation (only one type should be used)')
                
                data_type = 'm'
                data.append([float(val) for val in line[1:].split()])
            
            elif line[0] == 'v':  # adjacency list of a node
                if (data_type is not None) and (data_type != 'v'):
                    raise NameError('Invalid format of graph representation (only one type should be used)')
                
                data_type = 'v'
                data.append([float(val) for val in line[1:].split()])
            
            elif line[0] == 'e':  # single edge as a pair of 1-based vertex indexes
                if (data_type is not None) and (data_type != 'e'):
                    raise NameError('Invalid format of graph representation (only one type should be used)')
                
                data_type = 'e'
                vertices = [int(val) for val in line[1:].split()]
                
                if vertices[0] not in map_data_repr:
                    map_data_repr[vertices[0]] = [vertices[1]]
                else:
                    map_data_repr[vertices[0]].append(vertices[1])
                
                if vertices[1] not in map_data_repr:
                    map_data_repr[vertices[1]] = [vertices[0]]
                else:
                    map_data_repr[vertices[1]].append(vertices[0])
            
            elif len(line.strip()) == 0:
                continue
            
            else:
                print(line)
                raise NameError('Invalid format of file with graph description')
    
    # In case of edge representation result should be copied to an adjacency matrix.
    if data_type == 'e':
        for index in range(len(map_data_repr)):
            data.append([0] * len(map_data_repr))
            
            for index_neighbour in map_data_repr[index + 1]:
                data[index][index_neighbour - 1] = 1
    
    # Set graph description
    if data_type == 'm':
        graph_descr = type_graph_descr.GRAPH_MATRIX_DESCR
    elif data_type == 'v':
        graph_descr = type_graph_descr.GRAPH_VECTOR_DESCR
    elif data_type == 'e':
        graph_descr = type_graph_descr.GRAPH_MATRIX_DESCR
    else:
        raise NameError('Invalid format of file with graph description')
    
    if space_descr != []:
        if len(data) != len(space_descr):
            raise NameError("Invalid format of file with graph - number of nodes is different in space representation and graph description")
    
    return graph(data, graph_descr, space_descr, comments)
def draw_graph(graph_instance, map_coloring = None):
    """!
    @brief Draw graph in the 2-dimensional space where it is defined.
    
    @param[in] graph_instance (graph): Graph that should be drawn.
    @param[in] map_coloring (list): Color index for each vertex; its length must be equal to the
                amount of vertices in the graph. When 'None' the graph is drawn without coloring.
    
    @warning Graph can be represented only if there is space representation for it.
    
    """
    
    if graph_instance.space_description is None:
        raise NameError("The graph haven't got representation in space")
    
    if map_coloring is not None:
        if len(graph_instance) != len(map_coloring):
            raise NameError("Size of graph should be equal to size coloring map")
    
    fig = plt.figure()
    axes = fig.add_subplot(111)
    
    available_colors = ['#00a2e8', '#22b14c', '#ed1c24',
                        '#fff200', '#000000', '#a349a4',
                        '#ffaec9', '#7f7f7f', '#b97a57',
                        '#c8bfe7', '#880015', '#ff7f27',
                        '#3f48cc', '#c3c3c3', '#ffc90e',
                        '#efe4b0', '#b5e61d', '#99d9ea',
                        '#7092b4', '#ffffff']
    
    if map_coloring is not None:
        if len(map_coloring) > len(available_colors):
            raise NameError('Impossible to represent colored graph due to number of specified colors.')
    
    x_maximum, x_minimum = -float('inf'), float('inf')
    y_maximum, y_minimum = -float('inf'), float('inf')
    
    points = graph_instance.space_description
    for index in range(len(points)):
        if graph_instance.type_graph_descr == type_graph_descr.GRAPH_MATRIX_DESCR:
            # Upper triangle of the matrix - draw each connection only one time.
            for neighbor in range(index, len(points)):
                if graph_instance.data[index][neighbor] == 1:
                    axes.plot([points[index][0], points[neighbor][0]],
                              [points[index][1], points[neighbor][1]],
                              'k-', linewidth = 1.5)
        
        elif graph_instance.type_graph_descr == type_graph_descr.GRAPH_VECTOR_DESCR:
            for neighbor in graph_instance.data[index]:
                # Draw each connection only one time.
                if index > neighbor:
                    axes.plot([points[index][0], points[neighbor][0]],
                              [points[index][1], points[neighbor][1]],
                              'k-', linewidth = 1.5)
        
        node_color = 'b'
        if map_coloring is not None:
            node_color = colors.hex2color(available_colors[map_coloring[index]])
        
        axes.plot(points[index][0], points[index][1],
                  color = node_color, marker = 'o', markersize = 20)
        
        # Track bounding box of the drawn vertices.
        x_maximum = max(x_maximum, points[index][0])
        x_minimum = min(x_minimum, points[index][0])
        y_maximum = max(y_maximum, points[index][1])
        y_minimum = min(y_minimum, points[index][1])
    
    plt.xlim(x_minimum - 0.5, x_maximum + 0.5)
    plt.ylim(y_minimum - 0.5, y_maximum + 0.5)
    
    plt.show()
def process(self):
    """!
    @brief Performs cluster analysis in line with Fuzzy C-Means algorithm.
    
    @see get_clusters()
    @see get_centers()
    @see get_membership()
    
    """
    # Dispatch to the C/C++ backend when it is enabled, otherwise use the Python implementation.
    handler = self.__process_by_ccore if self.__ccore is True else self.__process_by_python
    handler()
    
    return self
def __process_by_ccore(self):
    """!
    @brief Performs cluster analysis using C/C++ implementation.
    
    """
    # The wrapper returns a package that is unpacked by well-known indexes.
    package = wrapper.fcm_algorithm(self.__data, self.__centers, self.__m, self.__tolerance, self.__itermax)
    
    self.__clusters = package[wrapper.fcm_package_indexer.INDEX_CLUSTERS]
    self.__centers = package[wrapper.fcm_package_indexer.INDEX_CENTERS]
    self.__membership = package[wrapper.fcm_package_indexer.INDEX_MEMBERSHIP]
def __process_by_python(self):
    """!
    @brief Performs cluster analysis using Python implementation.
    
    """
    self.__data = numpy.array(self.__data)
    self.__centers = numpy.array(self.__centers)
    self.__membership = numpy.zeros((len(self.__data), len(self.__centers)))
    
    change = float('inf')
    iteration = 0
    
    # Iterate until the centers stabilize or the iteration limit is reached.
    while iteration < self.__itermax and change > self.__tolerance:
        self.__update_membership()
        updated_centers = self.__calculate_centers()
        
        change = self.__calculate_changes(updated_centers)
        self.__centers = updated_centers
        
        iteration += 1
    
    self.__extract_clusters()
def __calculate_centers(self):
    """!
    @brief Calculate cluster centers using membership of each point.
    
    @return (numpy.array) Updated centers.
    
    """
    dimension = self.__data.shape[1]
    centers = numpy.zeros((len(self.__centers), dimension))
    
    for index in range(len(self.__centers)):
        # Matrix multiplication '@' requires python version 3.5.
        weights = self.__membership[:, index]
        centers[index] = numpy.divide(weights @ self.__data, numpy.sum(weights))
    
    return centers
def __update_membership(self):
    """!
    @brief Update membership for each point in line with current cluster centers.
    
    """
    # Squared Euclidean distance from every center to every point.
    data_difference = numpy.zeros((len(self.__centers), len(self.__data)))
    for center_index in range(len(self.__centers)):
        data_difference[center_index] = numpy.sum(numpy.square(self.__data - self.__centers[center_index]), axis=1)
    
    for point_index in range(len(self.__data)):
        for center_index in range(len(self.__centers)):
            # Sum of distance ratios; zero distances (point lies on a center) are skipped.
            divider = sum([pow(data_difference[center_index][point_index] / data_difference[k][point_index], self.__degree)
                           for k in range(len(self.__centers)) if data_difference[k][point_index] != 0.0])
            
            if divider != 0.0:
                self.__membership[point_index][center_index] = 1.0 / divider
            else:
                self.__membership[point_index][center_index] = 1.0
def __calculate_changes(self, updated_centers):
    """!
    @brief Calculate changes between current and updated centers.
    
    @param[in] updated_centers (numpy.array): New positions of the centers.
    
    @return (float) Maximum change between centers.
    
    """
    # Squared Euclidean shift of each center ('.T' of a 1-D array is a no-op and is dropped).
    center_shifts = numpy.sum(numpy.square(self.__centers - updated_centers), axis=1)
    return numpy.max(center_shifts)
def collect_global_best(self, best_chromosome, best_fitness_function):
    """!
    @brief Stores the best chromosome and its fitness function's value.
    
    @param[in] best_chromosome (list): The best chromosome that were observed.
    @param[in] best_fitness_function (float): Fitness function value of the best chromosome.
    
    """
    # Do nothing when collection of the global best solution is disabled.
    if self._need_global_best:
        self._global_best_result['chromosome'].append(best_chromosome)
        self._global_best_result['fitness_function'].append(best_fitness_function)
def collect_population_best(self, best_chromosome, best_fitness_function):
    """!
    @brief Stores the best chromosome for current specific iteration and its fitness function's value.
    
    @param[in] best_chromosome (list): The best chromosome on specific iteration.
    @param[in] best_fitness_function (float): Fitness function value of the chromosome.
    
    """
    # Do nothing when collection of per-iteration best solutions is disabled.
    if self._need_population_best:
        self._best_population_result['chromosome'].append(best_chromosome)
        self._best_population_result['fitness_function'].append(best_fitness_function)
def collect_mean(self, fitness_functions):
    """!
    @brief Stores average value of fitness function among chromosomes on specific iteration.
    
    @param[in] fitness_functions (float): Average value of fitness functions among chromosomes.
    
    """
    # Do nothing when collection of mean fitness values is disabled.
    if self._need_mean_ff:
        self._mean_ff_result.append(np.mean(fitness_functions))
def show_evolution(observer, start_iteration = 0, stop_iteration=None, ax=None, display=True):
    """!
    @brief Displays evolution of fitness function for the best chromosome, for the current best chromosome
            and average value among all chromosomes.
    
    @param[in] observer (ga_observer): Genetic algorithm observer that collected evolution information.
    @param[in] start_iteration (uint): Iteration from that evolution should be shown.
    @param[in] stop_iteration (uint): Iteration after that evolution shouldn't be shown.
    @param[in] ax (Axes): Canvas where evolution should be displayed.
    @param[in] display (bool): If 'True' then visualization of the evolution will be shown by the function.
                This argument should be 'False' if you want to add something else to the canvas and display it later.
    
    @return (Axis) Canvas where evolution was shown.
    
    """
    
    if ax is None:
        _, ax = plt.subplots(1)
        ax.set_title("Evolution")
    
    if stop_iteration is None:
        stop_iteration = len(observer)
    
    # The same iteration window is applied to all three collected series.
    window = slice(start_iteration, stop_iteration)
    line_best, = ax.plot(observer.get_global_best()['fitness_function'][window], 'r')
    line_current, = ax.plot(observer.get_population_best()['fitness_function'][window], 'k')
    line_mean, = ax.plot(observer.get_mean_fitness_function()[window], 'c')
    
    if start_iteration < (stop_iteration - 1):
        ax.set_xlim([start_iteration, (stop_iteration - 1)])
    
    ax.set_xlabel("Iteration")
    ax.set_ylabel("Fitness function")
    ax.legend([line_best, line_current, line_mean], ["The best pop.", "Cur. best pop.", "Average"], prop={'size': 10})
    ax.grid()
    
    if display is True:
        plt.show()
    
    return ax
def show_clusters(data, observer, marker='.', markersize=None):
    """!
    @brief Shows allocated clusters by the genetic algorithm.
    
    @param[in] data (list): Input data that was used for clustering process by the algorithm.
    @param[in] observer (ga_observer): Observer that was used for collection information about clustering process.
    @param[in] marker (char): Type of marker that should be used for object (point) representation.
    @param[in] markersize (uint): Size of the marker that is used for object (point) representation.
    
    @note If you have clusters instead of observer then 'cluster_visualizer' can be used for visualization purposes.
    
    @see cluster_visualizer
    
    """
    figure = plt.figure()
    ax1 = figure.add_subplot(121)
    
    # Clusters are restored from the last (the best) chromosome observed by the observer.
    final_chromosome = observer.get_global_best()['chromosome'][-1]
    clusters = ga_math.get_clusters_representation(final_chromosome)
    
    visualizer = cluster_visualizer(1, 2)
    visualizer.append_clusters(clusters, data, 0, marker, markersize)
    visualizer.show(figure, display=False)
    
    ga_visualizer.show_evolution(observer, 0, None, ax1, True)
def animate_cluster_allocation(data, observer, animation_velocity=75, movie_fps=5, save_movie=None):
    """!
    @brief Animate clustering process of genetic clustering algorithm.
    @details This method can be also used for rendering movie of clustering process and 'ffmpeg' is required for that purpuse.
    
    @param[in] data (list): Input data that was used for clustering process by the algorithm.
    @param[in] observer (ga_observer): Observer that was used for collection information about clustering process.
                Be sure that whole information was collected by the observer.
    @param[in] animation_velocity (uint): Interval between frames in milliseconds (for run-time animation only).
    @param[in] movie_fps (uint): Defines frames per second (for rendering movie only).
    @param[in] save_movie (string): If it is specified then animation will be stored to file that is specified in this parameter.
    
    """
    figure = plt.figure()
    
    def _first_frame():
        # Initialization callback simply renders the very first frame.
        return _render_frame(0)
    
    def _render_frame(index_iteration):
        figure.clf()
        figure.suptitle("Clustering genetic algorithm (iteration: " + str(index_iteration) + ")", fontsize=18, fontweight='bold')
        
        visualizer = cluster_visualizer(4, 2, ["The best pop. on step #" + str(index_iteration), "The best population"])
        
        # Best solution of the current iteration on the left, global best on the right.
        local_minimum_clusters = ga_math.get_clusters_representation(observer.get_population_best()['chromosome'][index_iteration])
        visualizer.append_clusters(local_minimum_clusters, data, 0)
        
        global_minimum_clusters = ga_math.get_clusters_representation(observer.get_global_best()['chromosome'][index_iteration])
        visualizer.append_clusters(global_minimum_clusters, data, 1)
        
        ax1 = plt.subplot2grid((2, 2), (1, 0), colspan=2)
        ga_visualizer.show_evolution(observer, 0, index_iteration + 1, ax1, False)
        
        visualizer.show(figure, shift=0, display=False)
        figure.subplots_adjust(top=0.85)
        
        return [figure.gca()]
    
    iterations = len(observer)
    cluster_animation = animation.FuncAnimation(figure, _render_frame, iterations,
                                                interval=animation_velocity,
                                                init_func=_first_frame,
                                                repeat_delay=5000)
    
    if save_movie is not None:
        cluster_animation.save(save_movie, writer='ffmpeg', fps=movie_fps, bitrate=1500)
    else:
        plt.show()
def process(self):
    """!
    @brief Perform clustering procedure in line with rule of genetic clustering algorithm.
    
    @see get_clusters()
    
    """
    
    # Initial population of random chromosomes.
    population = self._init_population(self._count_clusters, len(self._data), self._chromosome_count)
    
    # The best solution that has been found so far.
    global_best, global_best_ff, initial_fitness = \
        self._get_best_chromosome(population, self._data, self._count_clusters)
    
    if self._observer is not None:
        self._observer.collect_global_best(global_best, global_best_ff)
        self._observer.collect_population_best(global_best, global_best_ff)
        self._observer.collect_mean(initial_fitness)
    
    for _ in range(self._population_count):
        # Produce the next generation: selection, crossover and mutation.
        population = self._select(population, self._data, self._count_clusters, self._select_coeff)
        self._crossover(population)
        self._mutation(population, self._count_clusters, self._count_mutation_gens, self._coeff_mutation_count)
        
        iteration_best, iteration_best_ff, iteration_fitness = \
            self._get_best_chromosome(population, self._data, self._count_clusters)
        
        # Keep the global best solution up to date.
        if iteration_best_ff < global_best_ff:
            global_best_ff = iteration_best_ff
            global_best = iteration_best
        
        if self._observer is not None:
            self._observer.collect_global_best(global_best, global_best_ff)
            self._observer.collect_population_best(iteration_best, iteration_best_ff)
            self._observer.collect_mean(iteration_fitness)
    
    # Save result
    self._result_clustering['best_chromosome'] = global_best
    self._result_clustering['best_fitness_function'] = global_best_ff
    
    return global_best, global_best_ff
def _select(chromosomes, data, count_clusters, select_coeff):
    """!
    @brief Performs selection procedure where new chromosomes are calculated.
    
    @param[in] chromosomes (numpy.array): Chromosomes that are used for selection.
    @param[in] data (numpy.array): Input data that is used for clustering process.
    @param[in] count_clusters (uint): Amount of clusters that should be allocated.
    @param[in] select_coeff (float): Exponential coefficient for fitness-based selection weights.
    
    @return (numpy.array) New population selected with probabilities proportional to fitness.
    
    """
    
    # Calc centers
    centres = ga_math.get_centres(chromosomes, data, count_clusters)
    
    # Calc fitness functions
    fitness = genetic_algorithm._calc_fitness_function(centres, data, chromosomes)
    
    # Transform fitness values into positive selection weights.
    for _idx in range(len(fitness)):
        fitness[_idx] = math.exp(1 + fitness[_idx] * select_coeff)
    
    # Calc probability vector
    probabilities = ga_math.calc_probability_vector(fitness)
    
    # Select P chromosomes with probabilities.
    # BUG FIX: 'np.int' was removed in NumPy 1.24; the builtin 'int' is the equivalent dtype.
    new_chromosomes = np.zeros(chromosomes.shape, dtype=int)
    
    # Selecting
    for _idx in range(len(chromosomes)):
        new_chromosomes[_idx] = chromosomes[ga_math.get_uniform(probabilities)]
    
    return new_chromosomes
def _crossover(chromosomes):
    """!
    @brief Crossover procedure.
    
    @param[in] chromosomes (numpy.array): Population whose chromosomes are crossed over in place.
    
    """
    # Random pairing: the shuffled index array is split into two halves and element k
    # of the first half is crossed with element k of the second half.
    shuffled_indexes = np.array(range(len(chromosomes)))
    np.random.shuffle(shuffled_indexes)
    
    half = int(len(shuffled_indexes) / 2)
    
    for position in range(half):
        # Random mask defines which genes are exchanged within the pair.
        crossover_mask = genetic_algorithm._get_crossover_mask(len(chromosomes[position]))
        
        genetic_algorithm._crossover_a_pair(chromosomes[shuffled_indexes[position]],
                                            chromosomes[shuffled_indexes[position + half]],
                                            crossover_mask)
def _mutation(chromosomes, count_clusters, count_gen_for_mutation, coeff_mutation_count):
    """!
    @brief Mutation procedure.
    
    @param[in] chromosomes (numpy.array): Population that is mutated in place.
    @param[in] count_clusters (uint): Amount of clusters (allowed gene values).
    @param[in] count_gen_for_mutation (uint): Amount of genes that are mutated in each chosen chromosome.
    @param[in] coeff_mutation_count (float): Share of the population that undergoes mutation.
    
    """
    genes_in_chromosome = len(chromosomes[0])
    
    # Shuffle indexes so that a random subset of chromosomes is mutated.
    shuffled_indexes = np.array(range(len(chromosomes)))
    np.random.shuffle(shuffled_indexes)
    
    amount_to_mutate = int(len(shuffled_indexes) * coeff_mutation_count)
    for position in range(amount_to_mutate):
        for _ in range(count_gen_for_mutation):
            # Replace a random gene with a random cluster index.
            gene_index = np.random.randint(genes_in_chromosome)
            chromosomes[shuffled_indexes[position]][gene_index] = np.random.randint(count_clusters)
def _crossover_a_pair(chromosome_1, chromosome_2, mask):
    """!
    @brief Crossovers a pair of chromosomes.
    
    @param[in] chromosome_1 (numpy.array): The first chromosome for crossover.
    @param[in] chromosome_2 (numpy.array): The second chromosome for crossover.
    @param[in] mask (numpy.array): Crossover mask that defines which genes should be swapped.
    
    """
    for gene_index in range(len(chromosome_1)):
        # A mask value of 1 means the gene is exchanged between the chromosomes.
        if mask[gene_index] == 1:
            chromosome_1[gene_index], chromosome_2[gene_index] = chromosome_2[gene_index], chromosome_1[gene_index]
def _get_crossover_mask(mask_length):
    """!
    @brief Crossover mask to crossover a pair of chromosomes.
    
    @param[in] mask_length (uint): Length of the mask.
    
    @return (numpy.array) Randomly shuffled mask where exactly half of the positions are set to 1.
    
    """
    # Half ones and half zeros, then randomly distributed.
    mask = np.zeros(mask_length)
    mask[:int(int(mask_length) / 2)] = 1
    np.random.shuffle(mask)
    
    return mask
def _init_population(count_clusters, count_data, chromosome_count):
    """!
    @brief Returns first population as a uniform random choice.
    
    @param[in] count_clusters (uint): Amount of clusters that should be allocated.
    @param[in] count_data (uint): Data size that is used for clustering process.
    @param[in] chromosome_count (uint): Amount of chromosome that is used for clustering.
    
    @return (numpy.array) Population of shape (chromosome_count, count_data) filled with random cluster indexes.
    
    """
    # Each gene is a uniformly random cluster index in [0, count_clusters).
    return np.random.randint(count_clusters, size=(chromosome_count, count_data))
def _get_best_chromosome(chromosomes, data, count_clusters):
    """!
    @brief Returns the current best chromosome.
    
    @param[in] chromosomes (list): Chromosomes that are used for searching.
    @param[in] data (list): Input data that is used for clustering process.
    @param[in] count_clusters (uint): Amount of clusters that should be allocated.
    
    @return (list, float, list) The best chromosome, its fitness function value and fitness function values for all chromosomes.
    
    """
    centres = ga_math.get_centres(chromosomes, data, count_clusters)
    fitness_functions = genetic_algorithm._calc_fitness_function(centres, data, chromosomes)
    
    # The chromosome with the minimum fitness value is the best one.
    best_index = fitness_functions.argmin()
    return chromosomes[best_index], fitness_functions[best_index], fitness_functions
def _calc_fitness_function(centres, data, chromosomes):
    """!
    @brief Calculate fitness function values for chromosomes.
    
    @param[in] centres (list): Cluster centers.
    @param[in] data (list): Input data that is used for clustering process.
    @param[in] chromosomes (list): Chromosomes whose fitness function's values are calculated.
    
    @return (list) Fitness function value for each chromosome correspondingly.
    
    """
    count_chromosome = len(chromosomes)
    fitness_function = np.zeros(count_chromosome)
    
    for chromosome_index in range(count_chromosome):
        # Center assigned to every data point by this chromosome.
        assigned_centres = np.zeros(data.shape)
        for point_index in range(len(data)):
            assigned_centres[point_index] = centres[chromosome_index][chromosomes[chromosome_index][point_index]]
        
        # City Block (Manhattan) distance between points and their assigned centers.
        fitness_function[chromosome_index] += np.sum(abs(data - assigned_centres))
    
    return fitness_function
def get_distance(self, entry, type_measurement):
    """!
    @brief Calculates distance between two clusters in line with measurement type.
    
    @details In case of usage CENTROID_EUCLIDIAN_DISTANCE square euclidian distance will be returned.
             Square root should be taken from the result for obtaining real euclidian distance between entries.
    
    @param[in] entry (cfentry): Clustering feature to which distance should be obtained.
    @param[in] type_measurement (measurement_type): Distance measurement algorithm between two clusters.
    
    @return (double) Distance between two clusters.
    
    """
    if type_measurement is measurement_type.CENTROID_EUCLIDEAN_DISTANCE:
        return euclidean_distance_square(entry.get_centroid(), self.get_centroid())
    
    if type_measurement is measurement_type.CENTROID_MANHATTAN_DISTANCE:
        return manhattan_distance(entry.get_centroid(), self.get_centroid())
    
    if type_measurement is measurement_type.AVERAGE_INTER_CLUSTER_DISTANCE:
        return self.__get_average_inter_cluster_distance(entry)
    
    if type_measurement is measurement_type.AVERAGE_INTRA_CLUSTER_DISTANCE:
        return self.__get_average_intra_cluster_distance(entry)
    
    if type_measurement is measurement_type.VARIANCE_INCREASE_DISTANCE:
        return self.__get_variance_increase_distance(entry)
    
    # Unknown measurement type.
    assert 0
def get_centroid(self):
    """!
    @brief Calculates centroid of cluster that is represented by the entry.
    @details It's calculated once when it's requested after the last changes.
    
    @return (list) Centroid of cluster that is represented by the entry.
    
    """
    # Return the cached value when nothing has changed since the last calculation.
    if self.__centroid is not None:
        return self.__centroid
    
    self.__centroid = [component / self.number_points for component in self.linear_sum]
    return self.__centroid
def get_radius(self):
    """!
    @brief Calculates radius of cluster that is represented by the entry.
    @details It's calculated once when it's requested after the last changes.
    
    @return (double) Radius of cluster that is represented by the entry.
    
    """
    # Return the cached value when nothing has changed since the last calculation.
    if self.__radius is not None:
        return self.__radius
    
    centroid = self.get_centroid()
    
    part_square = self.square_sum
    if type(centroid) == list:
        # Multi-dimensional case: component-wise products are summed up.
        part_linear = 2.0 * sum(list_math_multiplication(self.linear_sum, centroid))
        part_centroid = self.number_points * sum(list_math_multiplication(centroid, centroid))
    else:
        # One-dimensional case: plain scalar arithmetic.
        part_linear = 2.0 * self.linear_sum * centroid
        part_centroid = self.number_points * centroid * centroid
    
    self.__radius = ((1.0 / self.number_points) * (part_square - part_linear + part_centroid)) ** 0.5
    return self.__radius
def get_diameter(self):
    """!
    @brief Calculates diameter of cluster that is represented by the entry.
    @details It's calculated once when it's requested after the last changes.
    
    @return (double) Diameter of cluster that is represented by the entry.
    
    """
    # Return the cached value when nothing has changed since the last calculation.
    if self.__diameter is not None:
        return self.__diameter
    
    if type(self.linear_sum) == list:
        # Multi-dimensional case: dot product of the linear sum with itself.
        diameter_part = (self.square_sum * self.number_points
                         - 2.0 * sum(list_math_multiplication(self.linear_sum, self.linear_sum))
                         + self.square_sum * self.number_points)
    else:
        # One-dimensional case: plain scalar arithmetic.
        diameter_part = (self.square_sum * self.number_points
                         - 2.0 * self.linear_sum * self.linear_sum
                         + self.square_sum * self.number_points)
    
    self.__diameter = (diameter_part / (self.number_points * (self.number_points - 1))) ** 0.5
    return self.__diameter
def __get_average_inter_cluster_distance(self, entry):
    """!
    @brief Calculates average inter cluster distance between current and specified clusters.
    
    @param[in] entry (cfentry): Clustering feature to which distance should be obtained.
    
    @return (double) Average inter cluster distance.
    
    """
    # Dot product of the linear sums of both clusters.
    cross_product = sum(list_math_multiplication(self.linear_sum, entry.linear_sum))
    
    numerator = (entry.number_points * self.square_sum
                 - 2.0 * cross_product
                 + self.number_points * entry.square_sum)
    
    return (numerator / (self.number_points * entry.number_points)) ** 0.5
def __get_average_intra_cluster_distance(self, entry):
    """!
    @brief Calculates average intra cluster distance between current and specified clusters.
    
    @param[in] entry (cfentry): Clustering feature to which distance should be obtained.
    
    @return (double) Average intra cluster distance.
    
    """
    # Combined linear sum of both clusters and its dot product with itself.
    combined_linear = list_math_addition(self.linear_sum, entry.linear_sum)
    linear_part_distance = sum(list_math_multiplication(combined_linear, combined_linear))
    
    total_points = self.number_points + entry.number_points
    general_part_distance = (2.0 * total_points * (self.square_sum + entry.square_sum)
                             - 2.0 * linear_part_distance)
    
    return (general_part_distance / (total_points * (total_points - 1.0))) ** 0.5
def __get_variance_increase_distance(self, entry):
    """!
    @brief Calculates variance increase distance between current and specified clusters.
    
    @param[in] entry (cfentry): Clustering feature to which distance should be obtained.
    
    @return (double) Variance increase distance.
    
    """
    # Combined linear sum of both clusters; its dot product with itself is used twice.
    combined = list_math_addition(self.linear_sum, entry.linear_sum)
    combined_square = sum(list_math_multiplication(combined, combined))
    total_points = self.number_points + entry.number_points
    
    # Variance of the merged cluster.
    variance_merged = (self.square_sum + entry.square_sum) - \
        2.0 * combined_square / total_points + \
        total_points * combined_square / total_points ** 2.0
    
    # Variance contribution of the current cluster (subtracted).
    own_square = sum(list_math_multiplication(self.linear_sum, self.linear_sum))
    variance_own = -(self.square_sum - (2.0 * own_square / self.number_points) + (own_square / self.number_points))
    
    # Variance contribution of the other cluster (subtracted).
    other_square = sum(list_math_multiplication(entry.linear_sum, entry.linear_sum))
    variance_other = -(entry.square_sum - (2.0 / entry.number_points) * other_square
                       + entry.number_points * (1.0 / entry.number_points ** 2.0) * other_square)
    
    return variance_merged + variance_own + variance_other
def get_distance(self, node, type_measurement):
    """!
    @brief Calculates distance between nodes in line with specified type measurement.
    
    @param[in] node (cfnode): CF-node that is used for calculation distance to the current node.
    @param[in] type_measurement (measurement_type): Measurement type that is used for calculation distance.
    
    @return (double) Distance between two nodes.
    
    """
    # Distance between nodes is distance between their clustering features.
    return self.feature.get_distance(node.feature, type_measurement)
def insert_successor(self, successor):
    """!
    @brief Insert successor to the node.
    
    @param[in] successor (cfnode): Successor for adding.
    
    """
    # Link the child and absorb its clustering feature into this node.
    successor.parent = self
    self.successors.append(successor)
    self.feature += successor.feature
def remove_successor(self, successor):
    """!
    @brief Remove successor from the node.
    
    @param[in] successor (cfnode): Successor for removing.
    
    """
    # Shrink the aggregated clustering feature by the removed child's contribution.
    self.feature -= successor.feature
    
    # BUG FIX: the original implementation appended the successor instead of removing it
    # (copy-paste from insert_successor), leaving the node in an inconsistent state.
    # The successor is detached, therefore its parent link is cleared as well.
    self.successors.remove(successor)
    successor.parent = None
def merge(self, node):
    """!
    @brief Merge non-leaf node to the current.
    
    @param[in] node (non_leaf_node): Non-leaf node that should be merged with current.
    
    """
    # Absorb the other node's clustering feature and adopt all of its children.
    self.feature += node.feature
    
    for successor in node.successors:
        successor.parent = self
        self.successors.append(successor)
def get_farthest_successors(self, type_measurement):
    """!
    @brief Find pair of farthest successors of the node in line with measurement type.
    
    @param[in] type_measurement (measurement_type): Measurement type that is used for obtaining farthest successors.
    
    @return (list) Pair of farthest successors represented by list [cfnode1, cfnode2].
    
    """
    best_pair = [None, None]
    best_distance = 0
    
    # Examine every unordered pair of successors exactly once.
    for left in range(len(self.successors)):
        for right in range(left + 1, len(self.successors)):
            distance = self.successors[left].get_distance(self.successors[right], type_measurement)
            
            if distance > best_distance:
                best_distance = distance
                best_pair = [self.successors[left], self.successors[right]]
    
    return best_pair
def get_nearest_successors(self, type_measurement):
    """!
    @brief Find pair of nearest successors of the node in line with measurement type.
    
    @param[in] type_measurement (measurement_type): Measurement type that is used for obtaining nearest successors.
    
    @return (list) Pair of nearest successors represented by list.
    
    """
    best_pair = [None, None]
    best_distance = float("Inf")
    
    # Examine every unordered pair of successors exactly once.
    for left in range(len(self.successors)):
        for right in range(left + 1, len(self.successors)):
            distance = self.successors[left].get_distance(self.successors[right], type_measurement)
            
            if distance < best_distance:
                best_distance = distance
                best_pair = [self.successors[left], self.successors[right]]
    
    return best_pair
def insert_entry(self, entry):
    """!
    @brief Insert new clustering feature to the leaf node.
    
    @param[in] entry (cfentry): Clustering feature.
    
    """
    # Store the entry and absorb it into the node's aggregated clustering feature.
    self.entries.append(entry)
    self.feature += entry
def remove_entry(self, entry):
    """!
    @brief Remove clustering feature from the leaf node.
    
    @param[in] entry (cfentry): Clustering feature.
    
    """
    # Shrink the aggregated clustering feature, then drop the stored entry.
    self.feature -= entry
    self.entries.remove(entry)
def merge(self, node):
    """!
    @brief Merge leaf node to the current.
    
    @param[in] node (leaf_node): Leaf node that should be merged with current.
    
    """
    # Absorb the other leaf's clustering feature and take over all of its entries.
    self.feature += node.feature
    self.entries.extend(node.entries)
def get_farthest_entries(self, type_measurement):
    """!
    @brief Find pair of farthest entries of the node.
    
    @param[in] type_measurement (measurement_type): Measurement type that is used for obtaining farthest entries.
    
    @return (list) Pair of farthest entries of the node that are represented by list.
    
    """
    best_pair = [None, None]
    best_distance = 0
    
    # Examine every unordered pair of entries exactly once.
    for left in range(len(self.entries)):
        for right in range(left + 1, len(self.entries)):
            distance = self.entries[left].get_distance(self.entries[right], type_measurement)
            
            if distance > best_distance:
                best_distance = distance
                best_pair = [self.entries[left], self.entries[right]]
    
    return best_pair
def get_nearest_index_entry(self, entry, type_measurement):
    """!
    @brief Find nearest index of nearest entry of node for the specified entry.
    
    @param[in] entry (cfentry): Entry that is used for calculation distance.
    @param[in] type_measurement (measurement_type): Measurement type that is used for obtaining nearest entry to the specified.
    
    @return (uint) Index of nearest entry of node for the specified entry.
    
    """
    
    minimum_distance = float('Inf')
    nearest_index = 0
    
    for candidate_index in range(0, len(self.entries)):
        candidate_distance = self.entries[candidate_index].get_distance(entry, type_measurement)
        if candidate_distance < minimum_distance:
            # BUG FIX: the minimum distance was never updated in the original code,
            # so every candidate passed the check and the last entry was always reported.
            minimum_distance = candidate_distance
            nearest_index = candidate_index
    
    return nearest_index
def get_nearest_entry(self, entry, type_measurement):
    """!
    @brief Find nearest entry of the node for the specified entry.

    @param[in] entry (cfentry): Entry that is used for distance calculation.
    @param[in] type_measurement (measurement_type): Measurement type used to compute distances.

    @return (cfentry) Nearest entry of the node for the specified entry.

    """
    # Consistency fix: sibling methods access the entry list via 'self.entries';
    # the mangled 'self.__entries' only works if a same-named private attribute exists.
    min_key = lambda cur_entity: cur_entity.get_distance(entry, type_measurement)
    return min(self.entries, key=min_key)
def get_level_nodes(self, level):
    """!
    @brief Traverses the CF-tree to obtain nodes located at the specified level.

    @param[in] level (uint): CF-tree level from which nodes should be returned.

    @return (list) List of CF-nodes that are located on the specified level of the CF-tree.

    """
    # Levels at or beyond the tree height contain no nodes.
    if level >= self.__height:
        return []

    return self.__recursive_get_level_nodes(level, self.__root)
def __recursive_get_level_nodes(self, level, node):
    """!
    @brief Traverses the CF-tree to obtain nodes at the specified level recursively.

    @param[in] level (uint): Amount of levels still to descend (0 means the target level).
    @param[in] node (cfnode): CF-node from which traversing is performed.

    @return (list) List of CF-nodes that are located on the specified level of the CF-tree.

    """
    # Bug fix: 'level is 0' compared identity and relied on CPython small-int
    # caching (SyntaxWarning on modern interpreters); use value equality instead.
    if level == 0:
        return [node]

    level_nodes = []
    for successor in node.successors:
        level_nodes += self.__recursive_get_level_nodes(level - 1, successor)

    return level_nodes
def insert_cluster(self, cluster):
    """!
    @brief Insert cluster that is represented as list of points where each point is represented by list of coordinates.
    @details A clustering feature is built for the whole cluster and inserted into the tree.

    @param[in] cluster (list): Cluster represented by a list of points that should be inserted to the tree.

    """
    cluster_feature = cfentry(len(cluster), linear_sum(cluster), square_sum(cluster))
    self.insert(cluster_feature)
def insert(self, entry):
    """!
    @brief Insert clustering feature to the tree.

    @param[in] entry (cfentry): Clustering feature that should be inserted.

    """
    if (self.__root is None):
        # Empty tree: the first entry becomes the root, which is a leaf node.
        node = leaf_node(entry, None, [ entry ], None);

        self.__root = node;
        self.__leafes.append(node);

        # Update statistics
        self.__amount_entries += 1;
        self.__amount_nodes += 1;
        self.__height += 1;             # root has successor now
    else:
        child_node_updation = self.__recursive_insert(entry, self.__root);
        if (child_node_updation is True):
            # Splitting has been finished, check for possibility to merge
            # (at least we have already two children).
            if (self.__merge_nearest_successors(self.__root) is True):
                self.__amount_nodes -= 1;
def find_nearest_leaf(self, entry, search_node = None):
    """!
    @brief Search for the leaf that is nearest to the specified clustering feature.

    @param[in] entry (cfentry): Clustering feature.
    @param[in] search_node (cfnode): Node where searching starts; the root is used when None.

    @return (leaf_node) Nearest leaf node to the specified clustering feature.

    """
    if search_node is None:
        search_node = self.__root

    # A leaf is the answer; otherwise descend into the child closest to the entry.
    if search_node.type != cfnode_type.CFNODE_NONLEAF:
        return search_node

    distance_to_entry = lambda child: child.feature.get_distance(entry, self.__type_measurement)
    closest_child = min(search_node.successors, key=distance_to_entry)
    return self.find_nearest_leaf(entry, closest_child)
def __recursive_insert(self, entry, search_node):
    """!
    @brief Recursive insert of the entry to the tree.
    @details It performs all required procedures during insertion such as splitting, merging.

    @param[in] entry (cfentry): Clustering feature.
    @param[in] search_node (cfnode): Node from which insertion should be started.

    @return (bool) True if number of nodes at the below level is changed, otherwise False.

    """
    # Dispatch on the node kind: descend through non-leaf nodes, insert at leaves.
    if search_node.type == cfnode_type.CFNODE_NONLEAF:
        return self.__insert_for_noneleaf_node(entry, search_node)

    return self.__insert_for_leaf_node(entry, search_node)
def __insert_for_leaf_node(self, entry, search_node):
    """!
    @brief Recursive insert entry from leaf node to the tree.

    @param[in] entry (cfentry): Clustering feature.
    @param[in] search_node (cfnode): Leaf node where insertion is performed.

    @return (bool) True if number of nodes at the below level is changed, otherwise False.

    """
    node_amount_updation = False;

    # Try to absorb the entry by the nearest entry of the leaf.
    index_nearest_entry = search_node.get_nearest_index_entry(entry, self.__type_measurement);
    merged_entry = search_node.entries[index_nearest_entry] + entry;

    # If absorption would exceed the diameter threshold, store the entry separately.
    if (merged_entry.get_diameter() > self.__threshold):
        search_node.insert_entry(entry);

        # Too many entries in the leaf - split it; the amount of nodes changes.
        if (len(search_node.entries) > self.__max_entries):
            self.__split_procedure(search_node);
            node_amount_updation = True;

        # Update statistics
        self.__amount_entries += 1;

    else:
        # Absorption succeeded: replace the nearest entry by the merged one
        # and refresh the leaf's summary feature.
        search_node.entries[index_nearest_entry] = merged_entry;
        search_node.feature += entry;

    return node_amount_updation;
def __insert_for_noneleaf_node(self, entry, search_node):
    """!
    @brief Recursive insert entry from none-leaf node to the tree.

    @param[in] entry (cfentry): Clustering feature.
    @param[in] search_node (cfnode): None-leaf node from which insertion should be started.

    @return (bool) True if number of nodes at the below level is changed, otherwise False.

    """
    node_amount_updation = False

    # Bug fix: the child to descend into must be the one nearest to the inserted
    # ENTRY (as in find_nearest_leaf), not the one nearest to its own parent node.
    min_key = lambda child_node: child_node.feature.get_distance(entry, self.__type_measurement)
    nearest_child_node = min(search_node.successors, key=min_key)

    child_node_updation = self.__recursive_insert(entry, nearest_child_node)

    # Update clustering feature of the non-leaf node.
    search_node.feature += entry

    # Check branch factor: a child may have been split and the limit exceeded.
    if len(search_node.successors) > self.__branch_factor:

        # If it is the root, a new root is created on top (tree height grows).
        if search_node is self.__root:
            self.__root = non_leaf_node(search_node.feature, None, [search_node], None)
            search_node.parent = self.__root

            # Update statistics
            self.__amount_nodes += 1
            self.__height += 1

        [new_node1, new_node2] = self.__split_nonleaf_node(search_node)

        # Replace the overflowing node by its two halves in the parent's successor list.
        parent = search_node.parent
        parent.successors.remove(search_node)
        parent.successors.append(new_node1)
        parent.successors.append(new_node2)

        # Update statistics
        self.__amount_nodes += 1
        node_amount_updation = True

    elif child_node_updation is True:
        # Splitting has been finished below, check for possibility to merge
        # (at least we have already two children).
        if self.__merge_nearest_successors(search_node) is True:
            self.__amount_nodes -= 1

    return node_amount_updation
def __merge_nearest_successors(self, node):
    """!
    @brief Find nearest successors and merge them.

    @param[in] node (non_leaf_node): Node whose two nearest successors should be merged.

    @return (bool): True if merging has been successfully performed, otherwise False.

    """
    merging_result = False;

    # Merging is attempted only when the children are non-leaf nodes.
    if (node.successors[0].type == cfnode_type.CFNODE_NONLEAF):
        [nearest_child_node1, nearest_child_node2] = node.get_nearest_successors(self.__type_measurement);

        # Merge only if the union does not violate the branching factor.
        if (len(nearest_child_node1.successors) + len(nearest_child_node2.successors) <= self.__branch_factor):
            node.successors.remove(nearest_child_node2);
            # NOTE(review): children are non-leaf in this branch, so this leaf
            # bookkeeping looks unreachable - confirm before removing.
            if (nearest_child_node2.type == cfnode_type.CFNODE_LEAF):
                self.__leafes.remove(nearest_child_node2);

            nearest_child_node1.merge(nearest_child_node2);

            merging_result = True;

    return merging_result;
def __split_procedure(self, split_node):
    """!
    @brief Starts node splitting procedure in the CF-tree from the specified node.

    @param[in] split_node (cfnode): CF-tree node that should be splitted.

    """
    if (split_node is self.__root):
        # Root split: a new root is created on top, the tree grows in height.
        self.__root = non_leaf_node(split_node.feature, None, [ split_node ], None);
        split_node.parent = self.__root;

        # Update statistics
        self.__amount_nodes += 1;
        self.__height += 1;

    [new_node1, new_node2] = self.__split_leaf_node(split_node);

    # Replace the split leaf by its two halves in the leaf registry...
    self.__leafes.remove(split_node);
    self.__leafes.append(new_node1);
    self.__leafes.append(new_node2);

    # ...and in the parent's successor list.
    parent = split_node.parent;
    parent.successors.remove(split_node);
    parent.successors.append(new_node1);
    parent.successors.append(new_node2);

    # Update statistics
    self.__amount_nodes += 1;
def __split_nonleaf_node(self, node):
    """!
    @brief Performs splitting of the specified non-leaf node.

    @param[in] node (non_leaf_node): Non-leaf node that should be split.

    @return (list) New pair of non-leaf nodes [non_leaf_node1, non_leaf_node2].

    """
    # The two farthest successors seed the two new non-leaf nodes.
    [seed1, seed2] = node.get_farthest_successors(self.__type_measurement)

    split1 = non_leaf_node(seed1.feature, node.parent, [seed1], None)
    split2 = non_leaf_node(seed2.feature, node.parent, [seed2], None)

    seed1.parent = split1
    seed2.parent = split2

    # Attach every remaining successor to the closer of the two new nodes.
    for successor in node.successors:
        if (successor is seed1) or (successor is seed2):
            continue

        distance1 = split1.get_distance(successor, self.__type_measurement)
        distance2 = split2.get_distance(successor, self.__type_measurement)

        if distance1 < distance2:
            split1.insert_successor(successor)
        else:
            split2.insert_successor(successor)

    return [split1, split2]
def __split_leaf_node(self, node):
    """!
    @brief Performs splitting of the specified leaf node.

    @param[in] node (leaf_node): Leaf node that should be split.

    @return (list) New pair of leaf nodes [leaf_node1, leaf_node2].

    @warning Split node is transformed to non_leaf.

    """
    # The two farthest entries become seeds of the two new leaves.
    [seed_entry1, seed_entry2] = node.get_farthest_entries(self.__type_measurement)

    split1 = leaf_node(seed_entry1, node.parent, [seed_entry1], None)
    split2 = leaf_node(seed_entry2, node.parent, [seed_entry2], None)

    # Every remaining entry goes to the leaf whose feature is closer.
    for entity in node.entries:
        if (entity is seed_entry1) or (entity is seed_entry2):
            continue

        distance1 = split1.feature.get_distance(entity, self.__type_measurement)
        distance2 = split2.feature.get_distance(entity, self.__type_measurement)

        if distance1 < distance2:
            split1.insert_entry(entity)
        else:
            split2.insert_entry(entity)

    return [split1, split2]
def show_feature_destibution(self, data = None):
    """!
    @brief Shows feature distribution.
    @details Only features in 1D, 2D, 3D space can be visualized.

    @param[in] data (list): List of points that will be used for visualization,
                if it is not specified then only features are displayed.

    """
    # NOTE(review): the method name contains a typo ('destibution'); kept as-is
    # because renaming would break external callers.
    visualizer = cluster_visualizer();

    print("amount of nodes: ", self.__amount_nodes);

    if (data is not None):
        # Draw the raw input points underneath the tree features.
        visualizer.append_cluster(data, marker = 'x');

    # Draw the centroid of every node on each level; upper levels get bigger markers.
    for level in range(0, self.height):
        level_nodes = self.get_level_nodes(level);

        centers = [ node.feature.get_centroid() for node in level_nodes ];
        visualizer.append_cluster(centers, None, markersize = (self.height - level + 1) * 5);

    visualizer.show();
def process(self):
    """!
    @brief Performs cluster analysis in line with rules of agglomerative algorithm and similarity.

    @see get_clusters()

    """
    if self.__ccore is True:
        # Delegate the whole processing to the C++ implementation.
        self.__clusters = wrapper.agglomerative_algorithm(self.__pointer_data, self.__number_clusters, self.__similarity)
        return

    # Python fallback: start from singleton clusters and merge the most similar
    # pair until the requested amount of clusters is reached.
    self.__clusters = [[index] for index in range(len(self.__pointer_data))]

    while len(self.__clusters) > self.__number_clusters:
        self.__merge_similar_clusters()
def __merge_similar_clusters(self):
    """!
    @brief Merges the most similar clusters in line with link type.

    """
    # Dispatch the merge step by the configured link type.
    handlers = {
        type_link.AVERAGE_LINK: self.__merge_by_average_link,
        type_link.CENTROID_LINK: self.__merge_by_centroid_link,
        type_link.COMPLETE_LINK: self.__merge_by_complete_link,
        type_link.SINGLE_LINK: self.__merge_by_signle_link,
    }

    handler = handlers.get(self.__similarity)
    if handler is None:
        raise NameError('Not supported similarity is used')

    handler()
def __merge_by_average_link(self):
    """!
    @brief Merges the most similar clusters in line with average link type.

    """
    minimum_average_distance = float('Inf');

    for index_cluster1 in range(0, len(self.__clusters)):
        for index_cluster2 in range(index_cluster1 + 1, len(self.__clusters)):

            # Accumulate squared distances between every pair of objects of the two clusters.
            candidate_average_distance = 0.0;
            for index_object1 in self.__clusters[index_cluster1]:
                for index_object2 in self.__clusters[index_cluster2]:
                    candidate_average_distance += euclidean_distance_square(self.__pointer_data[index_object1], self.__pointer_data[index_object2]);

            # NOTE(review): classic average-link (UPGMA) normalizes by the amount
            # of pairs (|c1| * |c2|), not by |c1| + |c2| - confirm whether this
            # normalization is intentional.
            candidate_average_distance /= (len(self.__clusters[index_cluster1]) + len(self.__clusters[index_cluster2]));

            if (candidate_average_distance < minimum_average_distance):
                minimum_average_distance = candidate_average_distance;
                indexes = [index_cluster1, index_cluster2];

    # Merge the most similar pair and drop the absorbed cluster.
    self.__clusters[indexes[0]] += self.__clusters[indexes[1]];
    self.__clusters.pop(indexes[1]);
def __merge_by_centroid_link(self):
    """!
    @brief Merges the most similar clusters in line with centroid link type.

    """
    best_distance = float('Inf')
    indexes = None

    # Search every pair of centers for the smallest squared distance.
    for index1 in range(len(self.__centers)):
        for index2 in range(index1 + 1, len(self.__centers)):
            distance = euclidean_distance_square(self.__centers[index1], self.__centers[index2])
            if distance < best_distance:
                best_distance = distance
                indexes = [index1, index2]

    # Merge the winning pair, refresh its center and drop the absorbed cluster.
    self.__clusters[indexes[0]] += self.__clusters[indexes[1]]
    self.__centers[indexes[0]] = self.__calculate_center(self.__clusters[indexes[0]])

    self.__clusters.pop(indexes[1])
    self.__centers.pop(indexes[1])
def __merge_by_complete_link(self):
    """!
    @brief Merges the most similar clusters in line with complete link type.

    """
    best_distance = float('Inf')
    indexes = None

    # Complete link: cluster dissimilarity is the distance between the farthest members.
    for index1 in range(len(self.__clusters)):
        for index2 in range(index1 + 1, len(self.__clusters)):
            farthest_distance = self.__calculate_farthest_distance(index1, index2)
            if farthest_distance < best_distance:
                best_distance = farthest_distance
                indexes = [index1, index2]

    # Merge the most similar pair and drop the absorbed cluster.
    self.__clusters[indexes[0]] += self.__clusters[indexes[1]]
    self.__clusters.pop(indexes[1])
def __calculate_farthest_distance(self, index_cluster1, index_cluster2):
    """!
    @brief Finds two farthest objects in two specified clusters and returns distance between them.

    @param[in] index_cluster1 (uint): Index of the first cluster.
    @param[in] index_cluster2 (uint): Index of the second cluster.

    @return The farthest euclidean distance (square) between the two clusters.

    """
    # Maximum over all cross-cluster object pairs; 0.0 when either cluster is empty.
    return max(
        (euclidean_distance_square(self.__pointer_data[index_object1], self.__pointer_data[index_object2])
         for index_object1 in self.__clusters[index_cluster1]
         for index_object2 in self.__clusters[index_cluster2]),
        default=0.0)
def __merge_by_signle_link(self):
    """!
    @brief Merges the most similar clusters in line with single link type.

    """
    # NOTE(review): the method name contains a typo ('signle'); kept as-is
    # because the dispatcher calls it by this exact name.
    best_distance = float('Inf')
    indexes = None

    # Single link: cluster dissimilarity is the distance between the nearest members.
    for index1 in range(len(self.__clusters)):
        for index2 in range(index1 + 1, len(self.__clusters)):
            nearest_distance = self.__calculate_nearest_distance(index1, index2)
            if nearest_distance < best_distance:
                best_distance = nearest_distance
                indexes = [index1, index2]

    # Merge the most similar pair and drop the absorbed cluster.
    self.__clusters[indexes[0]] += self.__clusters[indexes[1]]
    self.__clusters.pop(indexes[1])
def __calculate_nearest_distance(self, index_cluster1, index_cluster2):
    """!
    @brief Finds two nearest objects in two specified clusters and returns distance between them.

    @param[in] index_cluster1 (uint): Index of the first cluster.
    @param[in] index_cluster2 (uint): Index of the second cluster.

    @return The nearest euclidean distance (square) between the two clusters.

    """
    # Minimum over all cross-cluster object pairs; +Inf when either cluster is empty.
    return min(
        (euclidean_distance_square(self.__pointer_data[index_object1], self.__pointer_data[index_object2])
         for index_object1 in self.__clusters[index_cluster1]
         for index_object2 in self.__clusters[index_cluster2]),
        default=float('Inf'))
def __calculate_center(self, cluster):
    """!
    @brief Calculates new center for the specified cluster.

    @param[in] cluster (list): List of indexes of points that form the cluster.

    @return (list) New value of the center of the specified cluster.

    """
    dimension = len(self.__pointer_data[cluster[0]])
    center = [0] * dimension

    # Sum coordinates over all points of the cluster...
    for index_point in cluster:
        point = self.__pointer_data[index_point]
        for index_dimension in range(dimension):
            center[index_dimension] += point[index_dimension]

    # ...and normalize by the amount of points to obtain the mean.
    return [coordinate / len(cluster) for coordinate in center]
def som_create(rows, cols, conn_type, parameters):
    """!
    @brief Create self-organized map using CCORE pyclustering library.

    @param[in] rows (uint): Number of neurons in the column (number of rows).
    @param[in] cols (uint): Number of neurons in the row (number of columns).
    @param[in] conn_type (type_conn): Type of connection between oscillators in the network (grid four, grid eight, honeycomb, function neighbour).
    @param[in] parameters (som_parameters): Other specific parameters.

    @return (POINTER) C-pointer to object of self-organized feature map in memory.

    """
    # Repack python-side parameters into the ctypes structure expected by CCORE.
    native_params = c_som_parameters()
    native_params.init_type = parameters.init_type
    native_params.init_radius = parameters.init_radius
    native_params.init_learn_rate = parameters.init_learn_rate
    native_params.adaptation_threshold = parameters.adaptation_threshold

    library = ccore_library.get()
    library.som_create.restype = POINTER(c_void_p)
    return library.som_create(c_uint(rows), c_uint(cols), c_uint(conn_type), pointer(native_params))
def som_load(som_pointer, weights, award, capture_objects):
    """!
    @brief Load dump of the network to SOM.
    @details Initialize SOM using existing weights, amount of captured objects by each neuron and
              captured objects by each neuron. Initialization is not performed if weights are empty.

    @param[in] som_pointer (POINTER): Pointer to object of self-organized map.
    @param[in] weights (list): Weights that should be assigned to neurons.
    @param[in] award (list): Amount of captured objects by each neuron.
    @param[in] capture_objects (list): Captured objects by each neuron.

    """
    # Empty weights mean there is nothing to restore.
    if not weights:
        return

    packed_weights = package_builder(weights, c_double).create()
    packed_award = package_builder(award, c_size_t).create()
    packed_captures = package_builder(capture_objects, c_size_t).create()

    ccore_library.get().som_load(som_pointer, packed_weights, packed_award, packed_captures)
def som_train(som_pointer, data, epochs, autostop):
    """!
    @brief Trains self-organized feature map (SOM) using CCORE pyclustering library.

    @param[in] som_pointer (POINTER): Pointer to object of self-organized map.
    @param[in] data (list): Input data - list of points where each point is represented by list of features, for example coordinates.
    @param[in] epochs (uint): Number of epochs for training.
    @param[in] autostop (bool): Automatic termination of learning process when adaptation is not occurred.

    @return (uint) Number of learning iterations.

    """
    packed_data = package_builder(data, c_double).create()

    library = ccore_library.get()
    library.som_train.restype = c_size_t
    return library.som_train(som_pointer, packed_data, c_uint(epochs), autostop)
def som_simulate(som_pointer, pattern):
    """!
    @brief Processes input pattern (no learning) and returns index of neuron-winner.
    @details Using the winner index, captured objects can be obtained via property capture_objects.

    @param[in] som_pointer (c_pointer): Pointer to object of self-organized map.
    @param[in] pattern (list): Input pattern.

    @return (uint) Index of neuron-winner.

    """
    packed_pattern = package_builder(pattern, c_double).create()

    library = ccore_library.get()
    library.som_simulate.restype = c_size_t
    return library.som_simulate(som_pointer, packed_pattern)
def som_get_winner_number(som_pointer):
    """!
    @brief Returns number of winners at the last step of the learning process.

    @param[in] som_pointer (c_pointer): Pointer to object of self-organized map.

    """
    library = ccore_library.get()
    library.som_get_winner_number.restype = c_size_t
    return library.som_get_winner_number(som_pointer)
def som_get_size(som_pointer):
    """!
    @brief Returns size of self-organized map (number of neurons).

    @param[in] som_pointer (c_pointer): Pointer to object of self-organized map.

    """
    library = ccore_library.get()
    library.som_get_size.restype = c_size_t
    return library.som_get_size(som_pointer)
def som_get_capture_objects(som_pointer):
    """!
    @brief Returns list of indexes of captured objects by each neuron.

    @param[in] som_pointer (c_pointer): Pointer to object of self-organized map.

    """
    library = ccore_library.get()
    library.som_get_capture_objects.restype = POINTER(pyclustering_package)
    package = library.som_get_capture_objects(som_pointer)

    # Unpack the native package into plain python lists.
    return package_extractor(package).extract()
def allocate_sync_ensembles(self, tolerance = 0.1, threshold_steps = 1):
    """!
    @brief Allocate clusters in line with ensembles of synchronous oscillators where each
            synchronous ensemble corresponds to only one cluster.

    @param[in] tolerance (double): Maximum error for allocation of synchronous ensemble oscillators.
    @param[in] threshold_steps (uint): Number of steps from the end of simulation that should be
                analysed for ensemble allocation. If amount of simulation steps is less than
                threshold steps then the amount of analysed steps is reduced accordingly.

    @return (list) Groups of indexes of synchronous oscillators, for example,
             [ [index_osc1, index_osc3], [index_osc2], [index_osc4, index_osc5] ].

    """
    clusters = [ [0] ]

    number_oscillators = len(self._dynamic[0])

    # Analyse only the trailing 'threshold_steps' steps (bounded by available steps).
    analysis_steps = min(threshold_steps, len(self._dynamic))
    analysis_start_step_index = len(self._dynamic) - 1

    for i in range(1, number_oscillators):
        captured_neuron = False

        for cluster in clusters:
            neuron_index = cluster[0]

            # Bug fix: synchronism is evaluated per candidate ensemble; previously
            # one mismatching ensemble blocked capture by ALL later ensembles.
            synchronous = True
            for step in range(analysis_start_step_index, analysis_start_step_index - analysis_steps, -1):
                neuron_amplitude = self._dynamic[step][neuron_index]
                candidate_amplitude = self._dynamic[step][i]

                # Amplitudes must stay within the tolerance band on every analysed step.
                if not (neuron_amplitude - tolerance < candidate_amplitude < neuron_amplitude + tolerance):
                    synchronous = False
                    break

            if synchronous:
                cluster.append(i)
                captured_neuron = True
                break

        # Not synchronous with any existing ensemble - start a new one.
        if not captured_neuron:
            clusters.append([i])

    return clusters
def outputs(self, values):
    """!
    @brief Sets outputs of neurons.

    """
    # Store independent copies so later in-place changes of one list do not
    # affect the other.
    self._outputs = list(values)
    self._outputs_buffer = list(values)
def _neuron_states(self, inputs, t, argv):
    """!
    @brief Returns new value of the neuron (oscillator).

    @param[in] inputs (list): Initial values (current) of the neuron - excitatory.
    @param[in] t (double): Current time of simulation.
    @param[in] argv (tuple): Extra arguments that are not used for integration - index of the neuron.

    @return (double) New value of the neuron.

    """
    xi = inputs[0]
    index = argv

    # Impact starts with the neuron's own feedback, then adds each connected neighbour.
    impact = self._weight[index][index] * self._outputs[index]
    for neighbour in range(self._num_osc):
        if self.has_connection(neighbour, index):
            impact += self._weight[index][neighbour] * self._outputs[neighbour]

    x = -xi + impact

    # Clip the buffered binary output when the state leaves [-1, 1].
    if xi > 1:
        self._outputs_buffer[index] = 1
    if xi < -1:
        self._outputs_buffer[index] = -1

    return x
def simulate_static(self, steps, time, solution = solve_type.RK4, collect_dynamic = False): """! @brief Performs static simulation of hysteresis oscillatory network. @param[in] steps (uint): Number steps of simulations during simulation. @param[in] time (double): Time of simulation. @param[in] solution (solve_type): Type of solution (solving). @param[in] collect_dynamic (bool): If True - returns whole dynamic of oscillatory network, otherwise returns only last values of dynamics. @return (hysteresis_dynamic) Dynamic of oscillatory network. If argument 'collect_dynamic' = True, than return dynamic for the whole simulation time, otherwise returns only last values (last step of simulation) of dynamic. """ # Check solver before simulation if (solution == solve_type.FAST): raise NameError("Solver FAST is not support due to low accuracy that leads to huge error."); elif (solution == solve_type.RKF45): raise NameError("Solver RKF45 is not support in python version."); dyn_state = None; dyn_time = None; if (collect_dynamic == True): dyn_state = []; dyn_time = []; dyn_state.append(self._states); dyn_time.append(0); step = time / steps; int_step = step / 10.0; for t in numpy.arange(step, time + step, step): # update states of oscillators self._states = self._calculate_states(solution, t, step, int_step); # update states of oscillators if (collect_dynamic is True): dyn_state.append(self._states); dyn_time.append(t); if (collect_dynamic is False): dyn_state.append(self._states); dyn_time.append(time); return hysteresis_dynamic(dyn_state, dyn_time);
def _calculate_states(self, solution, t, step, int_step):
    """!
    @brief Calculates new states for neurons using differential calculus and returns them.

    @param[in] solution (solve_type): Type solver of the differential equation.
    @param[in] t (double): Current time of simulation.
    @param[in] step (double): Step of solution at the end of which states of oscillators should be calculated.
    @param[in] int_step (double): Step differentiation that is used for solving differential equation.

    @return (list) New states for neurons.

    """
    next_states = [0] * self._num_osc

    # Each oscillator is integrated independently over [t - step, t).
    for index in range(self._num_osc):
        result = odeint(self._neuron_states, self._states[index], numpy.arange(t - step, t, int_step), (index,))
        next_states[index] = result[-1][0]

    # Publish the outputs that were buffered during integration.
    self._outputs = list(self._outputs_buffer)

    return next_states
def output(self):
    """!
    @brief Returns output dynamic of the network.

    """
    # Prefer data stored on the CCORE side when the C++ dynamic exists.
    if self.__ccore_legion_dynamic_pointer is None:
        return self.__output

    return wrapper.legion_dynamic_get_output(self.__ccore_legion_dynamic_pointer)
def inhibitor(self):
    """!
    @brief Returns output dynamic of the global inhibitor of the network.

    """
    # Prefer data stored on the CCORE side when the C++ dynamic exists.
    if self.__ccore_legion_dynamic_pointer is None:
        return self.__inhibitor

    return wrapper.legion_dynamic_get_inhibitory_output(self.__ccore_legion_dynamic_pointer)
def time(self):
    """!
    @brief Returns simulation time.

    """
    if self.__ccore_legion_dynamic_pointer is not None:
        return wrapper.legion_dynamic_get_time(self.__ccore_legion_dynamic_pointer)

    # Python-side dynamic has no explicit time scale - expose step indexes.
    return list(range(len(self)))
def allocate_sync_ensembles(self, tolerance = 0.1):
    """!
    @brief Allocate clusters in line with ensembles of synchronous oscillators where each
            synchronous ensemble corresponds to only one cluster.

    @param[in] tolerance (double): Maximum error for allocation of synchronous ensemble oscillators.

    @return (list) Groups of indexes of synchronous oscillators, for example,
             [ [index_osc1, index_osc3], [index_osc2], [index_osc4, index_osc5] ].

    """
    # Pull the dynamic from the CCORE instance before analysis when it exists.
    if self.__ccore_legion_dynamic_pointer is not None:
        self.__output = wrapper.legion_dynamic_get_output(self.__ccore_legion_dynamic_pointer)

    return allocate_sync_ensembles(self.__output, tolerance)
def __create_stimulus(self, stimulus):
    """!
    @brief Create stimulus for oscillators in line with stimulus map and parameters.

    @param[in] stimulus (list): Stimulus for oscillators that is represented by list,
                number of stimulus should be equal to number of oscillators.

    """
    if len(stimulus) != self._num_osc:
        raise NameError("Number of stimulus should be equal number of oscillators in the network.")

    # Any positive stimulus is normalized to the configured intensity 'I'.
    self._stimulus = [self._params.I if value > 0 else 0 for value in stimulus]
def __create_dynamic_connections(self):
    """!
    @brief Create dynamic connections in line with input stimulus.

    """
    if self._stimulus is None:
        raise NameError("Stimulus should initialed before creation of the dynamic connections in the network.")

    self._dynamic_coupling = [[0] * self._num_osc for _ in range(self._num_osc)]

    for index in range(self._num_osc):
        neighbors = self.get_neighbors(index)

        # Only stimulated oscillators with at least one neighbor form dynamic links.
        if (len(neighbors) == 0) or (self._stimulus[index] <= 0):
            continue

        stimulated_neighbors = [neighbor for neighbor in neighbors if self._stimulus[neighbor] > 0]

        if len(stimulated_neighbors) > 0:
            # Total weight Wt is shared equally between the stimulated neighbors.
            dynamic_weight = self._params.Wt / float(len(stimulated_neighbors))
            for neighbor in stimulated_neighbors:
                self._dynamic_coupling[index][neighbor] = dynamic_weight
def simulate(self, steps, time, stimulus, solution = solve_type.RK4, collect_dynamic = True):
    """!
    @brief Performs static simulation of LEGION oscillatory network.

    @param[in] steps (uint): Number steps of simulations during simulation.
    @param[in] time (double): Time of simulation.
    @param[in] stimulus (list): Stimulus for oscillators, number of stimulus should be equal to number of oscillators,
                example of stimulus for 5 oscillators [0, 0, 1, 1, 0], value of stimulus is defined by parameter 'I'.
    @param[in] solution (solve_type): Method that is used for differential equation.
    @param[in] collect_dynamic (bool): If True - returns whole dynamic of oscillatory network, otherwise returns only last values of dynamics.

    @return (list) Dynamic of oscillatory network. If argument 'collect_dynamic' = True, than return dynamic for the whole simulation time,
             otherwise returns only last values (last step of simulation) of dynamic.

    """
    # Delegate to the C++ implementation when it is available.
    if (self.__ccore_legion_pointer is not None):
        pointer_dynamic = wrapper.legion_simulate(self.__ccore_legion_pointer, steps, time, solution, collect_dynamic, stimulus);
        return legion_dynamic(None, None, None, pointer_dynamic);

    # Check solver before simulation: only supported solvers are accepted.
    if (solution == solve_type.FAST):
        raise NameError("Solver FAST is not support due to low accuracy that leads to huge error.");
    elif (solution == solve_type.RKF45):
        raise NameError("Solver RKF45 is not support in python version. RKF45 is supported in CCORE implementation.");

    # set stimulus
    self.__create_stimulus(stimulus);

    # calculate dynamic weights
    self.__create_dynamic_connections();

    dyn_exc = None;
    dyn_time = None;
    dyn_ginh = None;

    # Store only excitatory part of the oscillator dynamic.
    if (collect_dynamic == True):
        dyn_exc = [];
        dyn_time = [];
        dyn_ginh = [];

    step = time / steps;
    # Integration sub-step: ten differentiation points per simulation step.
    int_step = step / 10.0;

    for t in numpy.arange(step, time + step, step):
        # update states of oscillators
        self._calculate_states(solution, t, step, int_step);

        # store the current state, or only keep the latest values
        if (collect_dynamic == True):
            dyn_exc.append(self._excitatory);
            dyn_time.append(t);
            dyn_ginh.append(self._global_inhibitor);
        else:
            dyn_exc = self._excitatory;
            dyn_time = t;
            dyn_ginh = self._global_inhibitor;

    return legion_dynamic(dyn_exc, dyn_ginh, dyn_time);
def _calculate_states(self, solution, t, step, int_step):
    """!
    @brief Calculates new state of each oscillator in the network.
    
    @param[in] solution (solve_type): Type solver of the differential equation.
    @param[in] t (double): Current time of simulation.
    @param[in] step (double): Step of solution at the end of which states of oscillators should be calculated.
    @param[in] int_step (double): Step differentiation that is used for solving differential equation.
    
    """
    
    # New states are computed into separate buffers so that every oscillator
    # is integrated against the same (previous) network state.
    next_excitatory = [0.0] * self._num_osc;
    next_inhibitory = [0.0] * self._num_osc;
    
    next_potential = [];
    if (self._params.ENABLE_POTENTIONAL is True):
        next_potential = [0.0] * self._num_osc;
    
    # Update states of oscillators: full model (with potential) or simplified model.
    for index in range (0, self._num_osc, 1):
        if (self._params.ENABLE_POTENTIONAL is True):
            result = odeint(self._legion_state, [self._excitatory[index], self._inhibitory[index], self._potential[index]], numpy.arange(t - step, t, int_step), (index , ));
            [ next_excitatory[index], next_inhibitory[index], next_potential[index] ] = result[len(result) - 1][0:3];
        else:
            result = odeint(self._legion_state_simplify, [self._excitatory[index], self._inhibitory[index] ], numpy.arange(t - step, t, int_step), (index , ));
            [ next_excitatory[index], next_inhibitory[index] ] = result[len(result) - 1][0:2];
        
        # Update coupling term: excitation from neighbors above threshold 'teta_x'
        # minus the contribution of the global inhibitor.
        neighbors = self.get_neighbors(index);
        
        coupling = 0
        for index_neighbor in neighbors:
            coupling += self._dynamic_coupling[index][index_neighbor] * heaviside(self._excitatory[index_neighbor] - self._params.teta_x);
        
        # Buffered so the new coupling terms do not affect oscillators
        # that are integrated later in this same pass.
        self._buffer_coupling_term[index] = coupling - self._params.Wz * heaviside(self._global_inhibitor - self._params.teta_xz);
    
    # Update state of global inhibitory.
    result = odeint(self._global_inhibitor_state, self._global_inhibitor, numpy.arange(t - step, t, int_step), (None, ));
    self._global_inhibitor = result[len(result) - 1][0];
    
    # Regenerate noise and commit the buffered states for the next step.
    self._noise = [random.random() * self._params.ro for i in range(self._num_osc)];
    self._coupling_term = self._buffer_coupling_term[:];
    self._inhibitory = next_inhibitory[:];
    self._excitatory = next_excitatory[:];
    
    if (self._params.ENABLE_POTENTIONAL is True):
        self._potential = next_potential[:];
def _global_inhibitor_state(self, z, t, argv): """! @brief Returns new value of global inhibitory @param[in] z (dobule): Current value of inhibitory. @param[in] t (double): Current time of simulation. @param[in] argv (tuple): It's not used, can be ignored. @return (double) New value if global inhibitory (not assign). """ sigma = 0.0; for x in self._excitatory: if (x > self._params.teta_zx): sigma = 1.0; break; return self._params.fi * (sigma - z);
def _legion_state_simplify(self, inputs, t, argv): """! @brief Returns new values of excitatory and inhibitory parts of oscillator of oscillator. @details Simplify model doesn't consider oscillator potential. @param[in] inputs (list): Initial values (current) of oscillator [excitatory, inhibitory]. @param[in] t (double): Current time of simulation. @param[in] argv (uint): Extra arguments that are not used for integration - index of oscillator. @return (list) New values of excitatoty and inhibitory part of oscillator (not assign). """ index = argv; x = inputs[0]; # excitatory y = inputs[1]; # inhibitory dx = 3.0 * x - x ** 3.0 + 2.0 - y + self._stimulus[index] + self._coupling_term[index] + self._noise[index]; dy = self._params.eps * (self._params.gamma * (1.0 + math.tanh(x / self._params.betta)) - y); neighbors = self.get_neighbors(index); potential = 0.0; for index_neighbor in neighbors: potential += self._params.T * heaviside(self._excitatory[index_neighbor] - self._params.teta_x); return [dx, dy];
def _legion_state(self, inputs, t, argv): """! @brief Returns new values of excitatory and inhibitory parts of oscillator and potential of oscillator. @param[in] inputs (list): Initial values (current) of oscillator [excitatory, inhibitory, potential]. @param[in] t (double): Current time of simulation. @param[in] argv (uint): Extra arguments that are not used for integration - index of oscillator. @return (list) New values of excitatoty and inhibitory part of oscillator and new value of potential (not assign). """ index = argv; x = inputs[0]; # excitatory y = inputs[1]; # inhibitory p = inputs[2]; # potential potential_influence = heaviside(p + math.exp(-self._params.alpha * t) - self._params.teta); dx = 3.0 * x - x ** 3.0 + 2.0 - y + self._stimulus[index] * potential_influence + self._coupling_term[index] + self._noise[index]; dy = self._params.eps * (self._params.gamma * (1.0 + math.tanh(x / self._params.betta)) - y); neighbors = self.get_neighbors(index); potential = 0.0; for index_neighbor in neighbors: potential += self._params.T * heaviside(self._excitatory[index_neighbor] - self._params.teta_x); dp = self._params.lamda * (1.0 - p) * heaviside(potential - self._params.teta_p) - self._params.mu * p; return [dx, dy, dp];
def allocate_map_coloring(self, tolerance = 0.1): """! @brief Allocates coloring map for graph that has been processed. @param[in] tolerance (double): Defines maximum deviation between phases. @return (list) Colors for each node (index of node in graph), for example [color1, color2, color2, ...]. """ clusters = self.allocate_color_clusters(tolerance); number_oscillators = len(self._dynamic[0]); coloring_map = [0] * number_oscillators; for color_index in range(len(clusters)): for node_index in clusters[color_index]: coloring_map[node_index] = color_index; return coloring_map;
def _create_connections(self, graph_matrix): """! @brief Creates connection in the network in line with graph. @param[in] graph_matrix (list): Matrix representation of the graph. """ for row in range(0, len(graph_matrix)): for column in range (0, len(graph_matrix[row])): if (graph_matrix[row][column] > 0): self.set_connection(row, column);
def _phase_kuramoto(self, teta, t, argv): """! @brief Returns result of phase calculation for oscillator in the network. @param[in] teta (double): Value of phase of the oscillator with index argv in the network. @param[in] t (double): Unused, can be ignored. @param[in] argv (uint): Index of the oscillator in the network. @return (double) New value of phase for oscillator with index argv. """ index = argv; phase = 0; for k in range(0, self._num_osc): if (self.has_connection(index, k) == True): phase += self._negative_weight * math.sin(self._phases[k] - teta); else: phase += self._positive_weight * math.sin(self._phases[k] - teta); return ( phase / self._reduction );
def process(self, order = 0.998, solution = solve_type.FAST, collect_dynamic = False): """! @brief Performs simulation of the network (performs solving of graph coloring problem). @param[in] order (double): Defines when process of synchronization in the network is over, range from 0 to 1. @param[in] solution (solve_type): defines type (method) of solving diff. equation. @param[in] collect_dynamic (bool): If True - return full dynamic of the network, otherwise - last state of phases. @return (syncnet_analyser) Returns analyser of results of coloring. """ analyser = self.simulate_dynamic(order, solution, collect_dynamic); return syncgcolor_analyser(analyser.output, analyser.time, None);
def allocate_clusters(self, eps = 0.01, indexes = None, iteration = None): """! @brief Returns list of clusters in line with state of ocillators (phases). @param[in] eps (double): Tolerance level that define maximal difference between phases of oscillators in one cluster. @param[in] indexes (list): List of real object indexes and it should be equal to amount of oscillators (in case of 'None' - indexes are in range [0; amount_oscillators]). @param[in] iteration (uint): Iteration of simulation that should be used for allocation. @return (list) List of clusters, for example [ [cluster1], [cluster2], ... ].) """ return self.allocate_sync_ensembles(eps, indexes, iteration);
def animate_cluster_allocation(dataset, analyser, animation_velocity = 75, tolerance = 0.1, save_movie = None, title = None):
    """!
    @brief Shows animation of output dynamic (output of each oscillator) during simulation on a circle from [0; 2pi].
    
    @param[in] dataset (list): Input data that was used for processing by the network.
    @param[in] analyser (syncnet_analyser): Output dynamic analyser of the Sync network.
    @param[in] animation_velocity (uint): Interval between frames in milliseconds.
    @param[in] tolerance (double): Tolerance level that define maximal difference between phases of oscillators in one cluster.
    @param[in] save_movie (string): If it is specified then animation will be stored to file that is specified in this parameter.
    @param[in] title (string): If it is specified then title will be displayed on the animation plot.
    
    """
    
    figure = plt.figure();
    
    def init_frame():
        # The initial frame is simply the first step of the recorded dynamic.
        return frame_generation(0);
    
    def frame_generation(index_dynamic):
        # The figure is redrawn from scratch for every frame: polar plot of
        # phases on the left, cluster visualization of the dataset on the right.
        figure.clf();
        if (title is not None):
            figure.suptitle(title, fontsize = 26, fontweight = 'bold');
        
        ax1 = figure.add_subplot(121, projection='polar');
        
        # Clusters are allocated from the phases at this particular iteration.
        clusters = analyser.allocate_clusters(eps = tolerance, iteration = index_dynamic);
        dynamic = analyser.output[index_dynamic];
        
        visualizer = cluster_visualizer(size_row = 2);
        visualizer.append_clusters(clusters, dataset);
        
        # Phases are drawn on the unit circle (radius 1.0 for every oscillator).
        artist1, = ax1.plot(dynamic, [1.0] * len(dynamic), marker = 'o', color = 'blue', ls = '');
        
        visualizer.show(figure, display = False);
        artist2 = figure.gca();
        
        return [ artist1, artist2 ];
    
    cluster_animation = animation.FuncAnimation(figure, frame_generation, len(analyser), interval = animation_velocity, init_func = init_frame, repeat_delay = 5000);
    
    if (save_movie is not None):
#        plt.rcParams['animation.ffmpeg_path'] = 'D:\\Program Files\\ffmpeg-3.3.1-win64-static\\bin\\ffmpeg.exe';
#        ffmpeg_writer = animation.FFMpegWriter(fps = 15);
#        cluster_animation.save(save_movie, writer = ffmpeg_writer);
        cluster_animation.save(save_movie, writer = 'ffmpeg', fps = 15, bitrate = 1500);
    else:
        plt.show();
def _create_connections(self, radius): """! @brief Create connections between oscillators in line with input radius of connectivity. @param[in] radius (double): Connectivity radius between oscillators. """ if (self._ena_conn_weight is True): self._conn_weight = [[0] * self._num_osc for _ in range(0, self._num_osc, 1)]; maximum_distance = 0; minimum_distance = float('inf'); # Create connections for i in range(0, self._num_osc, 1): for j in range(i + 1, self._num_osc, 1): dist = euclidean_distance(self._osc_loc[i], self._osc_loc[j]); if (self._ena_conn_weight is True): self._conn_weight[i][j] = dist; self._conn_weight[j][i] = dist; if (dist > maximum_distance): maximum_distance = dist; if (dist < minimum_distance): minimum_distance = dist; if (dist <= radius): self.set_connection(i, j); if (self._ena_conn_weight is True): multiplier = 1; subtractor = 0; if (maximum_distance != minimum_distance): multiplier = (maximum_distance - minimum_distance); subtractor = minimum_distance; for i in range(0, self._num_osc, 1): for j in range(i + 1, self._num_osc, 1): value_conn_weight = (self._conn_weight[i][j] - subtractor) / multiplier; self._conn_weight[i][j] = value_conn_weight; self._conn_weight[j][i] = value_conn_weight;
def process(self, order = 0.998, solution = solve_type.FAST, collect_dynamic = True): """! @brief Peforms cluster analysis using simulation of the oscillatory network. @param[in] order (double): Order of synchronization that is used as indication for stopping processing. @param[in] solution (solve_type): Specified type of solving diff. equation. @param[in] collect_dynamic (bool): Specified requirement to collect whole dynamic of the network. @return (syncnet_analyser) Returns analyser of results of clustering. """ if (self._ccore_network_pointer is not None): pointer_output_dynamic = syncnet_process(self._ccore_network_pointer, order, solution, collect_dynamic); return syncnet_analyser(None, None, pointer_output_dynamic); else: output_sync_dynamic = self.simulate_dynamic(order, solution, collect_dynamic); return syncnet_analyser(output_sync_dynamic.output, output_sync_dynamic.time, None);
def _phase_kuramoto(self, teta, t, argv): """! @brief Overrided method for calculation of oscillator phase. @param[in] teta (double): Current value of phase. @param[in] t (double): Time (can be ignored). @param[in] argv (uint): Index of oscillator whose phase represented by argument teta. @return (double) New value of phase of oscillator with index 'argv'. """ index = argv; # index of oscillator phase = 0.0; # phase of a specified oscillator that will calculated in line with current env. states. neighbors = self.get_neighbors(index); for k in neighbors: conn_weight = 1.0; if (self._ena_conn_weight is True): conn_weight = self._conn_weight[index][k]; phase += conn_weight * self._weight * math.sin(self._phases[k] - teta); divider = len(neighbors); if (divider == 0): divider = 1.0; return ( self._freq[index] + (phase / divider) );
def show_network(self):
    """!
    @brief Shows connections in the network. It supports only 2-d and 3-d representation.
    
    """
    
    # When the network lives in the C implementation, the connectivity matrix
    # is fetched lazily the first time it is needed.
    if ( (self._ccore_network_pointer is not None) and (self._osc_conn is None) ):
        self._osc_conn = sync_connectivity_matrix(self._ccore_network_pointer);
    
    dimension = len(self._osc_loc[0]);
    if ( (dimension != 3) and (dimension != 2) ):
        raise NameError('Network that is located in different from 2-d and 3-d dimensions can not be represented');
    
    from matplotlib.font_manager import FontProperties;
    from matplotlib import rcParams;
    
    rcParams['font.sans-serif'] = ['Arial'];
    rcParams['font.size'] = 12;
    
    fig = plt.figure();
    axes = None;
    if (dimension == 2):
        axes = fig.add_subplot(111);
    elif (dimension == 3):
        axes = fig.gca(projection='3d');
    
    surface_font = FontProperties();
    surface_font.set_name('Arial');
    surface_font.set_size('12');
    
    for i in range(0, self._num_osc, 1):
        if (dimension == 2):
            axes.plot(self._osc_loc[i][0], self._osc_loc[i][1], 'bo');
            if (self._conn_represent == conn_represent.MATRIX):
                # Matrix representation: scan only j >= i so each connection is drawn once.
                for j in range(i, self._num_osc, 1):
                    if (self.has_connection(i, j) == True):
                        axes.plot([self._osc_loc[i][0], self._osc_loc[j][0]], [self._osc_loc[i][1], self._osc_loc[j][1]], 'b-', linewidth = 0.5);
            else:
                # Neighbor-list representation: the (i > j) check draws each connection once.
                for j in self.get_neighbors(i):
                    if ( (self.has_connection(i, j) == True) and (i > j) ):
                        axes.plot([self._osc_loc[i][0], self._osc_loc[j][0]], [self._osc_loc[i][1], self._osc_loc[j][1]], 'b-', linewidth = 0.5);
        
        elif (dimension == 3):
            axes.scatter(self._osc_loc[i][0], self._osc_loc[i][1], self._osc_loc[i][2], c = 'b', marker = 'o');
            if (self._conn_represent == conn_represent.MATRIX):
                # Matrix representation: scan only j >= i so each connection is drawn once.
                for j in range(i, self._num_osc, 1):
                    if (self.has_connection(i, j) == True):
                        axes.plot([self._osc_loc[i][0], self._osc_loc[j][0]], [self._osc_loc[i][1], self._osc_loc[j][1]], [self._osc_loc[i][2], self._osc_loc[j][2]], 'b-', linewidth = 0.5);
            else:
                # Neighbor-list representation: the (i > j) check draws each connection once.
                for j in self.get_neighbors(i):
                    if ( (self.has_connection(i, j) == True) and (i > j) ):
                        axes.plot([self._osc_loc[i][0], self._osc_loc[j][0]], [self._osc_loc[i][1], self._osc_loc[j][1]], [self._osc_loc[i][2], self._osc_loc[j][2]], 'b-', linewidth = 0.5);
    
    plt.grid();
    plt.show();
def simulate_static(self, steps, time, solution = solve_type.RK4):
    """!
    @brief Performs static simulation of oscillatory network based on Hodgkin-Huxley neuron model.
    @details Output dynamic is sensible to amount of steps of simulation and solver of differential equation.
              Python implementation uses 'odeint' from 'scipy', CCORE uses classical RK4 and RFK45 methods,
              therefore in case of CCORE HHN (Hodgkin-Huxley network) amount of steps should be greater than in case of Python HHN.
    
    @param[in] steps (uint): Number steps of simulations during simulation.
    @param[in] time (double): Time of simulation.
    @param[in] solution (solve_type): Type of solver for differential equations.
    
    @return (tuple) Dynamic of oscillatory network represented by (time, peripheral neurons dynamic, central elements dynamic),
             where types are (list, list, list).
    
    """
    
    # Check solver before simulation.
    if (solution == solve_type.FAST):
        raise NameError("Solver FAST is not support due to low accuracy that leads to huge error.");
    
    self._membrane_dynamic_pointer = None;
    
    # CCORE path: the C implementation runs the simulation and only the
    # membrane-potential evolution is collected (flags: True, False, False, False).
    if (self.__ccore_hhn_pointer is not None):
        self.__ccore_hhn_dynamic_pointer = wrapper.hhn_dynamic_create(True, False, False, False);
        wrapper.hhn_simulate(self.__ccore_hhn_pointer, steps, time, solution, self._stimulus, self.__ccore_hhn_dynamic_pointer);
        
        peripheral_membrane_potential = wrapper.hhn_dynamic_get_peripheral_evolution(self.__ccore_hhn_dynamic_pointer, 0);
        central_membrane_potential = wrapper.hhn_dynamic_get_central_evolution(self.__ccore_hhn_dynamic_pointer, 0);
        dynamic_time = wrapper.hhn_dynamic_get_time(self.__ccore_hhn_dynamic_pointer);
        
        self._membrane_dynamic_pointer = peripheral_membrane_potential;
        
        # The C-side dynamic object is destroyed once its data has been copied out.
        wrapper.hhn_dynamic_destroy(self.__ccore_hhn_dynamic_pointer);
        
        return (dynamic_time, peripheral_membrane_potential, central_membrane_potential);
    
    if (solution == solve_type.RKF45):
        raise NameError("Solver RKF45 is not support in python version.");
    
    # Python path: the initial state (t = 0) is stored as the first sample.
    dyn_peripheral = [ self._membrane_potential[:] ];
    dyn_central = [ [0.0, 0.0] ];
    dyn_time = [ 0.0 ];
    
    step = time / steps;
    int_step = step / 10.0;     # finer sub-step used by the ODE solver inside each simulation step
    
    for t in numpy.arange(step, time + step, step):
        # Update states of neurons at the end of the current step.
        (memb_peripheral, memb_central) = self._calculate_states(solution, t, step, int_step);
        
        dyn_peripheral.append(memb_peripheral);
        dyn_central.append(memb_central);
        dyn_time.append(t);
    
    self._membrane_dynamic_pointer = dyn_peripheral;
    return (dyn_time, dyn_peripheral, dyn_central);
def _calculate_states(self, solution, t, step, int_step):
    """!
    @brief Caclculates new state of each oscillator in the network. Returns only excitatory state of oscillators.
    
    @param[in] solution (solve_type): Type solver of the differential equations.
    @param[in] t (double): Current time of simulation.
    @param[in] step (uint): Step of solution at the end of which states of oscillators should be calculated.
    @param[in] int_step (double): Differentiation step that is used for solving differential equation.
    
    @return (list) New states of membrance potentials for peripheral oscillators and for cental elements as a list where
             the last two values correspond to central element 1 and 2.
    
    """
    
    # New states of peripheral neurons are computed into separate buffers so
    # that every neuron is integrated against the same (previous) network state.
    next_membrane = [0.0] * self._num_osc;
    next_active_sodium = [0.0] * self._num_osc;
    next_inactive_sodium = [0.0] * self._num_osc;
    next_active_potassium = [0.0] * self._num_osc;
    
    # Update states of peripheral oscillators.
    for index in range (0, self._num_osc, 1):
        result = odeint(self.hnn_state,
                        [ self._membrane_potential[index], self._active_cond_sodium[index], self._inactive_cond_sodium[index], self._active_cond_potassium[index] ],
                        numpy.arange(t - step, t, int_step),
                        (index , ));
        
        [ next_membrane[index], next_active_sodium[index], next_inactive_sodium[index], next_active_potassium[index] ] = result[len(result) - 1][0:4];
    
    next_cn_membrane = [0.0, 0.0];
    next_cn_active_sodium = [0.0, 0.0];
    next_cn_inactive_sodium = [0.0, 0.0];
    next_cn_active_potassium = [0.0, 0.0];
    
    # Update states of central elements; they are addressed by indices
    # following the peripheral neurons (self._num_osc + index).
    for index in range(0, len(self._central_element)):
        result = odeint(self.hnn_state,
                        [ self._central_element[index].membrane_potential, self._central_element[index].active_cond_sodium, self._central_element[index].inactive_cond_sodium, self._central_element[index].active_cond_potassium ],
                        numpy.arange(t - step, t, int_step),
                        (self._num_osc + index , ));
        
        [ next_cn_membrane[index], next_cn_active_sodium[index], next_cn_inactive_sodium[index], next_cn_active_potassium[index] ] = result[len(result) - 1][0:4];
    
    # Noise generation for the next step (multiplicative, within 1 +/- 1%).
    self._noise = [ 1.0 + 0.01 * (random.random() * 2.0 - 1.0) for i in range(self._num_osc)];
    
    # Updating states of PNs
    self.__update_peripheral_neurons(t, step, next_membrane, next_active_sodium, next_inactive_sodium, next_active_potassium);
    
    # Updation states of CN
    self.__update_central_neurons(t, next_cn_membrane, next_cn_active_sodium, next_cn_inactive_sodium, next_cn_active_potassium);
    
    return (next_membrane, next_cn_membrane);
def __update_peripheral_neurons(self, t, step, next_membrane, next_active_sodium, next_inactive_sodium, next_active_potassium): """! @brief Update peripheral neurons in line with new values of current in channels. @param[in] t (doubles): Current time of simulation. @param[in] step (uint): Step (time duration) during simulation when states of oscillators should be calculated. @param[in] next_membrane (list): New values of membrane potentials for peripheral neurons. @Param[in] next_active_sodium (list): New values of activation conductances of the sodium channels for peripheral neurons. @param[in] next_inactive_sodium (list): New values of inactivaton conductances of the sodium channels for peripheral neurons. @param[in] next_active_potassium (list): New values of activation conductances of the potassium channel for peripheral neurons. """ self._membrane_potential = next_membrane[:]; self._active_cond_sodium = next_active_sodium[:]; self._inactive_cond_sodium = next_inactive_sodium[:]; self._active_cond_potassium = next_active_potassium[:]; for index in range(0, self._num_osc): if (self._pulse_generation[index] is False): if (self._membrane_potential[index] >= 0.0): self._pulse_generation[index] = True; self._pulse_generation_time[index].append(t); elif (self._membrane_potential[index] < 0.0): self._pulse_generation[index] = False; # Update connection from CN2 to PN if (self._link_weight3[index] == 0.0): if (self._membrane_potential[index] > self._params.threshold): self._link_pulse_counter[index] += step; if (self._link_pulse_counter[index] >= 1 / self._params.eps): self._link_weight3[index] = self._params.w3; self._link_activation_time[index] = t; elif ( not ((self._link_activation_time[index] < t) and (t < self._link_activation_time[index] + self._params.deltah)) ): self._link_weight3[index] = 0.0; self._link_pulse_counter[index] = 0.0;