idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
59,200
def find_biconnected_components(graph):
    """Find all the biconnected components in a graph.

    Returns a list of lists, each containing the edges that form a
    biconnected component. Returns an empty list for an empty graph.
    """
    all_components = []
    for connected_piece in get_connected_components_as_subgraphs(graph):
        all_components.extend(
            _internal_get_biconnected_components_edge_lists(connected_piece))
    return all_components
Finds all the biconnected components in a graph . Returns a list of lists each containing the edges that form a biconnected component . Returns an empty list for an empty graph .
59,201
def find_biconnected_components_as_subgraphs(graph):
    """Find the biconnected components and return them as subgraphs."""
    return [get_subgraph_from_edge_list(graph, edge_list)
            for edge_list in find_biconnected_components(graph)]
Finds the biconnected components and returns them as subgraphs .
59,202
def find_articulation_vertices(graph):
    """Find all of the articulation (cut) vertices within a graph.

    Returns a list of all articulation vertices within the graph; an empty
    list for an empty graph.
    """
    if not graph.get_all_node_ids():
        return []
    cut_vertices = []
    for component in get_connected_components_as_subgraphs(graph):
        cut_vertices.extend(_internal_get_cut_vertex_list(component))
    return cut_vertices
Finds all of the articulation vertices within a graph . Returns a list of all articulation vertices within the graph . Returns an empty list for an empty graph .
59,203
def output_component(graph, edge_stack, u, v):
    """Pop edges off the stack until edge (u, v) is popped; return them.

    Helper that collects the popped edge ids in order; the terminating edge
    may be stored as (u, v) or (v, u).
    """
    popped = []
    terminators = {(u, v), (v, u)}
    while edge_stack:
        edge_id = edge_stack.popleft()
        popped.append(edge_id)
        if graph.get_edge(edge_id)['vertices'] in terminators:
            break
    return popped
Helper function to pop edges off the stack and produce a list of them .
59,204
def depth_first_search_with_parent_data(graph, root_node=None, adjacency_lists=None):
    """Perform a depth-first search over the whole graph.

    Visiting order of neighbors is determined by the provided adjacency
    lists (or ``graph.neighbors`` when omitted).  Returns a tuple of
    (ordering, parent_lookup, children_lookup).  Disconnected pieces are
    restarted from an arbitrary unvisited node; such restart roots get no
    parent entry (only the initial root is its own parent).
    """
    ordering = []
    parent_lookup = {}
    children_lookup = defaultdict(list)
    all_nodes = graph.get_all_node_ids()
    if not all_nodes:
        return ordering, parent_lookup, children_lookup
    if root_node is None:
        root_node = all_nodes[0]
    if adjacency_lists is None:
        adj = graph.neighbors
    else:
        adj = lambda v: adjacency_lists[v]
    discovered = defaultdict(bool)
    unvisited_nodes = set(all_nodes)
    stack = deque([root_node])
    parent_lookup[root_node] = root_node
    while True:
        while stack:
            node = stack.popleft()
            if discovered[node]:
                continue
            discovered[node] = True
            unvisited_nodes.discard(node)
            ordering.append(node)
            # Push in reverse so the first listed neighbor is visited first.
            for neighbor in reversed(adj(node)):
                if not discovered[neighbor]:
                    stack.appendleft(neighbor)
                    parent_lookup[neighbor] = node
                    children_lookup[node].append(neighbor)
        if unvisited_nodes:
            stack.appendleft(unvisited_nodes.pop())
        else:
            break
    return ordering, parent_lookup, children_lookup
Performs a depth - first search with visiting order of nodes determined by provided adjacency lists and also returns a parent lookup dict and a children lookup dict .
59,205
def graph_to_dot(graph, node_renderer=None, edge_renderer=None):
    """Produce a DOT specification string from the provided graph.

    ``node_renderer``, when given, maps (graph, node_id) to a dict of DOT
    attributes for that node.  NOTE(review): ``edge_renderer`` is accepted
    but never used — confirm whether edge attributes were intended.
    """
    if node_renderer is None:
        render_node = lambda nid: ''
    else:
        render_node = lambda nid: ' [%s]' % ','.join(
            '%s=%s' % item for item in node_renderer(graph, nid).items())
    lines = ['digraph G {', 'overlap=scale;']
    for node_id in list(graph.nodes):
        lines.append('%i%s;' % (node_id, render_node(node_id)))
    for edge in list(graph.edges.values()):
        node_a, node_b = edge['vertices']
        lines.append('%i -> %i;' % (node_a, node_b))
    return '\n'.join(lines) + '\n}'
Produces a DOT specification string from the provided graph .
59,206
def get_connected_components(graph):
    """Find all connected components of the graph.

    Returns a list of lists, each containing the nodes that form one
    connected component. Returns an empty list for an empty graph.
    """
    list_of_components = []
    component = []
    unreached = set(graph.get_all_node_ids())
    to_explore = deque()
    while unreached:
        if not to_explore:
            # Seed a new component from any unreached node; leave it in
            # `unreached` so the inner loop records it normally.
            seed = unreached.pop()
            unreached.add(seed)
            to_explore.append(seed)
            component = []
            list_of_components.append(component)
        while to_explore:
            node = to_explore.pop()
            if node in unreached:
                component.append(node)
                unreached.remove(node)
                for neighbor in graph.neighbors(node):
                    if neighbor in unreached:
                        to_explore.append(neighbor)
    return list_of_components
Finds all connected components of the graph . Returns a list of lists each containing the nodes that form a connected component . Returns an empty list for an empty graph .
59,207
def get_connected_components_as_subgraphs(graph):
    """Find all connected components of the graph.

    Returns a list of graph objects, each representing one connected
    component. Returns an empty list for an empty graph.
    """
    subgraphs = []
    for component in get_connected_components(graph):
        edge_ids = set()
        for node_id in component:
            node = graph.get_node(node_id)
            for edge_id in node['edges']:
                a, b = graph.get_edge(edge_id)['vertices']
                # Keep only edges with both endpoints inside this component.
                if a in component and b in component:
                    edge_ids.add(edge_id)
        subgraphs.append(make_subgraph(graph, component, list(edge_ids)))
    return subgraphs
Finds all connected components of the graph . Returns a list of graph objects each representing a connected component . Returns an empty list for an empty graph .
59,208
def new_edge(self, node_a, node_b, cost=1):
    """Add a new undirected edge between node_a and node_b with a cost.

    Returns the edge id of the new edge.
    """
    # The superclass registers the edge on node_a only; mirror it on node_b
    # so both endpoints know about the undirected edge.
    edge_id = super(UndirectedGraph, self).new_edge(node_a, node_b, cost)
    self.nodes[node_b]['edges'].append(edge_id)
    return edge_id
Adds a new undirected edge between node_a and node_b with a cost . Returns the edge id of the new edge .
59,209
def delete_edge_by_id(self, edge_id):
    """Remove the edge identified by edge_id from the graph."""
    edge = self.get_edge(edge_id)
    # Detach the edge from both endpoint nodes' adjacency records.
    for node_id in edge['vertices']:
        self.get_node(node_id)['edges'].remove(edge_id)
    del self.edges[edge_id]
    self._num_edges -= 1
Removes the edge identified by edge_id from the graph .
59,210
def find_minimum_spanning_tree(graph):
    """Calculate a minimum spanning tree for a graph.

    Returns a list of edge ids that define the tree; an empty list for an
    empty or edgeless graph.  Raises DisconnectedGraphError when the graph
    has more than one connected component.
    """
    if graph.num_nodes() == 0 or graph.num_edges() == 0:
        return []
    if len(get_connected_components(graph)) > 1:
        raise DisconnectedGraphError
    return kruskal_mst(graph)
Calculates a minimum spanning tree for a graph . Returns a list of edges that define the tree . Returns an empty list for an empty graph .
59,211
def find_minimum_spanning_tree_as_subgraph(graph):
    """Calculate a minimum spanning tree and return it as a subgraph."""
    return get_subgraph_from_edge_list(graph, find_minimum_spanning_tree(graph))
Calculates a minimum spanning tree and returns a graph representation .
59,212
def find_minimum_spanning_forest(graph):
    """Calculate the minimum spanning forest of a (possibly disconnected) graph.

    Returns a list of lists, each containing the edges that define one tree.
    Returns an empty list for an empty or edgeless graph.
    """
    if graph.num_nodes() == 0 or graph.num_edges() == 0:
        return []
    return [kruskal_mst(subgraph)
            for subgraph in get_connected_components_as_subgraphs(graph)]
Calculates the minimum spanning forest of a disconnected graph . Returns a list of lists each containing the edges that define that tree . Returns an empty list for an empty graph .
59,213
def find_minimum_spanning_forest_as_subgraphs(graph):
    """Calculate the minimum spanning forest and return the trees as subgraphs."""
    forest = find_minimum_spanning_forest(graph)
    return [get_subgraph_from_edge_list(graph, edge_list) for edge_list in forest]
Calculates the minimum spanning forest and returns a list of trees as subgraphs .
59,214
def kruskal_mst(graph):
    """Kruskal's algorithm for finding minimum spanning trees.

    Assumes a non-empty, connected graph.  Returns the list of accepted
    edge ids.
    """
    ds = DisjointSet()
    pq = PriorityQueue()
    # One disjoint-set label per node, in node-id order.
    label_lookup = {node: ds.add_set() for node in graph.get_all_node_ids()}
    num_vertices = len(label_lookup)
    for edge in graph.get_all_edge_objects():
        pq.put(edge['id'], edge['cost'])
    accepted_edges = []
    edges_accepted = 0
    # A spanning tree of n vertices has exactly n - 1 edges.
    while edges_accepted < num_vertices - 1:
        edge_id = pq.get()
        node_a, node_b = graph.get_edge(edge_id)['vertices']
        set_a = ds.find(label_lookup[node_a])
        set_b = ds.find(label_lookup[node_b])
        if set_a != set_b:
            edges_accepted += 1
            accepted_edges.append(edge_id)
            ds.union(set_a, set_b)
    return accepted_edges
Implements Kruskal's algorithm for finding minimum spanning trees. Assumes a non-empty, connected graph.
59,215
def __get_cycle(graph, ordering, parent_lookup):
    """Get the main cycle of the DFS tree.

    Scans the DFS ordering (skipping the root and its first child) for the
    first node adjacent to the root, then walks parent links back to the
    root to build the cycle path.  NOTE(review): implicitly returns None
    when no such node exists — confirm callers always have a cycle.
    """
    root_node = ordering[0]
    for current_node in ordering[2:]:
        if not graph.adjacent(current_node, root_node):
            continue
        path = []
        while current_node != root_node:
            path.append(current_node)
            current_node = parent_lookup[current_node]
        path.append(root_node)
        path.reverse()
        return path
Gets the main cycle of the dfs tree .
59,216
def __get_segments_from_node(node, graph):
    """Calculate the segments that can emanate from a node on the main cycle.

    Returns a (copied) list of the node's incident edge ids.
    """
    return list(graph.get_node(node)['edges'])
Calculates the segments that can emanate from a particular node on the main cycle .
59,217
def __get_segments_from_cycle(graph, cycle_path):
    """Calculate the segments that emanate from the main cycle.

    Walks the cycle in reverse order, skipping nodes without segments.
    """
    list_of_segments = []
    for node in reversed(cycle_path):
        segments = __get_segments_from_node(node, graph)
        if segments:
            list_of_segments.append(segments)
    return list_of_segments
Calculates the segments that emanate from the main cycle .
59,218
def make_subgraph(graph, vertices, edges):
    """Convert a subgraph given by lists of vertices and edges into a graph object.

    Works on a deep copy, so the original graph is left untouched.
    """
    local_graph = copy.deepcopy(graph)
    doomed_edges = [e for e in local_graph.get_all_edge_ids() if e not in edges]
    for edge_id in doomed_edges:
        local_graph.delete_edge_by_id(edge_id)
    doomed_nodes = [v for v in local_graph.get_all_node_ids() if v not in vertices]
    for node_id in doomed_nodes:
        local_graph.delete_node(node_id)
    return local_graph
Converts a subgraph given by a list of vertices and edges into a graph object .
59,219
def convert_graph_directed_to_undirected(dg):
    """Convert a directed graph into an undirected graph.

    Each directed edge becomes undirected by also registering it on its
    target node's edge list.
    """
    udg = UndirectedGraph()
    udg.nodes = copy.deepcopy(dg.nodes)
    udg.edges = copy.deepcopy(dg.edges)
    udg.next_node_id = dg.next_node_id
    udg.next_edge_id = dg.next_edge_id
    for edge_id in udg.get_all_edge_ids():
        target_node_id = udg.get_edge(edge_id)['vertices'][1]
        udg.get_node(target_node_id)['edges'].append(edge_id)
    return udg
Converts a directed graph into an undirected graph . Directed edges are made undirected .
59,220
def remove_duplicate_edges_directed(dg):
    """Remove duplicate edges from a directed graph.

    The lowest edge id for each (from, to) vertex pair survives.
    """
    seen = {}
    for edge_id in sorted(dg.get_all_edge_ids()):
        vertices = dg.get_edge(edge_id)['vertices']
        if vertices in seen:
            dg.delete_edge_by_id(edge_id)
        else:
            seen[vertices] = edge_id
Removes duplicate edges from a directed graph .
59,221
def remove_duplicate_edges_undirected(udg):
    """Remove duplicate edges from an undirected graph.

    Edges (a, b) and (b, a) count as the same edge; the lowest edge id for
    each pair survives.
    """
    seen = {}
    for edge_id in sorted(udg.get_all_edge_ids()):
        forward = udg.get_edge(edge_id)['vertices']
        backward = (forward[1], forward[0])
        if forward in seen or backward in seen:
            udg.delete_edge_by_id(edge_id)
        else:
            seen[forward] = edge_id
            seen[backward] = edge_id
Removes duplicate edges from an undirected graph .
59,222
def get_vertices_from_edge_list(graph, edge_list):
    """Transform a list of edge ids into a list of the nodes those edges connect.

    Returns a list of nodes, or an empty list when given an empty list.
    """
    node_set = set()
    for edge_id in edge_list:
        node_set.update(graph.get_edge(edge_id)['vertices'])
    return list(node_set)
Transforms a list of edges into a list of the nodes those edges connect . Returns a list of nodes or an empty list if given an empty list .
59,223
def get_subgraph_from_edge_list(graph, edge_list):
    """Transform a list of edges into a subgraph."""
    node_list = get_vertices_from_edge_list(graph, edge_list)
    return make_subgraph(graph, node_list, edge_list)
Transforms a list of edges into a subgraph .
59,224
def merge_graphs(main_graph, addition_graph):
    """Merge addition_graph into main_graph.

    Returns a tuple (node_mapping, edge_mapping) of dictionaries mapping the
    addition graph's old node/edge ids to the new ids assigned in main_graph.
    """
    node_mapping = {node['id']: main_graph.new_node()
                    for node in addition_graph.get_all_node_objects()}
    edge_mapping = {}
    for edge in addition_graph.get_all_edge_objects():
        old_a, old_b = edge['vertices']
        edge_mapping[edge['id']] = main_graph.new_edge(node_mapping[old_a],
                                                       node_mapping[old_b])
    return node_mapping, edge_mapping
Merges an addition_graph into the main_graph . Returns a tuple of dictionaries mapping old node ids and edge ids to new ids .
59,225
def create_graph_from_adjacency_matrix(adjacency_matrix):
    """Generate a graph from an adjacency-matrix specification.

    A symmetric matrix yields an UndirectedGraph, otherwise a DirectedGraph.
    Returns a tuple of (graph, node_column_mapping) where the mapping lists
    the node id for each matrix column index.
    """
    if is_adjacency_matrix_symmetric(adjacency_matrix):
        graph = UndirectedGraph()
    else:
        graph = DirectedGraph()
    num_columns = len(adjacency_matrix)
    node_column_mapping = [graph.new_node() for _ in range(num_columns)]
    for j in range(num_columns):
        for i in range(num_columns):
            if adjacency_matrix[j][i]:
                # A truthy entry at row j, column i is an edge from i to j.
                graph.new_edge(node_column_mapping[i], node_column_mapping[j])
    return (graph, node_column_mapping)
Generates a graph from an adjacency matrix specification . Returns a tuple containing the graph and a list - mapping of node ids to matrix column indices .
59,226
def add_set(self):
    """Add a new singleton set to the forest.

    Returns the label by which the new set can be referenced.
    """
    self.__label_counter += 1
    new_label = self.__label_counter
    # Negative values mark roots; -1 is a fresh root.  NOTE(review): the
    # magnitude appears to encode rank (see __internal_union) — confirm.
    self.__forest[new_label] = -1
    self.__set_counter += 1
    return new_label
Adds a new set to the forest. Returns a label by which the new set can be referenced.
59,227
def find(self, node_label):
    """Find the set containing node_label; return that set's root label.

    Applies path compression: every node visited on the way to the root is
    re-pointed directly at the root.
    """
    visited = []
    current = node_label
    # Non-negative entries are parent pointers; negative entries mark roots.
    while self.__forest[current] >= 0:
        visited.append(current)
        current = self.__forest[current]
    for node in visited:
        self.__forest[node] = current
    return current
Finds the set containing the node_label . Returns the set label .
59,228
def union(self, label_a, label_b):
    """Join two sets into a single new set.

    label_a and label_b can be any nodes within the sets, not just roots.
    Does nothing when both labels already belong to the same set.
    """
    if label_a == label_b:
        return
    root_a = self.find(label_a)
    root_b = self.find(label_b)
    if root_a == root_b:
        return
    self.__internal_union(root_a, root_b)
    self.__set_counter -= 1
Joins two sets into a single new set. label_a and label_b can be any nodes within the sets.
59,229
def __internal_union(self, root_a, root_b):
    """Join the two set trees rooted at root_a and root_b (assumed distinct).

    Root entries hold negative values.  NOTE(review): the comparison attaches
    the root with the smaller stored value under the other, which looks
    inverted relative to textbook union-by-rank; correctness is unaffected
    (only tree depth), but confirm intent.
    """
    rank_a = self.__forest[root_a]
    rank_b = self.__forest[root_b]
    if rank_a < rank_b:
        smaller, larger = root_a, root_b
    else:
        smaller, larger = root_b, root_a
    self.__forest[smaller] = larger
    if rank_a == rank_b:
        # Tie: the surviving root's stored value becomes more negative.
        self.__forest[larger] -= 1
Internal function to join two set trees specified by root_a and root_b . Assumes root_a and root_b are distinct .
59,230
def is_planar(graph):
    """Determine whether a graph is planar.

    A graph is planar iff every biconnected component of every connected
    component is planar.
    """
    for component in get_connected_components_as_subgraphs(graph):
        for bicomponent in find_biconnected_components_as_subgraphs(component):
            if not __is_subgraph_planar(bicomponent):
                return False
    return True
Determines whether a graph is planar or not .
59,231
def __is_subgraph_planar(graph):
    """Internal planarity check for a single (biconnected) subgraph.

    Cheap pre-checks first: any graph with fewer than 5 nodes is planar, and
    a planar graph can have at most 3n - 6 edges.  Otherwise defer to the
    full Kocay planarity test.
    """
    num_nodes = graph.num_nodes()
    if num_nodes < 5:
        return True
    if graph.num_edges() > 3 * (num_nodes - 2):
        return False
    return kocay_planarity_test(graph)
Internal function to determine if a subgraph is planar .
59,232
def __setup_dfs_data(graph, adj):
    """Set up the dfs_data dict used throughout the planarity test."""
    dfs_data = __get_dfs_data(graph, adj)
    dfs_data['graph'] = graph
    dfs_data['adj'] = adj
    lowpoint_1, lowpoint_2 = __low_point_dfs(dfs_data)
    dfs_data['lowpoint_1_lookup'] = lowpoint_1
    dfs_data['lowpoint_2_lookup'] = lowpoint_2
    dfs_data['edge_weights'] = __calculate_edge_weights(dfs_data)
    return dfs_data
Sets up the dfs_data object for consistency .
59,233
def __calculate_edge_weights(dfs_data):
    """Calculate the weight of each edge for embedding-order sorting."""
    graph = dfs_data['graph']
    return {edge_id: __edge_weight(edge_id, dfs_data)
            for edge_id in graph.get_all_edge_ids()}
Calculates the weight of each edge for embedding - order sorting .
59,234
def __sort_adjacency_lists(dfs_data):
    """Sort each adjacency list by edge weight, fronds before tree edges on ties."""
    new_adjacency_lists = {}
    edge_weights = dfs_data['edge_weights']
    edge_lookup = dfs_data['edge_lookup']
    graph = dfs_data['graph']
    for node_id, adj_list in list(dfs_data['adj'].items()):
        node_weight_lookup = {}
        frond_lookup = {}
        for node_b in adj_list:
            edge_id = graph.get_first_edge_id_by_node_ids(node_id, node_b)
            node_weight_lookup[node_b] = edge_weights[edge_id]
            # Backedges (fronds) sort before tree edges on equal weight.
            frond_lookup[node_b] = 1 if edge_lookup[edge_id] == 'backedge' else 2
        # Two stable sorts: secondary key (frond flag) first, then weight.
        new_list = sorted(adj_list, key=lambda n: frond_lookup[n])
        new_list.sort(key=lambda n: node_weight_lookup[n])
        new_adjacency_lists[node_id] = new_list
    return new_adjacency_lists
Sorts the adjacency list representation by the edge weights .
59,235
def __branch_point_dfs_recursive(u, large_n, b, stem, dfs_data):
    """Recursive implementation of BranchPtDFS (p. 14 of the paper).

    NOTE(review): reconstructed from a whitespace-mangled source; the exact
    nesting of the v_II/break clause should be verified against the paper.
    """
    first_vertex = dfs_data['adj'][u][0]
    large_w = wt(u, first_vertex, dfs_data)
    if large_w % 2 == 0:
        large_w += 1
    v_I = 0
    v_II = 0
    # Scan candidate children whose edge weight does not exceed large_w.
    for v in [v for v in dfs_data['adj'][u] if wt(u, v, dfs_data) <= large_w]:
        stem[u] = v
        if a(v, dfs_data) == u:
            large_n[v] = 0
            if wt(u, v, dfs_data) % 2 == 0:
                v_I = v
            else:
                b_u = b[u]
                l2_v = L2(v, dfs_data)
                if l2_v < b_u:
                    large_n[v] = 1
                elif b_u != 1:
                    xnode = stem[l2_v]
                    if large_n[xnode] != 0:
                        large_n[v] = large_n[xnode] + 1
                    elif dfs_data['graph'].adjacent(u, L1(v, dfs_data)):
                        large_n[v] = 2
                    else:
                        large_n[v] = large_n[u]
                if large_n[v] % 2 == 0:
                    v_II = v
                    break
    # A v_II candidate takes precedence over v_I at the list head.
    if v_II != 0:
        dfs_data['adj'][u].remove(v_II)
        dfs_data['adj'][u].insert(0, v_II)
    elif v_I != 0:
        dfs_data['adj'][u].remove(v_I)
        dfs_data['adj'][u].insert(0, v_I)
    first_time = True
    for v in dfs_data['adj'][u]:
        if a(v, dfs_data) == u:
            b[v] = u
            if first_time:
                b[v] = b[u]
            elif wt(u, v, dfs_data) % 2 == 0:
                large_n[v] = 0
            else:
                large_n[v] = 1
            stem[u] = v
            __branch_point_dfs_recursive(v, large_n, b, stem, dfs_data)
        first_time = False
    return
A recursive implementation of the BranchPtDFS function as defined on page 14 of the paper .
59,236
def __embed_branch(dfs_data):
    """Build the combinatorial embedding of the graph.

    Initializes the left/right frond lists and the frond-group table with
    sentinel entries spanning the whole DFS-number range, then recurses.
    Returns True when the graph is planar.
    """
    u = dfs_data['ordering'][0]
    n = dfs_data['graph'].num_nodes()
    dfs_data['LF'] = [(0, n)]
    dfs_data['RF'] = [(0, n)]
    dfs_data['FG'] = {
        0: [{'u': 0, 'v': n}, {'x': 0, 'y': n}],
        'm': 0,
        'l': 0,
        'r': 0,
    }
    nonplanar = __embed_branch_recursive(u, dfs_data)
    return not nonplanar
Builds the combinatorial embedding of the graph . Returns whether the graph is planar .
59,237
def __embed_branch_recursive(u, dfs_data):
    """Recursive implementation of EmbedBranch (pp. 8 and 22 of the paper).

    Returns True as soon as a nonplanarity witness is found, else False.
    """
    for v in dfs_data['adj'][u]:
        if a(v, dfs_data) == u:
            # Tree edge: insert a branch marker when v starts a new branch.
            if b(v, dfs_data) == u:
                if not __insert_branch(u, v, dfs_data):
                    return True
            if __embed_branch_recursive(v, dfs_data):
                return True
        elif is_frond(u, v, dfs_data):
            if not __embed_frond(u, v, dfs_data):
                return True
        # Any other edge type is ignored.
    return False
A recursive implementation of the EmbedBranch function as defined on pages 8 and 22 of the paper .
59,238
def __embed_frond(node_u, node_w, dfs_data, as_branch_marker=False):
    """Embed a frond uw into either LF or RF.

    Returns True when the embedding succeeded, False when the graph has
    been shown nonplanar.  NOTE(review): reconstructed from a
    whitespace-mangled source; the trailing else makes the switch-sides
    loop effectively single-pass — verify against the paper.
    """
    d_u = D(node_u, dfs_data)
    d_w = D(node_w, dfs_data)
    comp_d_w = abs(d_w)
    if as_branch_marker:
        # Branch markers are stored with a negated endpoint and simply
        # alternate sides relative to the last insertion.
        d_w *= -1
        if dfs_data['last_inserted_side'] == 'LF':
            __insert_frond_RF(d_w, d_u, dfs_data)
        else:
            __insert_frond_LF(d_w, d_u, dfs_data)
        return True
    m = dfs_data['FG']['m']
    l_w = lw(dfs_data)
    r_w = rw(dfs_data)
    u_m = u(m, dfs_data)
    x_m = fn_x(m, dfs_data)
    # The three admissible positions of d_u relative to the top frond group
    # (mutually exclusive); anything else is immediately nonplanar.
    case_1 = d_u > u_m and d_u > x_m
    case_2 = d_u <= u_m and d_u > x_m
    case_3 = d_u > u_m and d_u <= x_m
    if not (case_1 or case_2 or case_3):
        return False
    if comp_d_w >= l_w and comp_d_w >= r_w:
        # Fits below both sides: insert on LF and open a new frond group.
        __insert_frond_LF(d_w, d_u, dfs_data)
        dfs_data['FG']['m'] += 1
        m = dfs_data['FG']['m']
        n = dfs_data['graph'].num_nodes()
        dfs_data['FG'][m] = [{'u': d_w, 'v': d_u}, {'x': n, 'y': 0}]
        return True
    if comp_d_w >= l_w and comp_d_w < r_w:
        return __do_case_5_work(d_w, d_u, case_1, case_2, case_3, dfs_data)
    if comp_d_w < l_w and comp_d_w >= r_w:
        return __do_case_6_work(d_w, d_u, case_1, case_2, case_3, dfs_data)
    while comp_d_w < l_w and comp_d_w < r_w:
        if d_u > u_m and d_u > x_m:
            # Case 1 while both sides conflict: nonplanar.
            return False
        switch_sides(d_u, dfs_data)
        l_w = lw(dfs_data)
        r_w = rw(dfs_data)
        m = dfs_data['FG']['m']
        u_m = u(m, dfs_data)
        x_m = fn_x(m, dfs_data)
        case_1 = False
        case_2 = d_u <= u_m and d_u > x_m
        case_3 = d_u > u_m and d_u <= x_m
        if comp_d_w >= l_w and comp_d_w < r_w:
            return __do_case_5_work(d_w, d_u, case_1, case_2, case_3, dfs_data)
        if comp_d_w < l_w and comp_d_w >= r_w:
            return __do_case_6_work(d_w, d_u, case_1, case_2, case_3, dfs_data)
        else:
            return False
    return False
Embeds a frond uw into either LF or RF . Returns whether the embedding was successful .
59,239
def __insert_frond_RF(d_w, d_u, dfs_data):
    """Insert frond (d_w, d_u) into the right-side frond list.

    Also bumps the right counter and records RF as the last inserted side.
    """
    dfs_data['RF'].append((d_w, d_u))
    dfs_data['FG']['r'] += 1
    dfs_data['last_inserted_side'] = 'RF'
Encapsulates the process of inserting a frond uw into the right side frond group .
59,240
def __insert_frond_LF(d_w, d_u, dfs_data):
    """Insert frond (d_w, d_u) into the left-side frond list.

    Also bumps the left counter and records LF as the last inserted side.
    """
    dfs_data['LF'].append((d_w, d_u))
    dfs_data['FG']['l'] += 1
    dfs_data['last_inserted_side'] = 'LF'
Encapsulates the process of inserting a frond uw into the left side frond group .
59,241
def merge_Fm(dfs_data):
    """Merge frond groups Fm-1 and Fm (p. 19 of the paper).

    The surviving group Fm-1 takes the widest span on each side; Fm is
    deleted and the group counter decremented.
    """
    FG = dfs_data['FG']
    m = FG['m']
    left_m, right_m = FG[m]
    left_prev, right_prev = FG[m - 1]
    if left_m['u'] < left_prev['u']:
        left_prev['u'] = left_m['u']
    if left_m['v'] > left_prev['v']:
        left_prev['v'] = left_m['v']
    if right_m['x'] < right_prev['x']:
        right_prev['x'] = right_m['x']
    if right_m['y'] > right_prev['y']:
        right_prev['y'] = right_m['y']
    del FG[m]
    FG['m'] -= 1
Merges Fm - 1 and Fm as defined on page 19 of the paper .
59,242
def __check_left_side_conflict(x, y, dfs_data):
    """Check whether frond xy conflicts with the top left-side frond."""
    top_index = dfs_data['FG']['l']
    w, z = dfs_data['LF'][top_index]
    return __check_conflict_fronds(x, y, w, z, dfs_data)
Checks to see if the frond xy will conflict with a frond on the left side of the embedding .
59,243
def __check_right_side_conflict(x, y, dfs_data):
    """Check whether frond xy conflicts with the top right-side frond."""
    top_index = dfs_data['FG']['r']
    w, z = dfs_data['RF'][top_index]
    return __check_conflict_fronds(x, y, w, z, dfs_data)
Checks to see if the frond xy will conflict with a frond on the right side of the embedding .
59,244
def __check_conflict_fronds(x, y, w, z, dfs_data):
    """Check a pair of fronds for an embedding conflict.

    Negative first components mark branch markers.  Returns True when a
    conflict was found, False otherwise.  NOTE(review): reconstructed from a
    whitespace-mangled source — verify branch nesting against the paper.
    """
    # Two branch markers: they conflict only when marking the same branch.
    if x < 0 and w < 0 and (x == y or w == z):
        return x == w
    # Two fronds in the same branch, properly nested: no conflict.
    if b(x, dfs_data) == b(w, dfs_data) and x > w and w > y and y > z:
        return False
    if x < 0 or w < 0:
        # One marker plus one frond: normalize so (x, y) is the frond and
        # u/t describe the marked branch.
        if x < 0:
            u = abs(x)
            t = y
            x = w
            y = z
        else:
            u = abs(w)
            t = z
        if b(x, dfs_data) == u and y < u and (x, y) in __dfsify_branch_uv(u, t, dfs_data):
            return True
        return False
    return False
Checks a pair of fronds to see if they conflict . Returns True if a conflict was found False otherwise .
59,245
def __calculate_adjacency_lists(graph):
    """Build an adjacency-list representation for the graph.

    We can't assume the graph's internal representation is stored this way.
    """
    return {node: graph.neighbors(node) for node in graph.get_all_node_ids()}
Builds an adjacency list representation for the graph, since we can't guarantee that the internal representation of the graph is stored that way.
59,246
def __get_all_lowpoints(dfs_data):
    """Calculate lowpoint 1 and lowpoint 2 for every node in the graph."""
    lowpoint_1_lookup = {}
    lowpoint_2_lookup = {}
    for node in dfs_data['ordering']:
        low_1, low_2 = __get_lowpoints(node, dfs_data)
        lowpoint_1_lookup[node] = low_1
        lowpoint_2_lookup[node] = low_2
    return lowpoint_1_lookup, lowpoint_2_lookup
Calculates the lowpoints for each node in a graph .
59,247
def __get_lowpoints(node, dfs_data):
    """Calculate the two lowpoints for a single node.

    Candidates from T(node) are ranked by their DFS ordering; the two
    earliest become lowpoint 1 and lowpoint 2.
    """
    ordering_lookup = dfs_data['ordering_lookup']
    candidates = sorted(T(node, dfs_data), key=lambda a: ordering_lookup[a])
    return candidates[0], candidates[1]
Calculates the lowpoints for a single node in a graph .
59,248
def __edge_weight(edge_id, dfs_data):
    """Calculate the edge weight used to sort adjacency lists for embedding."""
    graph = dfs_data['graph']
    u, v = graph.get_edge(edge_id)['vertices']
    d_u = D(u, dfs_data)
    d_v = D(v, dfs_data)
    d_lp_1 = D(L1(v, dfs_data), dfs_data)
    if dfs_data['edge_lookup'][edge_id] == 'backedge' and d_v < d_u:
        return 2 * d_v
    elif is_type_I_branch(u, v, dfs_data):
        return 2 * d_lp_1
    elif is_type_II_branch(u, v, dfs_data):
        return 2 * d_lp_1 + 1
    else:
        # Anything else sorts after every real frond/branch.
        return 2 * graph.num_nodes() + 1
Calculates the edge weight used to sort edges .
59,249
def is_type_I_branch(u, v, dfs_data):
    """Determine whether a branch uv is a type I branch.

    Requires u to be v's DFS parent and equal to v's second lowpoint.
    """
    return u == a(v, dfs_data) and u == L2(v, dfs_data)
Determines whether a branch uv is a type I branch .
59,250
def is_type_II_branch(u, v, dfs_data):
    """Determine whether a branch uv is a type II branch.

    Requires u to be v's DFS parent and less than v's second lowpoint.
    """
    return u == a(v, dfs_data) and u < L2(v, dfs_data)
Determines whether a branch uv is a type II branch .
59,251
def __get_descendants(node, dfs_data):
    """Get all DFS-tree descendants of a node (the node itself excluded)."""
    children_lookup = dfs_data['children_lookup']

    def tree_children(parent):
        # A recorded child with a smaller DFS number is a back link rather
        # than a genuine tree child; keep only real descendants.
        d_parent = D(parent, dfs_data)
        return [c for c in children_lookup[parent] if D(c, dfs_data) > d_parent]

    descendants = []
    stack = deque(tree_children(node))
    while stack:
        current = stack.pop()
        descendants.append(current)
        stack.extend(tree_children(current))
    return descendants
Gets the descendants of a node .
59,252
def S_star(u, dfs_data):
    """The set of all descendants of u, with u itself added."""
    descendants = S(u, dfs_data)
    if u not in descendants:
        descendants.append(u)
    return descendants
The set of all descendants of u with u added .
59,253
def classify_segmented_recording(recording, result_format=None):
    """Classify a recording known to contain exactly one symbol.

    Lazily instantiates the module-level classifier on first use.
    """
    global single_symbol_classifier
    if single_symbol_classifier is None:
        single_symbol_classifier = SingleClassificer()
    return single_symbol_classifier.predict(recording, result_format)
Use this function if you are sure you have a single symbol .
59,254
def predict(self, recording, result_format=None):
    """Predict the class of the given recording.

    NOTE(review): for 'LaTeX' output the semantics field is first replaced
    by its second ';'-component and then split on ';' again to derive
    complete_latex — the second split operates on the already-reduced
    string; confirm this is intended.
    """
    evaluate = utils.evaluate_model_single_recording_preloaded
    results = evaluate(self.preprocessing_queue,
                       self.feature_list,
                       self.model,
                       self.output_semantics,
                       recording)
    if result_format == 'LaTeX':
        for result in results:
            result['semantics'] = result['semantics'].split(";")[1]
        for result in results:
            result['complete_latex'] = result['semantics'].split(";")[1]
    return results
Predict the class of the given recording .
59,255
def get_symbol_ids(symbol_yml_file, metadata):
    """Resolve the symbols of a YAML config file against write-math metadata.

    Returns a list of dicts with 'id', 'formula_in_latex' and 'mappings'
    (all write-math ids that map onto that base class).  Exits the process
    on unresolvable or duplicated symbols.
    """
    # SECURITY NOTE(review): yaml.load without an explicit Loader can execute
    # arbitrary Python from the file; use yaml.safe_load if untrusted.
    with open(symbol_yml_file, 'r') as stream:
        symbol_cfg = yaml.load(stream)
    symbol_ids = []
    symbol_ids_set = set()
    for symbol in symbol_cfg:
        if 'latex' not in symbol:
            logging.error("Key 'latex' not found for a symbol in %s (%s)",
                          symbol_yml_file, symbol)
            sys.exit(-1)
        results = [el for el in metadata['symbols']
                   if el['formula_in_latex'] == symbol['latex']]
        if len(results) != 1:
            logging.warning("Found %i results for %s: %s",
                            len(results), symbol['latex'], results)
            if len(results) > 1:
                results = sorted(results, key=lambda n: n['id'])
            else:
                sys.exit(-1)
        mapping_ids = [results[0]['id']]
        if 'mappings' in symbol:
            for msymbol in symbol['mappings']:
                filtered = [el for el in metadata['symbols']
                            if el['formula_in_latex'] == msymbol['latex']]
                if len(filtered) != 1:
                    logging.error("Found %i results for %s: %s",
                                  len(filtered), msymbol, filtered)
                    if len(filtered) > 1:
                        filtered = natsorted(filtered, key=lambda n: n['id'])
                    else:
                        sys.exit(-1)
                mapping_ids.append(filtered[0]['id'])
        symbol_ids.append({'id': int(results[0]['id']),
                           'formula_in_latex': results[0]['formula_in_latex'],
                           'mappings': mapping_ids})
        for id_tmp in mapping_ids:
            if id_tmp not in symbol_ids_set:
                symbol_ids_set.add(id_tmp)
            else:
                # Find which already-recorded symbol claimed this id.
                for symbol_tmp in symbol_ids:
                    if id_tmp in symbol_tmp['mappings']:
                        break
                logging.error('Symbol id %s is already used: %s',
                              id_tmp, symbol_tmp)
                sys.exit(-1)
    logging.info('%i base classes and %i write-math ids.',
                 len(symbol_ids), len(symbol_ids_set))
    return symbol_ids
Get a list of ids which describe which class they get mapped to .
59,256
def read_csv(filepath):
    """Read a CSV file into a list of dictionaries.

    The first line of the CSV determines the keys of the dictionaries.
    """
    # Bug fix: csv.DictReader requires a text-mode file object on Python 3;
    # opening with 'rb' raises a TypeError on the first read.  newline='' is
    # the csv module's documented way to handle line endings itself, and the
    # file is now closed deterministically via the context manager.
    with open(filepath, 'r', newline='') as csvfile:
        reader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
        return list(reader)
Read a CSV into a list of dictionaries. The first line of the CSV determines the keys of the dictionary.
59,257
def load_raw(raw_pickle_file):
    """Load a pickle file of raw recordings and log how many were loaded."""
    with open(raw_pickle_file, 'rb') as f:
        raw = pickle.load(f)
    logging.info("Loaded %i recordings.", len(raw['handwriting_datasets']))
    return raw
Load a pickle file of raw recordings .
59,258
def get_metrics(metrics_description):
    """Instantiate data-analyzation metric objects from a description list."""
    return utils.get_objectlist(metrics_description,
                                config_key='data_analyzation_plugins',
                                module=sys.modules[__name__])
Get metrics from a list of dictionaries .
59,259
def prepare_file(filename):
    """Truncate `filename` inside the analyzation directory; return its path.

    Creates the directory beneath the project root when it is missing.
    """
    directory = os.path.join(utils.get_project_root(), "analyzation/")
    if not os.path.exists(directory):
        os.makedirs(directory)
    workfilename = os.path.join(directory, filename)
    # Create the file, or truncate it if it already exists.
    open(workfilename, 'w').close()
    return workfilename
Truncate the file and return the filename .
59,260
def sort_by_formula_id(raw_datasets):
    """Group handwriting recordings by their accepted formula id.

    Returns a defaultdict mapping formula_id -> list of handwriting objects.
    """
    by_formula_id = defaultdict(list)
    for dataset in raw_datasets:
        handwriting = dataset['handwriting']
        by_formula_id[handwriting.formula_id].append(handwriting)
    return by_formula_id
Sort a list of formulas by id where id represents the accepted formula id .
59,261
def _write_data(self, symbols, err_recs, nr_recordings, total_error_count,
                percentages, time_max_list):
    """Append all obtained analyzation data to ``self.filename``.

    Parameters
    ----------
    symbols : dict
        Maps symbol (LaTeX string) to its recording count.
    err_recs : dict
        Counts of recordings with errors, keyed by error type
        ('wild_points', 'missing_stroke', 'single_dots', 'total').
    nr_recordings : int
        Total number of recordings (used as the percentage denominator).
    total_error_count : dict
        Total number of errors per type (a recording may have several).
    percentages : list
        One entry per recording whose size changed by point removal.
    time_max_list : list
        Recordings that took longer than ``self.time_max_threshold`` ms.
    """
    write_file = open(self.filename, "a")
    # Build one long comma-separated symbol listing; extra newlines are
    # inserted around 'a'/'0'/'A' and 'z'/'9'/'Z' to visually group the
    # alphabet and digit ranges.
    s = ""
    for symbol, count in sorted(symbols.items(), key=lambda n: n[0]):
        if symbol in ['a', '0', 'A']:
            s += "\n%s (%i), " % (symbol, count)
        elif symbol in ['z', '9', 'Z']:
            s += "%s (%i) \n" % (symbol, count)
        else:
            s += "%s (%i), " % (symbol, count)
    print("## Data", file=write_file)
    print("Symbols: %i" % len(symbols), file=write_file)
    print("Recordings: %i" % sum(symbols.values()), file=write_file)
    print("```", file=write_file)
    # s[:-1] strips the trailing space of the last ", " separator.
    print(s[:-1], file=write_file)
    print("```", file=write_file)
    print("Recordings with wild points: %i (%0.2f%%)" %
          (err_recs['wild_points'],
           float(err_recs['wild_points']) / nr_recordings * 100),
          file=write_file)
    print("wild points: %i" % total_error_count['wild_points'],
          file=write_file)
    print("Recordings with missing stroke: %i (%0.2f%%)" %
          (err_recs['missing_stroke'],
           float(err_recs['missing_stroke']) / nr_recordings * 100),
          file=write_file)
    print("Recordings with errors: %i (%0.2f%%)" %
          (err_recs['total'],
           float(err_recs['total']) / nr_recordings * 100),
          file=write_file)
    print("Recordings with dots: %i (%0.2f%%)" %
          (err_recs['single_dots'],
           float(err_recs['single_dots']) / nr_recordings * 100),
          file=write_file)
    print("dots: %i" % total_error_count['single_dots'], file=write_file)
    print("size changing removal: %i (%0.2f%%)" %
          (len(percentages),
           float(len(percentages)) / nr_recordings * 100),
          file=write_file)
    print("%i recordings took more than %i ms. That were: " %
          (len(time_max_list), self.time_max_threshold),
          file=write_file)
    for recording in time_max_list:
        print("* %ims: %s: %s" %
              (recording.get_time(),
               utils.get_readable_time(recording.get_time()),
               recording),
              file=write_file)
    write_file.close()
Write all obtained data to a file .
59,262
def print_featurelist(feature_list):
    """Print the feature list in a human-readable form.

    Parameters
    ----------
    feature_list : list
        Feature objects with ``get_dimension()`` and ``__str__``.
    """
    total_dimension = sum(feature.get_dimension() for feature in feature_list)
    print("## Features (%i)" % total_dimension)
    print("```")
    for feature in feature_list:
        print("* %s" % str(feature))
    print("```")
Print the feature_list in a human - readable form .
59,263
def _stroke_simplification(self, pointlist):
    """Ramer-Douglas-Peucker line simplification of one stroke.

    Removes as many points as possible while maintaining the overall
    shape: the first and last points are connected by a straight line,
    the farthest point from that line is found, and if its distance is
    at least ``self.epsilon`` the algorithm keeps it and recurses on
    both halves.

    Parameters
    ----------
    pointlist : list of point dicts (as used by
        ``geometry.perpendicular_distance``).

    Returns
    -------
    Simplified list of points (always containing the first and last
    point of *pointlist*).
    """
    # Find the point with the maximum distance to the line through the
    # first and last point.
    dmax = 0
    index = 0
    for i in range(1, len(pointlist)):
        d = geometry.perpendicular_distance(pointlist[i],
                                            pointlist[0],
                                            pointlist[-1])
        if d > dmax:
            index = i
            dmax = d
    if dmax >= self.epsilon:
        # The farthest point is significant: keep it and recurse on both
        # halves.  The split point must belong to BOTH halves; the
        # previous code used pointlist[0:index], which excluded it from
        # the left half (canonical RDP uses index + 1).
        rec_results1 = self._stroke_simplification(pointlist[0:index + 1])
        rec_results2 = self._stroke_simplification(pointlist[index:])
        # rec_results1 ends with pointlist[index], which rec_results2
        # also starts with; drop the duplicate.
        result_list = rec_results1[:-1] + rec_results2
    else:
        result_list = [pointlist[0], pointlist[-1]]
    return result_list
The Douglas-Peucker line simplification takes a list of points as an argument. It tries to simplify this list by removing as many points as possible while still maintaining the overall shape of the stroke. It does so by taking the first and the last point, connecting them by a straight line, and searching for the point with the highest distance to that line. If that distance is bigger than epsilon, the point is important and the algorithm continues recursively.
59,264
def get_preprocessing_queue(preprocessing_list):
    """Instantiate the preprocessing queue from a list of descriptions.

    Parameters
    ----------
    preprocessing_list : list of dicts
        Configuration of the preprocessing steps.

    Returns
    -------
    list of instantiated preprocessing objects.
    """
    current_module = sys.modules[__name__]
    return utils.get_objectlist(preprocessing_list,
                                config_key='preprocessing',
                                module=current_module)
Get preprocessing queue from a list of dictionaries
59,265
def print_preprocessing_list(preprocessing_queue):
    """Print the preprocessing queue in a human-readable form.

    Parameters
    ----------
    preprocessing_queue : list
        Preprocessing objects with a meaningful ``__str__``.
    """
    lines = ["## Preprocessing", "```"]
    lines += ["* " + str(step) for step in preprocessing_queue]
    lines.append("```")
    print("\n".join(lines))
Print the preprocessing queue in a human-readable form.
59,266
def _get_parameters(self, hwr_obj):
    """Compute scale and translation parameters that map the recording
    into the unit square while keeping its aspect ratio, optionally
    centering it.

    Parameters
    ----------
    hwr_obj : object with a ``get_bounding_box()`` method returning a
        dict with keys 'minx', 'maxx', 'miny', 'maxy' and 'mint'.

    Returns
    -------
    dict with the scaling 'factor', translations 'addx'/'addy', and the
    bounding-box minima 'minx'/'miny'/'mint'.
    """
    a = hwr_obj.get_bounding_box()
    # Padded width/height of the recording's bounding box.
    width = a['maxx'] - a['minx'] + self.width_add
    height = a['maxy'] - a['miny'] + self.height_add
    factor_x, factor_y = 1, 1
    if width != 0:
        factor_x = self.max_width / width
    if height != 0:
        factor_y = self.max_height / height
    # The smaller factor makes both dimensions fit and keeps the aspect
    # ratio.
    factor = min(factor_x, factor_y)
    addx, addy = 0.0, 0.0
    if self.center:
        # Shift the non-limiting dimension so the recording is centered.
        # NOTE(review): 'add' mixes both factors; presumably it centers
        # the slack of the smaller dimension — confirm against callers.
        add = -(factor / (2.0 * max(factor_x, factor_y)))
        if factor == factor_x:
            addy = add
            if self.center_other:
                addx = -(width * factor / 2.0)
        else:
            addx = add
            if self.center_other:
                addy = -(height * factor / 2.0)
    assert factor > 0, "factor > 0 is False. factor = %s" % str(factor)
    assert isinstance(addx, float), "addx is %s" % str(addx)
    assert isinstance(addy, float), "addy is %s" % str(addy)
    assert isinstance(a['minx'], (int, float)), "minx is %s" % str(a['minx'])
    assert isinstance(a['miny'], (int, float)), "miny is %s" % str(a['miny'])
    assert isinstance(a['mint'], (int, float)), "mint is %s" % str(a['mint'])
    return {"factor": factor, "addx": addx, "addy": addy,
            "minx": a['minx'], "miny": a['miny'], "mint": a['mint']}
Take a list of points and calculate the factors for scaling and moving it so that it s in the unit square . Keept the aspect ratio . Optionally center the points inside of the unit square .
59,267
def _calculate_pen_down_strokes ( self , pointlist , times = None ) : if times is None : times = [ ] for stroke in pointlist : stroke_info = { "start" : stroke [ 0 ] [ 'time' ] , "end" : stroke [ - 1 ] [ 'time' ] , "pen_down" : True } x , y , t = [ ] , [ ] , [ ] for point in stroke : if point [ 'time' ] not in t : x . append ( point [ 'x' ] ) y . append ( point [ 'y' ] ) t . append ( point [ 'time' ] ) x , y = numpy . array ( x ) , numpy . array ( y ) if len ( t ) == 1 : fx , fy = lambda x : float ( x ) , lambda y : float ( y ) elif len ( t ) == 2 : fx , fy = interp1d ( t , x , 'linear' ) , interp1d ( t , y , 'linear' ) elif len ( t ) == 3 : fx = interp1d ( t , x , 'quadratic' ) fy = interp1d ( t , y , 'quadratic' ) else : fx , fy = interp1d ( t , x , self . kind ) , interp1d ( t , y , self . kind ) stroke_info [ 'fx' ] = fx stroke_info [ 'fy' ] = fy times . append ( stroke_info ) return times
Calculate the interval borders 'times' that contain the information when a stroke started, when it ended, and how it should be interpolated.
59,268
def _calculate_pen_up_strokes ( self , pointlist , times = None ) : if times is None : times = [ ] for i in range ( len ( pointlist ) - 1 ) : stroke_info = { "start" : pointlist [ i ] [ - 1 ] [ 'time' ] , "end" : pointlist [ i + 1 ] [ 0 ] [ 'time' ] , "pen_down" : False } x , y , t = [ ] , [ ] , [ ] for point in [ pointlist [ i ] [ - 1 ] , pointlist [ i + 1 ] [ 0 ] ] : if point [ 'time' ] not in t : x . append ( point [ 'x' ] ) y . append ( point [ 'y' ] ) t . append ( point [ 'time' ] ) if len ( x ) == 1 : fx , fy = lambda x : float ( x ) , lambda y : float ( y ) else : x , y = numpy . array ( x ) , numpy . array ( y ) fx = interp1d ( t , x , kind = 'linear' ) fy = interp1d ( t , y , kind = 'linear' ) stroke_info [ 'fx' ] = fx stroke_info [ 'fy' ] = fy times . append ( stroke_info ) return times
Pen - up strokes are virtual strokes that were not drawn . It models the time when the user moved from one stroke to the next .
59,269
def _space ( self , hwr_obj , stroke , kind ) : new_stroke = [ ] stroke = sorted ( stroke , key = lambda p : p [ 'time' ] ) x , y , t = [ ] , [ ] , [ ] for point in stroke : x . append ( point [ 'x' ] ) y . append ( point [ 'y' ] ) t . append ( point [ 'time' ] ) x , y = numpy . array ( x ) , numpy . array ( y ) failed = False try : fx = interp1d ( t , x , kind = kind ) fy = interp1d ( t , y , kind = kind ) except Exception as e : if hwr_obj . raw_data_id is not None : logging . debug ( "spline failed for raw_data_id %i" , hwr_obj . raw_data_id ) else : logging . debug ( "spline failed" ) logging . debug ( e ) failed = True tnew = numpy . linspace ( t [ 0 ] , t [ - 1 ] , self . number ) if failed : try : fx = interp1d ( t , x , kind = 'linear' ) fy = interp1d ( t , y , kind = 'linear' ) failed = False except Exception as e : logging . debug ( "len(stroke) = %i" , len ( stroke ) ) logging . debug ( "len(x) = %i" , len ( x ) ) logging . debug ( "len(y) = %i" , len ( y ) ) logging . debug ( "stroke=%s" , stroke ) raise e for x , y , t in zip ( fx ( tnew ) , fy ( tnew ) , tnew ) : new_stroke . append ( { 'x' : x , 'y' : y , 'time' : t } ) return new_stroke
Do the interpolation of kind for stroke
59,270
def _calculate_average ( self , points ) : assert len ( self . theta ) == len ( points ) , "points has length %i, but should have length %i" % ( len ( points ) , len ( self . theta ) ) new_point = { 'x' : 0 , 'y' : 0 , 'time' : 0 } for key in new_point : new_point [ key ] = self . theta [ 0 ] * points [ 0 ] [ key ] + self . theta [ 1 ] * points [ 1 ] [ key ] + self . theta [ 2 ] * points [ 2 ] [ key ] return new_point
Calculate the arithmetic mean of the points' x and y coordinates separately.
59,271
def create_model(model_folder, model_type, topology, override):
    """Create a base model file via the nntoolkit if none exists yet.

    Parameters
    ----------
    model_folder : str
        Folder in which 'model-0.json' will be created.
    model_type : str
        Model type passed to the nntoolkit (e.g. 'mlp').
    topology : str
        Topology string passed to the nntoolkit.
    override : bool
        Create the model even if one already exists.
    """
    latest_model = utils.get_latest_in_folder(model_folder, ".json")
    if (latest_model == "") or override:
        logging.info("Create a base model...")
        model_src = os.path.join(model_folder, "model-0.json")
        command = "%s make %s %s > %s" % (utils.get_nntoolkit(),
                                          model_type,
                                          topology,
                                          model_src)
        logging.info(command)
        # NOTE(review): runs through the shell; assumes model_type and
        # topology are trusted configuration values.
        os.system(command)
    else:
        logging.info("Model file already existed.")
Create a model if it doesn t exist already .
59,272
def main(model_folder, override=False):
    """Parse the info.yml from *model_folder* and create the model file.

    Parameters
    ----------
    model_folder : str
        Folder with an 'info.yml' model description.
    override : bool
        Re-create the model even if one exists.
    """
    model_description_file = os.path.join(model_folder, "info.yml")
    with open(model_description_file, 'r') as ymlfile:
        # safe_load: yaml.load without a Loader is unsupported in
        # PyYAML >= 6; the file is plain configuration data.
        model_description = yaml.safe_load(ymlfile)
    project_root = utils.get_project_root()
    feature_folder = os.path.join(project_root,
                                  model_description['data-source'])
    with open(os.path.join(feature_folder, "info.yml"), 'r') as ymlfile:
        feature_description = yaml.safe_load(ymlfile)
    feature_list = features.get_features(feature_description['features'])
    input_features = sum(f.get_dimension() for f in feature_list)
    logging.info("Number of features: %i", input_features)
    logging.info(model_description['model'])
    # Only multilayer perceptrons are supported.
    if model_description['model']['type'] != 'mlp':
        return
    create_model(model_folder,
                 model_description['model']['type'],
                 model_description['model']['topology'],
                 override)
    utils.create_run_logfile(model_folder)
Parse the info . yml from model_folder and create the model file .
59,273
def interactive():
    """Interactive classifier endpoint.

    GET with a 'heartbeat' parameter echoes it back (liveness check);
    any other GET renders the drawing canvas.  POST is deprecated.
    """
    global n
    if request.method == 'GET' and request.args.get('heartbeat', '') != "":
        return request.args.get('heartbeat', '')
    if request.method == 'POST':
        logging.warning('POST to /interactive is deprecated. '
                        'Use /worker instead')
        # NOTE(review): this branch falls through without returning a
        # response, which Flask treats as an error — confirm intended.
    else:
        return render_template('canvas.html')
Interactive classifier .
59,274
def _get_part ( pointlist , strokes ) : result = [ ] strokes = sorted ( strokes ) for stroke_index in strokes : result . append ( pointlist [ stroke_index ] ) return result
Get some strokes of pointlist
59,275
def _get_translate():
    """Build a dictionary translating LaTeX commands to write-math ids.

    Reads the packaged 'latex2writemathindex.csv' file, where each line
    is '<writemathid>,<latex>'.

    Returns
    -------
    dict mapping LaTeX string to write-math id.
    """
    translate = {}
    model_path = pkg_resources.resource_filename('hwrt', 'misc/')
    translation_csv = os.path.join(model_path, 'latex2writemathindex.csv')
    arguments = {'newline': '', 'encoding': 'utf8'}
    with open(translation_csv, 'rt', **arguments) as csvfile:
        lines = csvfile.read().split("\n")
    for line in lines:
        fields = line.split(',')
        writemathid = fields[0]
        # Everything after the first comma belongs to the LaTeX command
        # (the command itself may contain commas); empty if absent.
        latex = ','.join(fields[1:])
        translate[latex] = writemathid
    return translate
Get a dictionary which translates from a neural network output to semantics .
59,276
def main(port=8000, n_output=10, use_segmenter=False):
    """Main function starting the webserver.

    Parameters
    ----------
    port : int
        Port the Flask app listens on.
    n_output : int
        Number of best results the classifier should return.
    use_segmenter : bool
        Whether the segmenter should be used for multi-symbol input.
    """
    # The request handlers read these module-level globals.
    global n
    global use_segmenter_flag
    n = n_output
    use_segmenter_flag = use_segmenter
    logging.info("Start webserver...")
    app.run(port=port)
Main function starting the webserver .
59,277
def generate_training_command(model_folder):
    """Generate a shell command string with all parameters necessary to
    train the model in *model_folder*.

    Returns
    -------
    str command, or None if no base model exists.
    """
    update_if_outdated(model_folder)
    model_description_file = os.path.join(model_folder, "info.yml")
    with open(model_description_file, 'r') as ymlfile:
        # safe_load: plain configuration data; yaml.load without a
        # Loader is unsupported in PyYAML >= 6.
        model_description = yaml.safe_load(ymlfile)
    project_root = utils.get_project_root()
    # Dataset paths used to fill the command template placeholders.
    data = {
        'training': os.path.join(project_root,
                                 model_description["data-source"],
                                 "traindata.hdf5"),
        'testing': os.path.join(project_root,
                                model_description["data-source"],
                                "testdata.hdf5"),
        'validating': os.path.join(project_root,
                                   model_description["data-source"],
                                   "validdata.hdf5"),
    }
    basename = "model"
    latest_model = utils.get_latest_working_model(model_folder)
    if latest_model == "":
        logging.error("There is no model with basename '%s'.", basename)
        return None
    logging.info("Model '%s' found.", latest_model)
    # The model files are named '<basename>-<i>.json'; train from the
    # latest one into the next index.
    i = int(latest_model.split("-")[-1].split(".")[0])
    model_src = os.path.join(model_folder, "%s-%i.json" % (basename, i))
    model_target = os.path.join(model_folder,
                                "%s-%i.json" % (basename, i + 1))
    training = model_description['training']
    training = training.replace("{{testing}}", data['testing'])
    training = training.replace("{{training}}", data['training'])
    training = training.replace("{{validation}}", data['validating'])
    training = training.replace("{{src_model}}", model_src)
    training = training.replace("{{target_model}}", model_target)
    training = training.replace("{{nntoolkit}}", utils.get_nntoolkit())
    return training
Generate a string that contains a command with all necessary parameters to train the model .
59,278
def train_model(model_folder):
    """Train the model in *model_folder*.

    Returns -1 if no training command could be generated (e.g. no base
    model exists); otherwise runs the training command via the shell.
    """
    os.chdir(model_folder)
    training = generate_training_command(model_folder)
    if training is None:
        return -1
    logging.info(training)
    # generate_training_command may change the working directory
    # (update_if_outdated), so change back before running the command.
    os.chdir(model_folder)
    os.system(training)
Train the model in model_folder .
59,279
def main(model_folder):
    """Main part of the training script: read the model description of
    *model_folder* and train the model."""
    model_description_file = os.path.join(model_folder, "info.yml")
    with open(model_description_file, 'r') as ymlfile:
        # safe_load: yaml.load without a Loader is unsupported in
        # PyYAML >= 6; the file is plain configuration data.
        model_description = yaml.safe_load(ymlfile)
    logging.info(model_description['model'])
    # A dict of dataset paths used to be built here but was never used;
    # generate_training_command() derives those paths itself.
    train_model(model_folder)
Main part of the training script .
59,280
def get_bounding_box(points):
    """Get the axis-aligned bounding box of a non-empty list of points.

    Parameters
    ----------
    points : list of dicts with keys 'x' and 'y'.

    Returns
    -------
    BoundingBox spanning from the minimal to the maximal coordinates.
    """
    assert len(points) > 0, "At least one point has to be given."
    min_x = min(point['x'] for point in points)
    max_x = max(point['x'] for point in points)
    min_y = min(point['y'] for point in points)
    max_y = max(point['y'] for point in points)
    return BoundingBox(Point(min_x, min_y), Point(max_x, max_y))
Get the bounding box of a list of points .
59,281
def do_bb_intersect(a, b):
    """Check if BoundingBox *a* intersects with BoundingBox *b*.

    Two axis-aligned boxes overlap iff their x ranges overlap and their
    y ranges overlap (touching edges count as intersecting).
    """
    x_overlap = a.p1.x <= b.p2.x and b.p1.x <= a.p2.x
    y_overlap = a.p1.y <= b.p2.y and b.p1.y <= a.p2.y
    return x_overlap and y_overlap
Check if BoundingBox a intersects with BoundingBox b .
59,282
def segments_distance(segment1, segment2):
    """Calculate the minimum distance between two line segments in the
    plane; 0 if they intersect."""
    assert isinstance(segment1, LineSegment), \
        "segment1 is not a LineSegment, but a %s" % type(segment1)
    assert isinstance(segment2, LineSegment), \
        "segment2 is not a LineSegment, but a %s" % type(segment2)
    if len(get_segments_intersections(segment1, segment2)) >= 1:
        return 0
    # Non-intersecting segments: the closest approach is between an
    # endpoint of one segment and the other segment.
    return min(point_segment_distance(segment1.p1, segment2),
               point_segment_distance(segment1.p2, segment2),
               point_segment_distance(segment2.p1, segment1),
               point_segment_distance(segment2.p2, segment1))
Calculate the distance between two line segments in the plane .
59,283
def perpendicular_distance(p3, p1, p2):
    """Distance from point *p3* to the line segment from *p1* to *p2*.

    All points are dicts with keys 'x' and 'y'.  The distance is
    measured to the nearest point on the segment (the projection foot,
    clamped to the segment's ends).
    """
    seg_dx = p2['x'] - p1['x']
    seg_dy = p2['y'] - p1['y']
    squared_distance = seg_dx * seg_dx + seg_dy * seg_dy
    if squared_distance == 0:
        # p1 and p2 coincide: fall back to point-to-point distance.
        line_point = Point(p1['x'], p1['y'])
        point = Point(p3['x'], p3['y'])
        return line_point.dist_to(point)
    # Projection parameter of p3 onto the line p1->p2, clamped to [0, 1]
    # so the foot point stays on the segment.
    u = ((p3['x'] - p1['x']) * seg_dx
         + (p3['y'] - p1['y']) * seg_dy) / squared_distance
    u = max(0, min(1, u))
    foot_x = p1['x'] + u * seg_dx
    foot_y = p1['y'] + u * seg_dy
    dist_x = foot_x - p3['x']
    dist_y = foot_y - p3['y']
    return math.sqrt(dist_x * dist_x + dist_y * dist_y)
Calculate the distance from p3 to the stroke defined by p1 and p2 . The distance is the length of the perpendicular from p3 on p1 .
59,284
def dist_to(self, p2):
    """Return the Euclidean distance from this point to point *p2*."""
    delta_x = self.x - p2.x
    delta_y = self.y - p2.y
    return math.hypot(delta_x, delta_y)
Measure the distance to another point .
59,285
def get_slope(self):
    """Return the slope m of this line segment (y = m*x + t).

    NOTE(review): a vertical segment raises ZeroDivisionError —
    presumably callers guard against that; confirm.
    """
    delta_y = self.p1.y - self.p2.y
    delta_x = self.p1.x - self.p2.x
    return delta_y / delta_x
Return the slope m of this line segment .
59,286
def get_offset(self):
    """Return the y-axis offset t of this line segment (y = m*x + t)."""
    return self.p1.y - self.p1.x * self.get_slope()
Get the offset t of this line segment .
59,287
def count_selfintersections(self):
    """Get the number of self-intersections of this polygonal chain.

    Neighboring segments share an endpoint by construction, so only
    pairs that are at least two segments apart are counted.
    """
    counter = 0
    segment_count = len(self.lineSegments)
    for i, j in itertools.combinations(range(segment_count), 2):
        if abs(i - j) <= 1:
            continue
        inters = get_segments_intersections(self.lineSegments[i],
                                            self.lineSegments[j])
        if len(inters) > 0:
            counter += 1
    return counter
Get the number of self - intersections of this polygonal chain .
59,288
def count_intersections(self, line_segments_b):
    """Count the distinct intersection points between this stroke's
    segments and the segments in *line_segments_b*."""
    points = set()
    for seg_a, seg_b in itertools.product(self.lineSegments,
                                          line_segments_b):
        points.update(get_segments_intersections(seg_a, seg_b))
    return len(points)
Count the intersections of two strokes with each other .
59,289
def get_area(self):
    """Calculate the area of this bounding box (width times height)."""
    width = self.p2.x - self.p1.x
    height = self.p2.y - self.p1.y
    return width * height
Calculate area of bounding box .
59,290
def get_center(self):
    """Return the center point of this bounding box."""
    center_x = (self.p1.x + self.p2.x) / 2.0
    center_y = (self.p1.y + self.p2.y) / 2.0
    return Point(center_x, center_y)
Get the center point of this bounding box .
59,291
def _list_ids ( path_to_data ) : loaded = pickle . load ( open ( path_to_data , "rb" ) ) raw_datasets = loaded [ 'handwriting_datasets' ] raw_ids = { } for raw_dataset in raw_datasets : raw_data_id = raw_dataset [ 'handwriting' ] . raw_data_id if raw_dataset [ 'formula_id' ] not in raw_ids : raw_ids [ raw_dataset [ 'formula_id' ] ] = [ raw_data_id ] else : raw_ids [ raw_dataset [ 'formula_id' ] ] . append ( raw_data_id ) for symbol_id in sorted ( raw_ids ) : print ( "%i: %s" % ( symbol_id , sorted ( raw_ids [ symbol_id ] ) ) )
List raw data IDs grouped by symbol ID from a pickle file path_to_data .
59,292
def _get_system(model_folder):
    """Return the preprocessing description, the feature description and
    the model description of the model in *model_folder*.

    Exits the program if *model_folder* contains no 'info.yml'.
    """
    model_description_file = os.path.join(model_folder, "info.yml")
    if not os.path.isfile(model_description_file):
        logging.error("You are probably not in the folder of a model, "
                      "because %s is not a file. (-m argument)",
                      model_description_file)
        sys.exit(-1)
    with open(model_description_file, 'r') as ymlfile:
        # safe_load: yaml.load without a Loader is unsupported in
        # PyYAML >= 6; the file is plain configuration data.
        model_desc = yaml.safe_load(ymlfile)
    # Each description references its upstream data source: the feature
    # description is derived from the model description, and the
    # preprocessing description from the feature description.
    feature_desc = _get_description(model_desc)
    preprocessing_desc = _get_description(feature_desc)
    return (preprocessing_desc, feature_desc, model_desc)
Return the preprocessing description the feature description and the model description .
59,293
def display_data(raw_data_string, raw_data_id, model_folder, show_raw):
    """Print recording *raw_data_id* with content *raw_data_string* and
    show it after applying the preprocessing of *model_folder* to it.

    Parameters
    ----------
    raw_data_string : str
        JSON string of the raw recording.
    raw_data_id : int
        Id of the recording (for display only).
    model_folder : str
        Folder whose preprocessing/feature configuration is applied.
    show_raw : bool
        Also display the recording before preprocessing.
    """
    print("## Raw Data (ID: %i)" % raw_data_id)
    print("```")
    print(raw_data_string)
    print("```")
    preprocessing_desc, feature_desc, _ = _get_system(model_folder)
    print("## Model")
    print("%s\n" % model_folder)
    tmp = preprocessing_desc['queue']
    preprocessing_queue = preprocessing.get_preprocessing_queue(tmp)
    tmp = feature_desc['features']
    feature_list = features.get_features(tmp)
    preprocessing.print_preprocessing_list(preprocessing_queue)
    features.print_featurelist(feature_list)
    recording = handwritten_data.HandwrittenData(raw_data_string,
                                                 raw_data_id=raw_data_id)
    if show_raw:
        recording.show()
    recording.preprocessing(preprocessing_queue)
    feature_values = recording.feature_extraction(feature_list)
    # Round only for readable console output.
    feature_values = [round(el, 3) for el in feature_values]
    print("Features:")
    print(feature_values)
    # Data multiplication (e.g. rotation) can turn one recording into
    # several; show each resulting variant.
    mult_queue = data_multiplication.get_data_multiplication_queue(
        feature_desc['data-multiplication'])
    training_set = [{'id': 42,
                     'formula_id': 42,
                     'formula_in_latex': 'None',
                     'handwriting': recording}]
    training_set = create_ffiles.training_set_multiplication(training_set,
                                                             mult_queue)
    logging.info("Show %i recordings...", len(training_set))
    for recording in training_set:
        recording['handwriting'].show()
Print raw_data_id with the content raw_data_string after applying the preprocessing of model_folder to it .
59,294
def main(list_ids, model, contact_server, raw_data_id, show_raw,
         mysql_cfg='mysql_online'):
    """Main function of view.py.

    Either list all raw data ids of the model's data source
    (``list_ids``), or display the recording with id *raw_data_id*,
    optionally fetched from the server first (``contact_server``) and
    then looked up in the local raw data file.
    """
    if list_ids:
        preprocessing_desc, _, _ = _get_system(model)
        raw_datapath = os.path.join(utils.get_project_root(),
                                    preprocessing_desc['data-source'])
        _list_ids(raw_datapath)
    else:
        if contact_server:
            data = _fetch_data_from_server(raw_data_id, mysql_cfg)
            print("hwrt version: %s" % hwrt.__version__)
            if data is not None:
                display_data(data['data'], data['id'], model, show_raw)
            else:
                logging.info("RAW_DATA_ID %i does not exist or "
                             "database connection did not work.",
                             raw_data_id)
        # The local raw data file is consulted regardless of whether the
        # server was contacted.
        preprocessing_desc, _, _ = _get_system(model)
        raw_datapath = os.path.join(utils.get_project_root(),
                                    preprocessing_desc['data-source'])
        handwriting = _get_data_from_rawfile(raw_datapath, raw_data_id)
        if handwriting is None:
            logging.info("Recording with ID %i was not found in %s",
                         raw_data_id, raw_datapath)
        else:
            print("hwrt version: %s" % hwrt.__version__)
            display_data(handwriting.raw_data_json,
                         handwriting.formula_id,
                         model,
                         show_raw)
Main function of view . py .
59,295
def get_parameters(folder):
    """Get the parameters of the preprocessing configured in *folder*.

    Returns
    -------
    tuple (raw data path, output path, preprocessing queue).
    """
    with open(os.path.join(folder, "info.yml"), 'r') as ymlfile:
        # safe_load: yaml.load without a Loader is unsupported in
        # PyYAML >= 6; the file is plain configuration data.
        preprocessing_description = yaml.safe_load(ymlfile)
    raw_datapath = os.path.join(utils.get_project_root(),
                                preprocessing_description['data-source'])
    outputpath = os.path.join(folder, "data.pickle")
    preprocessing_queue = preprocessing.get_preprocessing_queue(
        preprocessing_description['queue'])
    return (raw_datapath, outputpath, preprocessing_queue)
Get the parameters of the preprocessing done within folder .
59,296
def create_preprocessed_dataset(path_to_data, outputpath,
                                preprocessing_queue):
    """Apply *preprocessing_queue* to the raw data in *path_to_data* and
    store the result in *outputpath*.

    Parameters
    ----------
    path_to_data : str
        Path to a pickle file with raw handwriting datasets.
    outputpath : str
        Path of the pickle file to write.
    preprocessing_queue : list
        Instantiated preprocessing objects.
    """
    # Fixed log typo ('soure' -> 'source').
    logging.info("Data source %s", path_to_data)
    logging.info("Output will be stored in %s", outputpath)
    tmp = "Preprocessing Queue:\n"
    for preprocessing_class in preprocessing_queue:
        tmp += str(preprocessing_class) + "\n"
    logging.info(tmp)
    if not os.path.isfile(path_to_data):
        logging.info(("'%s' does not exist. Please either abort this script "
                      "or update the data location."), path_to_data)
        raw_dataset_path = utils.choose_raw_dataset()
        # Make the path relative to the project root before printing it.
        raw_dataset_path = "raw-datasets" + \
            raw_dataset_path.split("raw-datasets")[1]
        print(raw_dataset_path)
        sys.exit()
    logging.info("Start loading data...")
    # Context managers so both file handles are closed (the previous
    # version leaked them).
    with open(path_to_data, "rb") as handle:
        loaded = pickle.load(handle)
    raw_datasets = loaded['handwriting_datasets']
    logging.info("Start applying preprocessing methods")
    start_time = time.time()
    for i, raw_dataset in enumerate(raw_datasets):
        if i % 10 == 0 and i > 0:
            utils.print_status(len(raw_datasets), i, start_time)
        # Preprocessing happens in place on the recording.
        raw_dataset['handwriting'].preprocessing(preprocessing_queue)
    sys.stdout.write("\r%0.2f%% (done)\033[K\n" % (100))
    print("")
    with open(outputpath, "wb") as handle:
        pickle.dump({'handwriting_datasets': raw_datasets,
                     'formula_id2latex': loaded['formula_id2latex'],
                     'preprocessing_queue': preprocessing_queue},
                    handle,
                    2)
Create a preprocessed dataset file by applying preprocessing_queue to path_to_data . The result will be stored in outputpath .
59,297
def main(folder):
    """Main part of preprocess_dataset: read the configuration of
    *folder*, run the preprocessing and record the run."""
    raw_datapath, outputpath, preprocessing_queue = get_parameters(folder)
    create_preprocessed_dataset(raw_datapath, outputpath,
                                preprocessing_queue)
    utils.create_run_logfile(folder)
Main part of preprocess_dataset that glues things together.
59,298
def _create_index_formula_lookup ( formula_id2index , feature_folder , index2latex ) : index2formula_id = sorted ( formula_id2index . items ( ) , key = lambda n : n [ 1 ] ) index2formula_file = os . path . join ( feature_folder , "index2formula_id.csv" ) with open ( index2formula_file , "w" ) as f : f . write ( "index,formula_id,latex\n" ) for formula_id , index in index2formula_id : f . write ( "%i,%i,%s\n" % ( index , formula_id , index2latex [ index ] ) )
Create a lookup file where the index is mapped to the formula id and the LaTeX command .
59,299
def main(feature_folder, create_learning_curve=False):
    """Main function of create_ffiles.py.

    Reads the feature configuration from *feature_folder* and writes
    traindata/validdata/testdata hdf5 files (plus index and translation
    lookup files) into it.

    Parameters
    ----------
    feature_folder : str
        Folder with an 'info.yml' feature description.
    create_learning_curve : bool
        Forwarded to make_hdf5.
    """
    with open(os.path.join(feature_folder, "info.yml"), 'r') as ymlfile:
        # safe_load: yaml.load without a Loader is unsupported in
        # PyYAML >= 6; the file is plain configuration data.
        feature_description = yaml.safe_load(ymlfile)
    path_to_data = os.path.join(utils.get_project_root(),
                                feature_description['data-source'])
    # The data source may point at a folder containing 'data.pickle'.
    if os.path.isdir(path_to_data):
        path_to_data = os.path.join(path_to_data, "data.pickle")
    target_paths = {'traindata': os.path.join(feature_folder,
                                              "traindata.hdf5"),
                    'validdata': os.path.join(feature_folder,
                                              "validdata.hdf5"),
                    'testdata': os.path.join(feature_folder,
                                             "testdata.hdf5")}
    feature_list = features.get_features(feature_description['features'])
    mult_queue = data_multiplication.get_data_multiplication_queue(
        feature_description['data-multiplication'])
    os.chdir(feature_folder)
    logging.info("Start creation of hdf5-files...")
    logging.info("Get sets from '%s' ...", path_to_data)
    (training_set, validation_set, test_set, formula_id2index,
     preprocessing_queue, index2latex) = get_sets(path_to_data)
    # Data multiplication is applied to the training set only.
    training_set = training_set_multiplication(training_set, mult_queue)
    _create_index_formula_lookup(formula_id2index, feature_folder,
                                 index2latex)
    print("Classes (nr of symbols): %i" % len(formula_id2index))
    preprocessing.print_preprocessing_list(preprocessing_queue)
    features.print_featurelist(feature_list)
    logging.info("Start creating hdf5 files")
    input_features = sum(map(lambda n: n.get_dimension(), feature_list))
    for dataset_name, dataset, is_traindata in \
            [("traindata", training_set, True),
             ("testdata", test_set, False),
             ("validdata", validation_set, False)]:
        t0 = time.time()
        logging.info("Start preparing '%s' ...", dataset_name)
        prepared, translation = prepare_dataset(dataset,
                                                formula_id2index,
                                                feature_list,
                                                is_traindata)
        logging.info("%s length: %i", dataset_name, len(prepared))
        # Fixed log typo ("'make_hdf5'x" -> "'make_hdf5'").
        logging.info("start 'make_hdf5' ...")
        make_hdf5(dataset_name, input_features, prepared,
                  os.path.join(feature_folder,
                               target_paths[dataset_name]),
                  create_learning_curve)
        _create_translation_file(feature_folder,
                                 dataset_name,
                                 translation,
                                 formula_id2index)
        t1 = time.time() - t0
        logging.info("%s was written. Needed %0.2f seconds",
                     dataset_name, t1)
        # The prepared datasets can be large; collect before the next one.
        gc.collect()
    utils.create_run_logfile(feature_folder)
main function of create_ffiles . py