code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
all_models_selected = selection.get_all() # check if all elements selected are on one hierarchy level -> TODO or in future are parts of sibling?! # if not take the state with the most siblings as the copy root parent_m_count_dict = {} for model in all_models_selected: parent_m_count_dict[model.parent] = parent_m_count_dict[model.parent] + 1 if model.parent in parent_m_count_dict else 1 parent_m = None current_count_parent = 0 for possible_parent_m, count in parent_m_count_dict.items(): parent_m = possible_parent_m if current_count_parent < count else parent_m # if root no parent exist and only on model can be selected if len(selection.states) == 1 and selection.get_selected_state().state.is_root_state: parent_m = None # kick all selection except root_state if len(all_models_selected) > 1: selection.set(selection.get_selected_state()) if parent_m is not None: # check and reduce selection for model in all_models_selected: if model.parent is not parent_m: selection.remove(model) return parent_m
def do_selection_reduction_to_one_parent(selection)
Find and reduce selection to one parent state. :param selection: :return: state model which is parent of selection or None if root state
4.981516
4.698059
1.060335
all_models_selected = selection.get_all() if not all_models_selected: logger.warning("Nothing to copy because state machine selection is empty.") return parent_m = self.do_selection_reduction_to_one_parent(selection) self.copy_parent_state_id = parent_m.state.state_id if parent_m else None if smart_selection_adaption: self.do_smart_selection_adaption(selection, parent_m) # store all lists of selection selected_models_dict = {} for state_element_attr in ContainerState.state_element_attrs: selected_models_dict[state_element_attr] = list(getattr(selection, state_element_attr)) # delete old models self.destroy_all_models_in_dict(self.model_copies) # copy all selected elements self.model_copies = deepcopy(selected_models_dict) new_content_of_clipboard = ', '.join(["{0} {1}".format(len(elems), key if len(elems) > 1 else key[:-1]) for key, elems in self.model_copies.items() if elems]) logger.info("The new content is {0}".format(new_content_of_clipboard.replace('_', ' '))) return selected_models_dict, parent_m
def __create_core_and_model_object_copies(self, selection, smart_selection_adaption)
Copy all elements of a selection. The method copies all objects and modifies the selection before copying the elements if the smart flag is true. The smart selection adaption is by default enabled. In any case the selection is reduced to have one parent state that is used as the root of copy, except a root state it self is selected. :param Selection selection: an arbitrary selection, whose elements should be copied .param bool smart_selection_adaption: flag to enable smart selection adaptation mode :return: dictionary of selected models copied, parent model of copy
4.694616
4.350123
1.079192
if target_dict: for model_list in target_dict.values(): if isinstance(model_list, (list, tuple)): for model in model_list: model.prepare_destruction() if model._parent: model._parent = None else: raise Exception("wrong data in clipboard")
def destroy_all_models_in_dict(target_dict)
Method runs the prepare destruction method of models which are assumed in list or tuple as values within a dict
4.757689
4.328961
1.099037
self.destroy_all_models_in_dict(self.model_copies) self.model_copies = None self.copy_parent_state_id = None self.outcome_id_mapping_dict = None self.port_id_mapping_dict = None self.state_id_mapping_dict = None
def destroy(self)
Destroys the clipboard by relieving all model references.
5.614134
5.185019
1.082761
width = extents[2] - extents[0] height = extents[3] - extents[1] add_width = (factor - 1) * width add_height = (factor - 1) * height x1 = extents[0] - add_width / 2 x2 = extents[2] + add_width / 2 y1 = extents[1] - add_height / 2 y2 = extents[3] + add_height / 2 return x1, y1, x2, y2
def extend_extents(extents, factor=1.1)
Extend a given bounding box The bounding box (x1, y1, x2, y2) is centrally stretched by the given factor. :param extents: The bound box extents :param factor: The factor for stretching :return: (x1, y1, x2, y2) of the extended bounding box
1.519349
1.545777
0.982903
from gaphas.item import NW if isinstance(item, ConnectionView): return item.canvas.get_matrix_i2i(item, item.parent).transform_point(*handle.pos) parent = canvas.get_parent(item) if parent: return item.canvas.get_matrix_i2i(item, parent).transform_point(*handle.pos) else: return item.canvas.get_matrix_i2c(item).transform_point(*item.handles()[NW].pos)
def calc_rel_pos_to_parent(canvas, item, handle)
This method calculates the relative position of the given item's handle to its parent :param canvas: Canvas to find relative position in :param item: Item to find relative position to parent :param handle: Handle of item to find relative position to :return: Relative position (x, y)
4.076308
4.963177
0.82131
assert isinstance(bool_list, list) counter = 0 for item in bool_list: if item: counter += 1 return counter == 1
def assert_exactly_one_true(bool_list)
This method asserts that only one value of the provided list is True. :param bool_list: List of booleans to check :return: True if only one value is True, False otherwise
2.570319
3.358982
0.765208
parent = port.parent from rafcon.gui.mygaphas.items.state import StateView if isinstance(parent, StateView): return parent.model.state.state_id
def get_state_id_for_port(port)
This method returns the state ID of the state containing the given port :param port: Port to check for containing state ID :return: State ID of state containing port
14.047472
15.60043
0.900454
from rafcon.gui.mygaphas.items.state import StateView if isinstance(state, StateView): if state.income.handle == handle: return state.income else: for outcome in state.outcomes: if outcome.handle == handle: return outcome for input in state.inputs: if input.handle == handle: return input for output in state.outputs: if output.handle == handle: return output for scoped in state.scoped_variables: if scoped.handle == handle: return scoped
def get_port_for_handle(handle, state)
Looks for and returns the PortView to the given handle in the provided state :param handle: Handle to look for port :param state: State containing handle and port :returns: PortView for handle
4.046385
3.935232
1.028246
from rafcon.gui.mygaphas.items.ports import ScopedVariablePortView, LogicPortView, DataPortView if isinstance(from_port, LogicPortView) and isinstance(to_port, LogicPortView): return add_transition_to_state(from_port, to_port) elif isinstance(from_port, (DataPortView, ScopedVariablePortView)) and \ isinstance(to_port, (DataPortView, ScopedVariablePortView)): return add_data_flow_to_state(from_port, to_port) # Both ports are not None elif from_port and to_port: logger.error("Connection of non-compatible ports: {0} and {1}".format(type(from_port), type(to_port))) return False
def create_new_connection(from_port, to_port)
Checks the type of connection and tries to create it If bot port are logical port,s a transition is created. If both ports are data ports (including scoped variable), then a data flow is added. An error log is created, when the types are not compatible. :param from_port: The starting port of the connection :param to_port: The end point of the connection :return: True if a new connection was added
4.410795
3.623913
1.217136
from rafcon.gui.mygaphas.items.ports import InputPortView, OutputPortView, ScopedVariablePortView from rafcon.gui.models.container_state import ContainerStateModel from_state_v = from_port.parent to_state_v = to_port.parent from_state_m = from_state_v.model to_state_m = to_state_v.model from_state_id = from_state_m.state.state_id to_state_id = to_state_m.state.state_id from_port_id = from_port.port_id to_port_id = to_port.port_id if not isinstance(from_port, (InputPortView, OutputPortView, ScopedVariablePortView)) or \ not isinstance(from_port, (InputPortView, OutputPortView, ScopedVariablePortView)): logger.error("Data flows only exist between data ports (input, output, scope). Given: {0} and {1}".format(type( from_port), type(to_port))) return False responsible_parent_m = None # from parent to child if isinstance(from_state_m, ContainerStateModel) and \ check_if_dict_contains_object_reference_in_values(to_state_m.state, from_state_m.state.states): responsible_parent_m = from_state_m # from child to parent elif isinstance(to_state_m, ContainerStateModel) and \ check_if_dict_contains_object_reference_in_values(from_state_m.state, to_state_m.state.states): responsible_parent_m = to_state_m # from parent to parent elif isinstance(from_state_m, ContainerStateModel) and from_state_m.state is to_state_m.state: responsible_parent_m = from_state_m # == to_state_m # from child to child elif (not from_state_m.state.is_root_state) and (not to_state_m.state.is_root_state) \ and from_state_m.state is not to_state_m.state \ and from_state_m.parent.state.state_id and to_state_m.parent.state.state_id: responsible_parent_m = from_state_m.parent if not isinstance(responsible_parent_m, ContainerStateModel): logger.error("Data flows only exist in container states (e.g. hierarchy states)") return False try: responsible_parent_m.state.add_data_flow(from_state_id, from_port_id, to_state_id, to_port_id) return True except (ValueError, AttributeError, TypeError) as e: logger.error("Data flow couldn't be added: {0}".format(e)) return False
def add_data_flow_to_state(from_port, to_port)
Interface method between Gaphas and RAFCON core for adding data flows The method checks the types of the given ports and their relation. From this the necessary parameters for the add_dat_flow method of the RAFCON core are determined. Also the parent state is derived from the ports. :param from_port: Port from which the data flow starts :param to_port: Port to which the data flow goes to :return: True if a data flow was added, False if an error occurred
2.330264
2.209171
1.054814
from rafcon.gui.mygaphas.items.ports import IncomeView, OutcomeView from_state_v = from_port.parent to_state_v = to_port.parent from_state_m = from_state_v.model to_state_m = to_state_v.model # Gather necessary information to create transition from_state_id = from_state_m.state.state_id to_state_id = to_state_m.state.state_id responsible_parent_m = None # Start transition if isinstance(from_port, IncomeView): from_state_id = None from_outcome_id = None responsible_parent_m = from_state_m # Transition from parent income to child income if isinstance(to_port, IncomeView): to_outcome_id = None # Transition from parent income to parent outcome elif isinstance(to_port, OutcomeView): to_outcome_id = to_port.outcome_id elif isinstance(from_port, OutcomeView): from_outcome_id = from_port.outcome_id # Transition from child outcome to child income if isinstance(to_port, IncomeView): responsible_parent_m = from_state_m.parent to_outcome_id = None # Transition from child outcome to parent outcome elif isinstance(to_port, OutcomeView): responsible_parent_m = to_state_m to_outcome_id = to_port.outcome_id else: raise ValueError("Invalid port type") from rafcon.gui.models.container_state import ContainerStateModel if not responsible_parent_m: logger.error("Transitions only exist between incomes and outcomes. Given: {0} and {1}".format(type( from_port), type(to_port))) return False elif not isinstance(responsible_parent_m, ContainerStateModel): logger.error("Transitions only exist in container states (e.g. hierarchy states)") return False try: t_id = responsible_parent_m.state.add_transition(from_state_id, from_outcome_id, to_state_id, to_outcome_id) if from_state_id == to_state_id: gui_helper_meta_data.insert_self_transition_meta_data(responsible_parent_m.states[from_state_id], t_id, combined_action=True) return True except (ValueError, AttributeError, TypeError) as e: logger.error("Transition couldn't be added: {0}".format(e)) return False
def add_transition_to_state(from_port, to_port)
Interface method between Gaphas and RAFCON core for adding transitions The method checks the types of the given ports (IncomeView or OutcomeView) and from this determines the necessary parameters for the add_transition method of the RAFCON core. Also the parent state is derived from the ports. :param from_port: Port from which the transition starts :param to_port: Port to which the transition goes to :return: True if a transition was added, False if an error occurred
2.827312
2.53572
1.114994
handles_list = transition_v.handles() rel_pos_list = [] for handle in handles_list: if handle in transition_v.end_handles(include_waypoints=True): continue rel_pos = transition_v.canvas.get_matrix_i2i(transition_v, transition_v.parent).transform_point(*handle.pos) rel_pos_list.append(rel_pos) return rel_pos_list
def get_relative_positions_of_waypoints(transition_v)
This method takes the waypoints of a connection and returns all relative positions of these waypoints. :param canvas: Canvas to check relative position in :param transition_v: Transition view to extract all relative waypoint positions :return: List with all relative positions of the given transition
4.310843
4.062274
1.06119
from rafcon.gui.mygaphas.items.connection import TransitionView assert isinstance(transition_v, TransitionView) transition_m = transition_v.model waypoint_list = get_relative_positions_of_waypoints(transition_v) if waypoint_list != last_waypoint_list: transition_m.set_meta_data_editor('waypoints', waypoint_list) if publish: graphical_editor_view.emit('meta_data_changed', transition_m, "waypoints", False)
def update_meta_data_for_transition_waypoints(graphical_editor_view, transition_v, last_waypoint_list, publish=True)
This method updates the relative position meta data of the transitions waypoints if they changed :param graphical_editor_view: Graphical Editor the change occurred in :param transition_v: Transition that changed :param last_waypoint_list: List of waypoints before change :param bool publish: Whether to publish the changes using the meta signal
5.994664
5.662714
1.05862
from rafcon.gui.mygaphas.items.ports import IncomeView, OutcomeView, InputPortView, OutputPortView, \ ScopedVariablePortView for port in item.get_all_ports(): if not handle or handle is port.handle: rel_pos = (port.handle.pos.x.value, port.handle.pos.y.value) if isinstance(port, (IncomeView, OutcomeView, InputPortView, OutputPortView, ScopedVariablePortView)): port_m = port.model cur_rel_pos = port_m.get_meta_data_editor()['rel_pos'] if rel_pos != cur_rel_pos: port_m.set_meta_data_editor('rel_pos', rel_pos) if handle: graphical_editor_view.emit('meta_data_changed', port_m, "position", True) else: continue if handle: # If we were supposed to update the meta data of a specific port, we can stop here break
def update_meta_data_for_port(graphical_editor_view, item, handle)
This method updates the meta data of the states ports if they changed. :param graphical_editor_view: Graphical Editor the change occurred in :param item: State the port was moved in :param handle: Handle of moved port or None if all ports are to be updated
4.956243
4.907038
1.010027
from gaphas.item import NW rel_pos = calc_rel_pos_to_parent(graphical_editor_view.editor.canvas, name_v, name_v.handles()[NW]) state_v = graphical_editor_view.editor.canvas.get_parent(name_v) state_v.model.set_meta_data_editor('name.size', (name_v.width, name_v.height)) state_v.model.set_meta_data_editor('name.rel_pos', rel_pos) if publish: graphical_editor_view.emit('meta_data_changed', state_v.model, "name_size", False)
def update_meta_data_for_name_view(graphical_editor_view, name_v, publish=True)
This method updates the meta data of a name view. :param graphical_editor_view: Graphical Editor view the change occurred in :param name_v: The name view which has been changed/moved :param publish: Whether to publish the changes of the meta data
4.440963
5.363721
0.827963
from gaphas.item import NW # Update all port meta data to match with new position and size of parent update_meta_data_for_port(graphical_editor_view, state_v, None) if affects_children: update_meta_data_for_name_view(graphical_editor_view, state_v.name_view, publish=False) for transition_v in state_v.get_transitions(): update_meta_data_for_transition_waypoints(graphical_editor_view, transition_v, None, publish=False) for child_state_v in state_v.child_state_views(): update_meta_data_for_state_view(graphical_editor_view, child_state_v, True, publish=False) rel_pos = calc_rel_pos_to_parent(graphical_editor_view.editor.canvas, state_v, state_v.handles()[NW]) state_v.model.set_meta_data_editor('size', (state_v.width, state_v.height)) state_v.model.set_meta_data_editor('rel_pos', rel_pos) if publish: graphical_editor_view.emit('meta_data_changed', state_v.model, "size", affects_children)
def update_meta_data_for_state_view(graphical_editor_view, state_v, affects_children=False, publish=True)
This method updates the meta data of a state view :param graphical_editor_view: Graphical Editor view the change occurred in :param state_v: The state view which has been changed/moved :param affects_children: Whether the children of the state view have been resized or not :param publish: Whether to publish the changes of the meta data
3.258823
3.541764
0.920113
self.canvas.get_first_view().unselect_item(self) for child in self.canvas.get_children(self)[:]: child.remove() self.remove_income() for outcome_v in self.outcomes[:]: self.remove_outcome(outcome_v) for input_port_v in self.inputs[:]: self.remove_input_port(input_port_v) for output_port_v in self.outputs[:]: self.remove_output_port(output_port_v) for scoped_variable_port_v in self.scoped_variables[:]: self.remove_scoped_variable(scoped_variable_port_v) self.remove_keep_rect_within_constraint_from_parent() for constraint in self._constraints[:]: self.canvas.solver.remove_constraint(constraint) self._constraints.remove(constraint) self.canvas.remove(self)
def remove(self)
Remove recursively all children and then the StateView itself
3.476536
3.423301
1.015551
for child_state_v in self.child_state_views(): self.keep_rect_constraints[child_state_v].enable = enable child_state_v.keep_rect_constraints[child_state_v._name_view].enable = enable
def set_enable_flag_keep_rect_within_constraints(self, enable)
Enable/disables the KeepRectangleWithinConstraint for child states
6.103685
4.788126
1.274754
if isinstance(self.model, LibraryStateModel) and self.model.show_content(): return not with_content or isinstance(self.model.state_copy, ContainerStateModel) return False
def show_content(self, with_content=False)
Checks if the state is a library with the `show_content` flag set :param with_content: If this parameter is `True`, the method return only True if the library represents a ContainerState :return: Whether the content of a library state is shown
10.545587
6.820064
1.546259
if port_width is None: port_width = 2 * self.border_width border_size = self.border_width pos = 0.5 * border_size + port_num * port_width outermost_pos = max(side_length / 2., side_length - 0.5 * border_size - port_width) pos = min(pos, outermost_pos) return pos
def _calculate_port_pos_on_line(self, port_num, side_length, port_width=None)
Calculate the position of a port on a line The position depends on the number of element. Elements are equally spaced. If the end of the line is reached, ports are stacked. :param int port_num: The number of the port of that type :param float side_length: The length of the side the element is placed on :param float port_width: The width of one port :return: The position on the line for the given port :rtype: float
3.398944
3.372247
1.007917
with open(path, 'w') as f: yaml.dump(dictionary, f, indent=4, **kwargs)
def write_dict_to_yaml(dictionary, path, **kwargs)
Writes a dictionary to a yaml file :param dictionary: the dictionary to be written :param path: the absolute path of the target yaml file :param kwargs: optional additional parameters for dumper
2.432746
3.112889
0.781507
f = file(path, 'r') dictionary = yaml.load(f) f.close() return dictionary
def load_dict_from_yaml(path)
Loads a dictionary from a yaml file :param path: the absolute path of the target yaml file :return:
2.940466
3.617183
0.812916
result_string = json.dumps(dictionary, cls=JSONObjectEncoder, indent=4, check_circular=False, sort_keys=True, **kwargs) with open(path, 'w') as f: # We cannot write directly to the file, as otherwise the 'encode' method wouldn't be called f.write(result_string)
def write_dict_to_json(dictionary, path, **kwargs)
Write a dictionary to a json file. :param path: The relative path to save the dictionary to :param dictionary: The dictionary to get saved :param kwargs: optional additional parameters for dumper
4.806969
5.147366
0.93387
f = open(path, 'r') if as_dict: result = json.load(f) else: result = json.load(f, cls=JSONObjectDecoder, substitute_modules=substitute_modules) f.close() return result
def load_objects_from_json(path, as_dict=False)
Loads a dictionary from a json file. :param path: The relative path of the json file. :return: The dictionary specified in the json file
3.703652
4.727392
0.783445
if not self.enable: return margin = self.margin_method() def parent_width(): return self.parent_se[0].value - self.parent_nw[0].value def parent_height(): return self.parent_se[1].value - self.parent_nw[1].value def child_width(): child_width = self.child_se[0].value - self.child_nw[0].value if child_width > parent_width() - 2 * margin: child_width = parent_width() - 2 * margin return max(self.child.min_width, child_width) def child_height(): child_height = self.child_se[1].value - self.child_nw[1].value if child_height > parent_height() - 2 * margin: child_height = parent_height() - 2 * margin return max(self.child.min_height, child_height) updated = False # Left edge (west) if self.parent_nw[0].value > self.child_nw[0].value - margin + EPSILON: width = child_width() _update(self.child_nw[0], self.parent_nw[0].value + margin) _update(self.child_se[0], self.child_nw[0].value + width) updated = True # Right edge (east) elif self.parent_se[0].value < self.child_se[0].value + margin - EPSILON: width = child_width() _update(self.child_se[0], self.parent_se[0].value - margin) _update(self.child_nw[0], self.child_se[0].value - width) updated = True # Upper edge (north) if self.parent_nw[1].value > self.child_nw[1].value - margin + EPSILON: height = child_height() _update(self.child_nw[1], self.parent_nw[1].value + margin) _update(self.child_se[1], self.child_nw[1].value + height) updated = True # Lower edge (south) elif self.parent_se[1].value < self.child_se[1].value + margin - EPSILON: height = child_height() _update(self.child_se[1], self.parent_se[1].value - margin) _update(self.child_nw[1], self.child_se[1].value - height) updated = True from rafcon.gui.mygaphas.items.state import StateView if updated and isinstance(self.child, StateView): self.child.update_minimum_size_of_children()
def solve_for(self, var=None)
Ensure that the children is within its parent
1.83378
1.796406
1.020805
margin = self.margin_method() if self.parent_nw[0].value > self.child[0].value - margin: _update(self.child[0], self.parent_nw[0].value + margin) # Right edge (east) if self.parent_se[0].value < self.child[0].value + margin: _update(self.child[0], self.parent_se[0].value - margin) # Upper edge (north) if self.parent_nw[1].value > self.child[1].value - margin: _update(self.child[1], self.parent_nw[1].value + margin) # Lower edge (south) if self.parent_se[1].value < self.child[1].value + margin: _update(self.child[1], self.parent_se[1].value - margin)
def solve_for(self, var=None)
Ensure that the children is within its parent
2.243423
2.077053
1.080099
from rafcon.utils.geometry import point_left_of_line p = (self._initial_pos.x, self._initial_pos.y) nw_x, nw_y, se_x, se_y = self.get_adjusted_border_positions() if point_left_of_line(p, (nw_x, nw_y), (se_x, se_y)): # upper right triangle of state if point_left_of_line(p, (nw_x, se_y), (se_x, nw_y)): # upper quarter triangle of state self._port.side = SnappedSide.TOP self.limit_pos(p[0], se_x, nw_x) else: # right quarter triangle of state self._port.side = SnappedSide.RIGHT self.limit_pos(p[1], se_y, nw_y) else: # lower left triangle of state if point_left_of_line(p, (nw_x, se_y), (se_x, nw_y)): # left quarter triangle of state self._port.side = SnappedSide.LEFT self.limit_pos(p[1], se_y, nw_y) else: # lower quarter triangle of state self._port.side = SnappedSide.BOTTOM self.limit_pos(p[0], se_x, nw_x) self.set_nearest_border()
def update_port_side(self)
Updates the initial position of the port The port side is ignored but calculated from the port position. Then the port position is limited to the four side lines of the state.
2.425535
2.272243
1.067463
# As the size of the containing state may has changed we need to update the distance to the border self.update_distance_to_border() px, py = self._point nw_x, nw_y, se_x, se_y = self.get_adjusted_border_positions() # If the port is located in one of the corners it is possible to move in two directions if ((self._initial_pos.x == nw_x and self._initial_pos.y == nw_y) or (self._initial_pos.x == se_x and self._initial_pos.y == nw_y) or (self._initial_pos.x == se_x and self._initial_pos.y == se_y) or (self._initial_pos.x == nw_x and self._initial_pos.y == se_y)): self.limit_pos(px, se_x, nw_x) self.limit_pos(py, se_y, nw_y) # If port movement starts at LEFT position, keep X position at place and move Y elif self._initial_pos.x == nw_x: _update(px, nw_x) self.limit_pos(py, se_y, nw_y) self._port.side = SnappedSide.LEFT # If port movement starts at TOP position, keep Y position at place and move X elif self._initial_pos.y == nw_y: _update(py, nw_y) self.limit_pos(px, se_x, nw_x) self._port.side = SnappedSide.TOP # If port movement starts at RIGHT position, keep X position at place and move Y elif self._initial_pos.x == se_x: _update(px, se_x) self.limit_pos(py, se_y, nw_y) self._port.side = SnappedSide.RIGHT # If port movement starts at BOTTOM position, keep Y position at place and move X elif self._initial_pos.y == se_y: _update(py, se_y) self.limit_pos(px, se_x, nw_x) self._port.side = SnappedSide.BOTTOM # If containing state has been resized, snap ports accordingly to border else: self.set_nearest_border() # Update initial position for next reference _update(self._initial_pos.x, deepcopy(px.value)) _update(self._initial_pos.y, deepcopy(py.value))
def _solve(self)
Calculates the correct position of the port and keeps it aligned with the binding rect
2.510535
2.442337
1.027924
if p > se_pos: _update(p, se_pos) elif p < nw_pos: _update(p, nw_pos)
def limit_pos(p, se_pos, nw_pos)
Limits position p to stay inside containing state :param p: Position to limit :param se_pos: Bottom/Right boundary :param nw_pos: Top/Left boundary :return:
3.232104
3.82627
0.844714
nw_x, nw_y = self._rect[0] se_x, se_y = self._rect[1] nw_x += self._distance_to_border nw_y += self._distance_to_border se_x -= self._distance_to_border se_y -= self._distance_to_border return nw_x, nw_y, se_x, se_y
def get_adjusted_border_positions(self)
Calculates the positions to limit the port movement to :return: Adjusted positions nw_x, nw_y, se_x, se_y
2.129606
2.056774
1.03541
px, py = self._point nw_x, nw_y, se_x, se_y = self.get_adjusted_border_positions() if self._port.side == SnappedSide.RIGHT: _update(px, se_x) elif self._port.side == SnappedSide.BOTTOM: _update(py, se_y) elif self._port.side == SnappedSide.LEFT: _update(px, nw_x) elif self._port.side == SnappedSide.TOP: _update(py, nw_y)
def set_nearest_border(self)
Snaps the port to the correct side upon state size change
2.750453
2.467379
1.114727
elements_in_folder = os.listdir(path) # find all state folder elements in system path state_folders_in_file_system = [] for folder_name in elements_in_folder: if os.path.exists(os.path.join(path, folder_name, FILE_NAME_CORE_DATA)) or \ os.path.exists(os.path.join(path, folder_name, FILE_NAME_CORE_DATA_OLD)): state_folders_in_file_system.append(folder_name) # remove elements used by existing states and storage format for state in states: storage_folder_for_state = get_storage_id_for_state(state) if storage_folder_for_state in state_folders_in_file_system: state_folders_in_file_system.remove(storage_folder_for_state) # remove the remaining state folders for folder_name in state_folders_in_file_system: shutil.rmtree(os.path.join(path, folder_name))
def remove_obsolete_folders(states, path)
Removes obsolete state machine folders This function removes all folders in the file system folder `path` that do not belong to the states given by `states`. :param list states: the states that should reside in this very folder :param str path: the file system path to be checked for valid folders
2.571567
2.583988
0.995193
def warning_logger_message(insert_string): not_allowed_characters = "'" + "', '".join(REPLACED_CHARACTERS_FOR_NO_OS_LIMITATION.keys()) + "'" logger.warning("Deprecated {2} in {0}. Please avoid to use the following characters {1}." "".format(base_path, not_allowed_characters, insert_string)) from rafcon.core.singleton import library_manager if library_manager.is_os_path_within_library_root_paths(base_path): library_path, library_name = library_manager.get_library_path_and_name_for_os_path(base_path) clean_library_path = clean_path(library_path) clean_library_name = clean_path(library_name) if library_name != clean_library_name or library_path != clean_library_path: warning_logger_message("library path") library_root_key = library_manager._get_library_root_key_for_os_path(base_path) library_root_path = library_manager._library_root_paths[library_root_key] clean_base_path = os.path.join(library_root_path, clean_library_path, clean_library_name) else: path_elements = base_path.split(os.path.sep) state_machine_folder_name = base_path.split(os.path.sep)[-1] path_elements[-1] = clean_path(state_machine_folder_name) if not state_machine_folder_name == path_elements[-1]: warning_logger_message("state machine folder name") clean_base_path = os.path.sep.join(path_elements) return clean_base_path
def clean_path_from_deprecated_naming(base_path)
Checks if the base path includes deprecated characters/format and returns corrected version The state machine folder name should be according the universal RAFCON path format. In case the state machine path is inside a mounted library_root_path also the library_path has to have this format. The library path is a partial path of the state machine path. This rules are followed to always provide secure paths for RAFCON and all operating systems. :param base_path: :return: cleaned base_path :rtype: str
2.899089
2.608264
1.111501
path_elements = base_path.split(os.path.sep) reduced_path_elements = [clean_path_element(elem, max_length=255) for elem in path_elements] if not all(path_elements[i] == elem for i, elem in enumerate(reduced_path_elements)): # logger.info("State machine storage path is reduced") base_path = os.path.sep.join(reduced_path_elements) return base_path
def clean_path(base_path)
This function cleans a file system path in terms of removing all not allowed characters of each path element. A path element is an element of a path between the path separator of the operating system. :param base_path: the path to be cleaned :return: the clean path
3.529894
3.573163
0.987891
# warns the user in the logger when using deprecated names clean_path_from_deprecated_naming(base_path) state_machine.acquire_modification_lock() try: root_state = state_machine.root_state # clean old path first if delete_old_state_machine: if os.path.exists(base_path): shutil.rmtree(base_path) # Ensure that path is existing if not os.path.exists(base_path): os.makedirs(base_path) old_update_time = state_machine.last_update state_machine.last_update = storage_utils.get_current_time_string() state_machine_dict = state_machine.to_dict() storage_utils.write_dict_to_json(state_machine_dict, os.path.join(base_path, STATEMACHINE_FILE)) # set the file_system_path of the state machine if not as_copy: state_machine.file_system_path = copy.copy(base_path) else: state_machine.last_update = old_update_time # add root state recursively remove_obsolete_folders([root_state], base_path) save_state_recursively(root_state, base_path, "", as_copy) if state_machine.marked_dirty and not as_copy: state_machine.marked_dirty = False logger.debug("State machine with id {0} was saved at {1}".format(state_machine.state_machine_id, base_path)) except Exception: raise finally: state_machine.release_modification_lock()
def save_state_machine_to_path(state_machine, base_path, delete_old_state_machine=False, as_copy=False)
Saves a state machine recursively to the file system The `as_copy` flag determines whether the state machine is saved as copy. If so (`as_copy=True`), some state machine attributes will be left untouched, such as the `file_system_path` or the `dirty_flag`. :param rafcon.core.state_machine.StateMachine state_machine: the state_machine to be saved :param str base_path: base_path to which all further relative paths refers to :param bool delete_old_state_machine: Whether to delete any state machine existing at the given path :param bool as_copy: Whether to use a copy storage for the state machine
3.33403
3.269015
1.019888
from rafcon.core.states.execution_state import ExecutionState if isinstance(state, ExecutionState): source_script_file = os.path.join(state.script.path, state.script.filename) destination_script_file = os.path.join(state_path_full, SCRIPT_FILE) try: write_file(destination_script_file, state.script_text) except Exception: logger.exception("Storing of script file failed: {0} -> {1}".format(state.get_path(), destination_script_file)) raise if not source_script_file == destination_script_file and not as_copy: state.script.filename = SCRIPT_FILE state.script.path = state_path_full
def save_script_file_for_state_and_source_path(state, state_path_full, as_copy=False)
Saves the script file for a state to the directory of the state. The script name will be set to the SCRIPT_FILE constant. :param state: The state of which the script file should be saved :param str state_path_full: The path to the file system storage location of the state :param bool as_copy: Temporary storage flag to signal that the given path is not the new file_system_path
3.192759
3.156216
1.011578
destination_script_file = os.path.join(state_path_full, SEMANTIC_DATA_FILE) try: storage_utils.write_dict_to_json(state.semantic_data, destination_script_file) except IOError: logger.exception("Storing of semantic data for state {0} failed! Destination path: {1}". format(state.get_path(), destination_script_file)) raise
def save_semantic_data_for_state(state, state_path_full)
Saves the semantic data in a separate json file. :param state: The state of which the script file should be saved :param str state_path_full: The path to the file system storage location of the state
3.769246
3.876056
0.972444
from rafcon.core.states.execution_state import ExecutionState from rafcon.core.states.container_state import ContainerState state_path = os.path.join(parent_path, get_storage_id_for_state(state)) state_path_full = os.path.join(base_path, state_path) if not os.path.exists(state_path_full): os.makedirs(state_path_full) storage_utils.write_dict_to_json(state, os.path.join(state_path_full, FILE_NAME_CORE_DATA)) if not as_copy: state.file_system_path = state_path_full if isinstance(state, ExecutionState): save_script_file_for_state_and_source_path(state, state_path_full, as_copy) save_semantic_data_for_state(state, state_path_full) # create yaml files for all children if isinstance(state, ContainerState): remove_obsolete_folders(state.states.values(), os.path.join(base_path, state_path)) for state in state.states.values(): save_state_recursively(state, base_path, state_path, as_copy)
def save_state_recursively(state, base_path, parent_path, as_copy=False)
Recursively saves a state to a json file It calls this method on all its substates. :param state: State to be stored :param base_path: Path to the state machine :param parent_path: Path to the parent state :param bool as_copy: Temporary storage flag to signal that the given path is not the new file_system_path :return:
2.97232
2.893982
1.027069
from rafcon.core.states.execution_state import ExecutionState from rafcon.core.states.container_state import ContainerState from rafcon.core.states.hierarchy_state import HierarchyState path_core_data = os.path.join(state_path, FILE_NAME_CORE_DATA) logger.debug("Load state recursively: {0}".format(str(state_path))) # TODO: Should be removed with next minor release if not os.path.exists(path_core_data): path_core_data = os.path.join(state_path, FILE_NAME_CORE_DATA_OLD) try: state_info = load_data_file(path_core_data) except ValueError as e: logger.exception("Error while loading state data: {0}".format(e)) return except LibraryNotFoundException as e: logger.error("Library could not be loaded: {0}\n" "Skipping library and continuing loading the state machine".format(e)) state_info = storage_utils.load_objects_from_json(path_core_data, as_dict=True) state_id = state_info["state_id"] dummy_state = HierarchyState(LIBRARY_NOT_FOUND_DUMMY_STATE_NAME, state_id=state_id) # set parent of dummy state if isinstance(parent, ContainerState): parent.add_state(dummy_state, storage_load=True) else: dummy_state.parent = parent return dummy_state # Transitions and data flows are not added when loading a state, as also states are not added. # We have to wait until the child states are loaded, before adding transitions and data flows, as otherwise the # validity checks for transitions and data flows would fail if not isinstance(state_info, tuple): state = state_info else: state = state_info[0] transitions = state_info[1] data_flows = state_info[2] # set parent of state if parent is not None and isinstance(parent, ContainerState): parent.add_state(state, storage_load=True) else: state.parent = parent # read script file if an execution state if isinstance(state, ExecutionState): script_text = read_file(state_path, state.script.filename) state.script_text = script_text # load semantic data try: semantic_data = load_data_file(os.path.join(state_path, SEMANTIC_DATA_FILE)) state.semantic_data = semantic_data except Exception as e: # semantic data file does not have to be there pass one_of_my_child_states_not_found = False # load child states for p in os.listdir(state_path): child_state_path = os.path.join(state_path, p) if os.path.isdir(child_state_path): child_state = load_state_recursively(state, child_state_path, dirty_states) if child_state.name is LIBRARY_NOT_FOUND_DUMMY_STATE_NAME: one_of_my_child_states_not_found = True if one_of_my_child_states_not_found: # omit adding transitions and data flows in this case pass else: # Now we can add transitions and data flows, as all child states were added if isinstance(state_info, tuple): state.transitions = transitions state.data_flows = data_flows state.file_system_path = state_path if state.marked_dirty: dirty_states.append(state) return state
def load_state_recursively(parent, state_path=None, dirty_states=[])
Recursively loads the state It calls this method on each sub-state of a container state. :param parent: the root state of the last load call to which the loaded state will be added :param state_path: the path on the filesystem where to find the meta file for the state :param dirty_states: a dict of states which changed during loading :return:
3.176589
3.147908
1.009111
if os.path.exists(path_of_file): return storage_utils.load_objects_from_json(path_of_file) raise ValueError("Data file not found: {0}".format(path_of_file))
def load_data_file(path_of_file)
Loads the content of a file by using json.load. :param path_of_file: the path of the file to load :return: the file content as a string :raises exceptions.ValueError: if the file was not found
3.20917
4.080544
0.786456
if max_length is not None: if isinstance(text, string_types) and len(text) > max_length: max_length = int(max_length) half_length = float(max_length - 1) / 2 return text[:int(math.ceil(half_length))] + separator + text[-int(math.floor(half_length)):] return text
def limit_text_max_length(text, max_length, separator='_')
Limits the length of a string. The returned string will be the first `max_length/2` characters of the input string plus a separator plus the last `max_length/2` characters of the input string. :param text: the text to be limited :param max_length: the maximum length of the output string :param separator: the separator between the first "max_length"/2 characters of the input string and the last "max_length/2" characters of the input string :return: the shortened input string
2.477576
2.614596
0.947594
elements_to_replace = REPLACED_CHARACTERS_FOR_NO_OS_LIMITATION for elem, replace_with in elements_to_replace.items(): text = text.replace(elem, replace_with) if max_length is not None: text = limit_text_max_length(text, max_length, separator) return text
def clean_path_element(text, max_length=None, separator='_')
Replace characters that conflict with a free OS choice when in a file system path. :param text: the string to be cleaned :param max_length: the maximum length of the output string :param separator: the separator used for rafcon.core.storage.storage.limit_text_max_length :return:
3.8258
3.380095
1.131861
# TODO: Should there not only be one method i.e. either this one or "clean_path_element" elements_to_replace = {' ': '_', '*': '_'} for elem, replace_with in elements_to_replace.items(): text = text.replace(elem, replace_with) text = re.sub('[^a-zA-Z0-9-_]', '', text) if max_length is not None: text = limit_text_max_length(text, max_length, separator) return text
def limit_text_to_be_path_element(text, max_length=None, separator='_')
Replace characters that are not in the valid character set of RAFCON. :param text: the string to be cleaned :param max_length: the maximum length of the output string :param separator: the separator used for rafcon.core.storage.storage.limit_text_max_length :return:
4.118949
4.013841
1.026187
if global_config.get_config_value('STORAGE_PATH_WITH_STATE_NAME'): max_length = global_config.get_config_value('MAX_LENGTH_FOR_STATE_NAME_IN_STORAGE_PATH') max_length_of_state_name_in_folder_name = 255 - len(ID_NAME_DELIMITER + state.state_id) # TODO: should we allow "None" in config file? if max_length is None or max_length == "None" or max_length > max_length_of_state_name_in_folder_name: if max_length_of_state_name_in_folder_name < len(state.name): logger.info("The storage folder name is forced to be maximal 255 characters in length.") max_length = max_length_of_state_name_in_folder_name return limit_text_to_be_path_element(state.name, max_length) + ID_NAME_DELIMITER + state.state_id else: return state.state_id
def get_storage_id_for_state(state)
Calculates the storage id of a state. This ID can be used for generating the file path for a state. :param rafcon.core.states.state.State state: state the storage_id should is composed for
3.501164
3.592907
0.974466
text_buffer = self.get_buffer() # not needed if the right side bar is un-docked from rafcon.gui.singleton import main_window_controller if main_window_controller is None or main_window_controller.view is None: return from rafcon.gui.runtime_config import global_runtime_config if global_runtime_config.get_config_value('RIGHT_BAR_WINDOW_UNDOCKED'): return # move the pane left if the cursor is to far right and the pane position is less then 440 from its max position button_container_min_width = self.button_container_min_width width_of_all = button_container_min_width + self.tab_width text_view_width = button_container_min_width - self.line_numbers_width min_line_string_length = float(button_container_min_width)/float(self.source_view_character_size) current_pane_pos = main_window_controller.view['right_h_pane'].get_property('position') max_position = main_window_controller.view['right_h_pane'].get_property('max_position') pane_rel_pos = main_window_controller.view['right_h_pane'].get_property('max_position') - current_pane_pos if pane_rel_pos >= width_of_all + self.line_numbers_width: pass else: cursor_line_offset = text_buffer.get_iter_at_offset(text_buffer.props.cursor_position).get_line_offset() needed_rel_pos = text_view_width/min_line_string_length*cursor_line_offset \ + self.tab_width + self.line_numbers_width needed_rel_pos = min(width_of_all, needed_rel_pos) if pane_rel_pos >= needed_rel_pos: pass else: main_window_controller.view['right_h_pane'].set_property('position', max_position - needed_rel_pos) spacer_width = int(width_of_all + self.line_numbers_width - needed_rel_pos) self.spacer_frame.set_size_request(width=spacer_width, height=-1)
def pane_position_check(self)
Update right bar pane position if needed Checks calculates if the cursor is still visible and updates the pane position if it is close to not be seen. In case of an un-docked right-bar this method does nothing. :return:
3.729852
3.522391
1.058898
last_history_item = self.get_last_history_item() from rafcon.core.states.library_state import LibraryState # delayed imported on purpose if isinstance(state_for_scoped_data, LibraryState): state_for_scoped_data = state_for_scoped_data.state_copy return_item = CallItem(state, last_history_item, call_type, state_for_scoped_data, input_data, state.run_id) return self._push_item(last_history_item, return_item)
def push_call_history_item(self, state, call_type, state_for_scoped_data, input_data=None)
Adds a new call-history-item to the history item list A call history items stores information about the point in time where a method (entry, execute, exit) of certain state was called. :param state: the state that was called :param call_type: the call type of the execution step, i.e. if it refers to a container state or an execution state :param state_for_scoped_data: the state of which the scoped data needs to be saved for further usages (e.g. backward stepping)
4.456224
4.827272
0.923135
last_history_item = self.get_last_history_item() from rafcon.core.states.library_state import LibraryState # delayed imported on purpose if isinstance(state_for_scoped_data, LibraryState): state_for_scoped_data = state_for_scoped_data.state_copy return_item = ReturnItem(state, last_history_item, call_type, state_for_scoped_data, output_data, state.run_id) return self._push_item(last_history_item, return_item)
def push_return_history_item(self, state, call_type, state_for_scoped_data, output_data=None)
Adds a new return-history-item to the history item list A return history items stores information about the point in time where a method (entry, execute, exit) of certain state returned. :param state: the state that returned :param call_type: the call type of the execution step, i.e. if it refers to a container state or an execution state :param state_for_scoped_data: the state of which the scoped data needs to be saved for further usages (e.g. backward stepping)
4.199347
4.549906
0.922952
last_history_item = self.get_last_history_item() return_item = ConcurrencyItem(state, self.get_last_history_item(), number_concurrent_threads, state.run_id, self.execution_history_storage) return self._push_item(last_history_item, return_item)
def push_concurrency_history_item(self, state, number_concurrent_threads)
Adds a new concurrency-history-item to the history item list A concurrent history item stores information about the point in time where a certain number of states is launched concurrently (e.g. in a barrier concurrency state). :param state: the state that launches the state group :param number_concurrent_threads: the number of states that are launched
4.22658
5.237837
0.806932
if isinstance(object_, Hashable): object_.update_hash(obj_hash) elif isinstance(object_, (list, set, tuple)): if isinstance(object_, set): # A set is not ordered object_ = sorted(object_) for element in object_: Hashable.update_hash_from_dict(obj_hash, element) elif isinstance(object_, dict): for key in sorted(object_.keys()): # A dict is not ordered Hashable.update_hash_from_dict(obj_hash, key) Hashable.update_hash_from_dict(obj_hash, object_[key]) else: obj_hash.update(Hashable.get_object_hash_string(object_))
def update_hash_from_dict(obj_hash, object_)
Updates an existing hash object with another Hashable, list, set, tuple, dict or stringifyable object :param obj_hash: The hash object (see Python hashlib documentation) :param object_: The value that should be added to the hash (can be another Hashable or a dictionary)
2.072728
2.058878
1.006727
if obj_hash is None: obj_hash = hashlib.sha256() self.update_hash(obj_hash) return obj_hash
def mutable_hash(self, obj_hash=None)
Creates a hash with the (im)mutable data fields of the object Example: >>> my_obj = type("MyDerivedClass", (Hashable,), { "update_hash": lambda self, h: h.update("RAFCON") })() >>> my_obj_hash = my_obj.mutable_hash() >>> print('Hash: ' + my_obj_hash.hexdigest()) Hash: c8b2e32dcb31c5282e4b9dbc6a9975b65bf59cd80a7cee66d195e320484df5c6 :param obj_hash: The hash object (see Python hashlib) :return: The updated hash object
2.883207
2.462595
1.1708
try: # Shorten the source name of the record (remove rafcon.) if sys.version_info >= (2, 7): record.__setattr__("name", record.name.replace("rafcon.", "")) msg = self.format(record) fs = "%s" try: ufs = u'%s' try: entry = ufs % msg except UnicodeEncodeError: entry = fs % msg except UnicodeError: entry = fs % msg for logging_view in self._logging_views.values(): logging_view.print_message(entry, record.levelno) except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record)
def emit(self, record)
Logs a new record If a logging view is given, it is used to log the new record to. The code is partially copied from the StreamHandler class. :param record: :return:
4.517879
4.315311
1.046942
meta_gaphas = self.meta['gui']['editor_gaphas'] meta_opengl = self.meta['gui']['editor_opengl'] assert isinstance(meta_gaphas, Vividict) and isinstance(meta_opengl, Vividict) # Use meta data of editor with more keys (typically one of the editors has zero keys) # TODO check if the magic length condition in the next line can be improved (consistent behavior getter/setter?) parental_conversion_from_opengl = self._parent and self._parent().temp['conversion_from_opengl'] from_gaphas = len(meta_gaphas) > len(meta_opengl) or (len(meta_gaphas) == len(meta_opengl) and for_gaphas and not parental_conversion_from_opengl) # Convert meta data if meta data target and origin differ if from_gaphas and not for_gaphas: self.meta['gui']['editor_opengl'] = self._meta_data_editor_gaphas2opengl(meta_gaphas) elif not from_gaphas and for_gaphas: self.meta['gui']['editor_gaphas'] = self._meta_data_editor_opengl2gaphas(meta_opengl) # only keep meta data for one editor del self.meta['gui']['editor_opengl' if for_gaphas else 'editor_gaphas'] return self.meta['gui']['editor_gaphas'] if for_gaphas else self.meta['gui']['editor_opengl']
def get_meta_data_editor(self, for_gaphas=True)
Returns the editor for the specified editor This method should be used instead of accessing the meta data of an editor directly. It return the meta data of the editor available (with priority to the one specified by `for_gaphas`) and converts it if needed. :param bool for_gaphas: True (default) if the meta data is required for gaphas, False if for OpenGL :return: Meta data for the editor :rtype: Vividict
4.536332
4.129571
1.0985
self.do_convert_meta_data_if_no_data(from_gaphas) meta_gui = self.meta['gui'] meta_gui = meta_gui['editor_gaphas'] if from_gaphas else meta_gui['editor_opengl'] key_path = key.split('.') for key in key_path: if isinstance(meta_gui, list): meta_gui[int(key)] = meta_data break if key == key_path[-1]: meta_gui[key] = meta_data else: meta_gui = meta_gui[key] return self.get_meta_data_editor(for_gaphas=from_gaphas)
def set_meta_data_editor(self, key, meta_data, from_gaphas=True)
Sets the meta data for a specific key of the desired editor :param str key: The meta data key, separated by dots if it is nested :param meta_data: The value to be set :param bool from_gaphas: If the data comes from a gaphas editor
3.451505
3.542229
0.974388
if obj_hash is None: obj_hash = hashlib.sha256() self.update_meta_data_hash(obj_hash) return obj_hash
def meta_data_hash(self, obj_hash=None)
Creates a hash with the meta data of the model :param obj_hash: The hash object (see Python hashlib) :return: The updated hash object
2.694285
2.568113
1.04913
self._Observer__PROP_TO_METHS.clear() self._Observer__METH_TO_PROPS.clear() self._Observer__PAT_TO_METHS.clear() self._Observer__METH_TO_PAT.clear() self._Observer__PAT_METH_TO_KWARGS.clear()
def prepare_destruction(self)
Prepares the model for destruction
4.703396
4.982898
0.943908
if isinstance(value, string_types) and len(value) > constants.MAX_VALUE_LABEL_TEXT_LENGTH: value = value[:constants.MAX_VALUE_LABEL_TEXT_LENGTH] + "..." final_string = " " + value + " " elif isinstance(value, (dict, list)) and len(str(value)) > constants.MAX_VALUE_LABEL_TEXT_LENGTH: value_text = str(value)[:constants.MAX_VALUE_LABEL_TEXT_LENGTH] + "..." final_string = " " + value_text + " " else: final_string = " " + str(value) + " " return final_string
def limit_value_string_length(value)
This method limits the string representation of the value to MAX_VALUE_LABEL_TEXT_LENGTH + 3 characters. :param value: Value to limit string representation :return: String holding the value with a maximum length of MAX_VALUE_LABEL_TEXT_LENGTH + 3
2.053464
1.995792
1.028897
r, g, b = color.red, color.green, color.blue # Convert from 0-6535 to 0-1 r /= 65535. g /= 65535. b /= 65535. if transparency is not None or opacity is None: transparency = 0 if transparency is None else transparency # default value if transparency < 0 or transparency > 1: raise ValueError("Transparency must be between 0 and 1") alpha = 1 - transparency else: if opacity < 0 or opacity > 1: raise ValueError("Opacity must be between 0 and 1") alpha = opacity return r, g, b, alpha
def get_col_rgba(color, transparency=None, opacity=None)
This class converts a Gdk.Color into its r, g, b parts and adds an alpha according to needs If both transparency and opacity is None, alpha is set to 1 => opaque :param Gdk.Color color: Color to extract r, g and b from :param float | None transparency: Value between 0 (opaque) and 1 (transparent) or None if opacity is to be used :param float | None opacity: Value between 0 (transparent) and 1 (opaque) or None if transparency is to be used :return: Red, Green, Blue and Alpha value (all between 0.0 - 1.0)
2.215883
2.185407
1.013945
from rafcon.gui.mygaphas.items.state import StateView, NameView if isinstance(item, StateView): return item.border_width * view.get_zoom_factor() / 1.5 elif isinstance(item, NameView): return item.parent.border_width * view.get_zoom_factor() / 2.5 return 0
def get_side_length_of_resize_handle(view, item)
Calculate the side length of a resize handle :param rafcon.gui.mygaphas.view.ExtendedGtkView view: View :param rafcon.gui.mygaphas.items.state.StateView item: StateView :return: side length :rtype: float
6.527085
3.109085
2.099359
c = cairo_context rot_angle = .0 move_x = 0. move_y = 0. if port_side is SnappedSide.RIGHT: move_x = pos[0] + name_size[0] move_y = pos[1] c.rectangle(move_x, move_y, value_size[0], value_size[1]) elif port_side is SnappedSide.BOTTOM: move_x = pos[0] - value_size[1] move_y = pos[1] + name_size[0] rot_angle = pi / 2. c.rectangle(move_x, move_y, value_size[1], value_size[0]) elif port_side is SnappedSide.LEFT: move_x = pos[0] - value_size[0] move_y = pos[1] c.rectangle(move_x, move_y, value_size[0], value_size[1]) elif port_side is SnappedSide.TOP: move_x = pos[0] - value_size[1] move_y = pos[1] - value_size[0] rot_angle = -pi / 2. c.rectangle(move_x, move_y, value_size[1], value_size[0]) c.set_source_rgba(*color) c.fill_preserve() c.set_source_rgb(*gui_config.gtk_colors['BLACK'].to_floats()) c.stroke() return rot_angle, move_x, move_y
def draw_data_value_rect(cairo_context, color, value_size, name_size, pos, port_side)
This method draws the containing rect for the data port value, depending on the side and size of the label. :param cairo_context: Draw Context :param color: Background color of value part :param value_size: Size (width, height) of label holding the value :param name_size: Size (width, height) of label holding the name :param pos: Position of name label start point (upper left corner of label) :param port_side: Side on which the value part should be drawn :return: Rotation Angle (to rotate value accordingly), X-Position of value label start point, Y-Position of value label start point
1.798939
1.755972
1.024469
c = context # The current point is the port position # Mover to outer border of state c.rel_move_to(0, port_offset) # Draw line to arrow tip of label c.rel_line_to(0, distance_to_port) # Line to upper left corner c.rel_line_to(-width / 2., arrow_height) # Line to lower left corner c.rel_line_to(0, height - arrow_height) # Line to lower right corner c.rel_line_to(width, 0) # Line to upper right corner c.rel_line_to(0, -(height - arrow_height)) # Line to center top (tip of label) c.rel_line_to(-width / 2., -arrow_height) # Close path c.close_path()
def draw_label_path(context, width, height, arrow_height, distance_to_port, port_offset)
Draws the path for an upright label :param context: The Cairo context :param float width: Width of the label :param float height: Height of the label :param float distance_to_port: Distance to the port related to the label :param float port_offset: Distance from the port center to its border :param bool draw_connection_to_port: Whether to draw a line from the tip of the label to the port
3.293852
3.336159
0.987319
self.destruction_signal.emit() try: self.unregister_observer(self) except KeyError: # Might happen if the observer was already unregistered pass if recursive: if self.state_copy: self.state_copy.prepare_destruction(recursive) self.state_copy = None else: if self.state_copy_initialized: logger.verbose("Multiple calls of prepare destruction for {0}".format(self)) # The next lines are commented because not needed and create problems if used why it is an open to-do # for port in self.input_data_ports[:] + self.output_data_ports[:] + self.outcomes[:]: # if port.core_element is not None: # # TODO setting data ports None in a Library state cause gtkmvc3 attribute getter problems why? # port.prepare_destruction() del self.input_data_ports[:] del self.output_data_ports[:] del self.outcomes[:] self.state = None
def prepare_destruction(self, recursive=True)
Prepares the model for destruction Recursively un-registers all observers and removes references to child models
8.835974
8.722311
1.013031
if not self.state_copy_initialized: return self.input_data_ports = [] for input_data_port_m in self.state_copy.input_data_ports: new_ip_m = deepcopy(input_data_port_m) new_ip_m.parent = self new_ip_m.data_port = input_data_port_m.data_port self.input_data_ports.append(new_ip_m)
def _load_input_data_port_models(self)
Reloads the input data port models directly from the the state
2.731777
2.441322
1.118974
if not self.state_copy_initialized: return self.output_data_ports = [] for output_data_port_m in self.state_copy.output_data_ports: new_op_m = deepcopy(output_data_port_m) new_op_m.parent = self new_op_m.data_port = output_data_port_m.data_port self.output_data_ports.append(new_op_m)
def _load_output_data_port_models(self)
Reloads the output data port models directly from the the state
2.716919
2.469764
1.100072
if not self.state_copy_initialized: return self.income = None income_m = deepcopy(self.state_copy.income) income_m.parent = self income_m.income = income_m.income self.income = income_m
def _load_income_model(self)
Reloads the income model directly from the state
6.061557
4.977165
1.217873
if not self.state_copy_initialized: return self.outcomes = [] for outcome_m in self.state_copy.outcomes: new_oc_m = deepcopy(outcome_m) new_oc_m.parent = self new_oc_m.outcome = outcome_m.outcome self.outcomes.append(new_oc_m)
def _load_outcome_models(self)
Reloads the outcome models directly from the state
3.649739
3.22156
1.13291
current_hierarchy_depth = self.state.library_hierarchy_depth max_hierarchy_depth = global_gui_config.get_config_value("MAX_VISIBLE_LIBRARY_HIERARCHY", 2) if current_hierarchy_depth >= max_hierarchy_depth: return False if current_hierarchy_depth > 1: uppermost_lib_state = self.state.get_uppermost_library_root_state().parent uppermost_lib_state_m = self.get_state_machine_m().get_state_model_by_path(uppermost_lib_state.get_path()) else: uppermost_lib_state_m = self uppermost_lib_meta = uppermost_lib_state_m.meta return False if 'show_content' not in uppermost_lib_meta['gui'] else uppermost_lib_meta['gui']['show_content']
def show_content(self)
Check if content of library is to be shown Content is shown, if the uppermost state's meta flag "show_content" is True and the library hierarchy depth (up to MAX_VISIBLE_LIBRARY_HIERARCHY level) is not to high. :return: Whether the content is to be shown :rtype: bool
4.192325
3.052916
1.37322
if self.core_element is None: logger.verbose("Multiple calls of prepare destruction for {0}".format(self)) self.destruction_signal.emit() try: self.unregister_observer(self) except KeyError: # Might happen if the observer was already unregistered pass super(StateElementModel, self).prepare_destruction()
def prepare_destruction(self)
Prepares the model for destruction Unregisters the model from observing itself.
8.276867
7.092593
1.166973
if self.parent is not None: self.parent.model_changed(model, prop_name, info)
def model_changed(self, model, prop_name, info)
This method notifies the parent state about changes made to the state element
2.812178
2.639602
1.065379
if self.parent is not None: msg = info.arg # Add information about notification to the signal message notification = Notification(model, prop_name, info) msg = msg._replace(notification=notification) info.arg = msg self.parent.meta_changed(model, prop_name, info)
def meta_changed(self, model, prop_name, info)
This method notifies the parent state about changes made to the meta data
5.317949
5.106064
1.041497
config_key = info['args'][1] if config_key in ["EXECUTION_TICKER_ENABLED"]: self.check_configuration()
def on_config_value_changed(self, config_m, prop_name, info)
Callback when a config value has been changed :param ConfigModel config_m: The config model that has been changed :param str prop_name: Should always be 'config' :param dict info: Information e.g. about the changed config key
15.694906
22.591602
0.694723
self.ticker_text_label.hide() if self.current_observed_sm_m: self.stop_sm_m_observation(self.current_observed_sm_m)
def disable(self)
Relieve all state machines that have no active execution and hide the widget
13.292959
10.544242
1.260684
from rafcon.gui.utils.notification_overview import NotificationOverview from rafcon.core.states.state import State def name_and_next_state(state): assert isinstance(state, State) if state.is_root_state_of_library: return state.parent.parent, state.parent.name else: return state.parent, state.name def create_path(state, n=3, separator='/'): next_parent, name = name_and_next_state(state) path = separator + name n -= 1 while n > 0 and isinstance(next_parent, State): next_parent, name = name_and_next_state(next_parent) path = separator + name + path n -= 1 if isinstance(next_parent, State): path = separator + '..' + path return path if 'kwargs' in info and 'method_name' in info['kwargs']: overview = NotificationOverview(info) if overview['method_name'][-1] == 'state_execution_status': active_state = overview['model'][-1].state assert isinstance(active_state, State) path_depth = rafcon.gui.singleton.global_gui_config.get_config_value("EXECUTION_TICKER_PATH_DEPTH", 3) message = self._fix_text_of_label + create_path(active_state, path_depth) if rafcon.gui.singleton.main_window_controller.view is not None: self.ticker_text_label.set_text(message) else: logger.warn("Not initialized yet")
def on_state_execution_status_changed_after(self, model, prop_name, info)
Show current execution status in the widget This function specifies what happens if the state machine execution status of a state changes :param model: the model of the state that has changed (most likely its execution status) :param prop_name: property name that has been changed :param info: notification info dictionary :return:
4.365845
4.323237
1.009856
if not self._view_initialized: return active_sm_id = rafcon.gui.singleton.state_machine_manager_model.state_machine_manager.active_state_machine_id if active_sm_id is None: # relieve all state machines that have no active execution and hide the widget self.disable() else: # observe all state machines that have an active execution and show the widget self.check_configuration()
def execution_engine_model_changed(self, model, prop_name, info)
Active observation of state machine and show and hide widget.
8.284391
6.982406
1.186467
self.execution_engine.add_observer(self, "start", notify_before_function=self.on_start) self.execution_engine.add_observer(self, "pause", notify_before_function=self.on_pause) self.execution_engine.add_observer(self, "stop", notify_before_function=self.on_stop)
def register_observer(self)
Register all observable which are of interest
2.538741
2.454654
1.034256
root = state_machine.root_state root.add_observer(self, "state_execution_status", notify_after_function=self.on_state_execution_status_changed_after) self.recursively_register_child_states(root)
def register_states_of_state_machine(self, state_machine)
This functions registers all states of state machine. :param state_machine: the state machine to register all states of :return:
7.2285
8.109012
0.891416
self.logger.info("Execution status observer add new state {}".format(state)) if isinstance(state, ContainerState): state.add_observer(self, "add_state", notify_after_function=self.on_add_state) for state in list(state.states.values()): self.recursively_register_child_states(state) state.add_observer(self, "state_execution_status", notify_after_function=self.on_state_execution_status_changed_after) if isinstance(state, LibraryState): self.recursively_register_child_states(state.state_copy) state.add_observer(self, "state_execution_status", notify_after_function=self.on_state_execution_status_changed_after)
def recursively_register_child_states(self, state)
A function tha registers recursively all child states of a state :param state: :return:
3.26489
3.282568
0.994615
self.logger.info("Execution status observer register new state machine sm_id: {}".format(args[1].state_machine_id)) self.register_states_of_state_machine(args[1])
def on_add_state_machine_after(self, observable, return_value, args)
This method specifies what happens when a state machine is added to the state machine manager :param observable: the state machine manager :param return_value: the new state machine :param args: :return:
7.844352
9.429403
0.831903
self.logger.info("Execution status has changed for state '{0}' to status: {1}" "".format(observable.get_path(by_name=True), observable.state_execution_status))
def on_state_execution_status_changed_after(self, observable, return_value, args)
This function specifies what happens if the state machine execution status of a state changes :param observable: the state whose execution status changed :param return_value: the new execution status :param args: a list of all arguments of the observed function :return:
7.002142
7.643576
0.916082
logger.debug("Starting execution of {0}{1}".format(self, " (backwards)" if self.backward_execution else "")) # reset variables self.child_state = None self.last_error = None self.last_child = None self.last_transition = None if self.backward_execution: self.setup_backward_run() else: # forward_execution self.setup_run() self.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE if self.backward_execution: last_history_item = self.execution_history.pop_last_item() assert isinstance(last_history_item, ReturnItem) self.scoped_data = last_history_item.scoped_data else: # forward_execution self.execution_history.push_call_history_item(self, CallType.CONTAINER, self, self.input_data) self.child_state = self.get_start_state(set_final_outcome=True) if self.child_state is None: self.child_state = self.handle_no_start_state()
def _initialize_hierarchy(self)
This function covers the whole initialization routine before executing a hierarchy state. :return:
4.477569
4.433981
1.00983
try: self._initialize_hierarchy() while self.child_state is not self: # print("hs1", self.name) self.handling_execution_mode = True execution_mode = singleton.state_machine_execution_engine.handle_execution_mode(self, self.child_state) # in the case of starting the sm from a specific state not the transitions define the logic flow # but the the execution_engine.run_to_states; thus, do not alter the next state in this case if not self._start_state_modified: # check if e.g. the state machine was paused and the next state was modified (e.g. removed) self.check_if_child_state_was_modified() self.handling_execution_mode = False if self.state_execution_status is not StateExecutionStatus.EXECUTE_CHILDREN: self.state_execution_status = StateExecutionStatus.EXECUTE_CHILDREN # print("hs2", self.name) self.backward_execution = False if self.preempted: if self.last_transition and self.last_transition.from_outcome == -2: logger.debug("Execute preemption handling for '{0}'".format(self.child_state)) else: break elif execution_mode == StateMachineExecutionStatus.BACKWARD: break_loop = self._handle_backward_execution_before_child_execution() if break_loop: break # This is only the case if this hierarchy-state is started in backward mode, # but the user directly switches to the forward execution mode if self.child_state is None: break # print("hs3", self.name) self._execute_current_child() if self.backward_execution: # print("hs4", self.name) break_loop = self._handle_backward_execution_after_child_execution() if break_loop: # print("hs4.1", self.name) break else: # print("hs5", self.name) break_loop = self._handle_forward_execution_after_child_execution() if break_loop: break # print("hs6", self.name) return self._finalize_hierarchy() except Exception as e: logger.error("{0} had an internal error: {1}\n{2}".format(self, str(e), str(traceback.format_exc()))) self.output_data["error"] = e self.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE self.child_state = None self.last_child = None return self.finalize(Outcome(-1, "aborted"))
def run(self)
This defines the sequence of actions that are taken when the hierarchy is executed. A hierarchy state executes all its child states recursively. Principally this code collects all input data for the next child state, executes it, stores its output data and determines the next state based on the outcome of the child state. :return:
5.065266
4.849157
1.044566
self.backward_execution = True last_history_item = self.execution_history.pop_last_item() if last_history_item.state_reference is self: # if the the next child_state in the history is self exit this hierarchy-state if self.child_state: # do not set the last state to inactive before executing the new one self.child_state.state_execution_status = StateExecutionStatus.INACTIVE return True assert isinstance(last_history_item, ReturnItem) self.scoped_data = last_history_item.scoped_data self.child_state = last_history_item.state_reference return False
def _handle_backward_execution_before_child_execution(self)
Sets up all data after receiving a backward execution step from the execution engine :return: a flag to indicate if normal child state execution should abort
6.375741
6.010361
1.060792
self.child_state.input_data = self.get_inputs_for_state(self.child_state) self.child_state.output_data = self.create_output_dictionary_for_state(self.child_state) # process data of last state if self.last_error: self.child_state.input_data['error'] = copy.deepcopy(self.last_error) self.last_error = None if self.last_child: # do not set the last state to inactive before executing the new one self.last_child.state_execution_status = StateExecutionStatus.INACTIVE self.child_state.generate_run_id() if not self.backward_execution: # only add history item if it is not a backward execution self.execution_history.push_call_history_item( self.child_state, CallType.EXECUTE, self, self.child_state.input_data) self.child_state.start(self.execution_history, backward_execution=self.backward_execution, generate_run_id=False) self.child_state.join() # this line is important to indicate the parent the current execution status # it may also change during the execution of an hierarchy state # e.g. a hierarchy state may be started in forward execution mode but can leave in backward execution mode # print(self.child_state) # print(self.child_state.backward_execution) self.backward_execution = self.child_state.backward_execution # for h in self.execution_history._history_items: # print(h) if self.preempted: if self.backward_execution: # this is the case if the user backward step through its state machine and stops it # as preemption behaviour in backward mode is not defined, set the state to forward mode # to ensure clean state machine shutdown self.backward_execution = False # set last_error and self.last_child if self.child_state.final_outcome is not None: # final outcome can be None if only one state in a # hierarchy state is executed and immediately backward executed if self.child_state.final_outcome.outcome_id == -1: # if the child_state aborted save the error self.last_error = "" if 'error' in self.child_state.output_data: self.last_error = self.child_state.output_data['error'] self.last_child = self.child_state
def _execute_current_child(self)
Collect all data for a child state and execute it. :return:
5.143758
5.032874
1.022032
self.child_state.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE # the item popped now from the history will be a CallItem and will contain the scoped data, # that was valid before executing the child_state last_history_item = self.execution_history.pop_last_item() assert isinstance(last_history_item, CallItem) # copy the scoped_data of the history from the point before the child_state was executed self.scoped_data = last_history_item.scoped_data # this is a look-ahead step to directly leave this hierarchy-state if the last child_state # was executed; this leads to the backward and forward execution of a hierarchy child_state # having the exact same number of steps last_history_item = self.execution_history.get_last_history_item() if last_history_item.state_reference is self: last_history_item = self.execution_history.pop_last_item() assert isinstance(last_history_item, CallItem) self.scoped_data = last_history_item.scoped_data self.child_state.state_execution_status = StateExecutionStatus.INACTIVE return True return False
def _handle_backward_execution_after_child_execution(self)
Cleanup the former child state execution and prepare for the next state execution in the backward execution case. :return: a flag to indicate if normal child state execution should abort
5.153285
4.995213
1.031645
self.add_state_execution_output_to_scoped_data(self.child_state.output_data, self.child_state) self.update_scoped_variables_with_output_dictionary(self.child_state.output_data, self.child_state) self.execution_history.push_return_history_item( self.child_state, CallType.EXECUTE, self, self.child_state.output_data) # not explicitly connected preempted outcomes are implicit connected to parent preempted outcome transition = self.get_transition_for_outcome(self.child_state, self.child_state.final_outcome) if transition is None: transition = self.handle_no_transition(self.child_state) # if the transition is still None, then the child_state was preempted or aborted, in this case # return if transition is None: return True self.last_transition = transition self.child_state = self.get_state_for_transition(transition) if transition is not None and self.child_state is self: self.final_outcome = self.outcomes[transition.to_outcome] if self.child_state is self: singleton.state_machine_execution_engine._modify_run_to_states(self) return False
def _handle_forward_execution_after_child_execution(self)
Cleanup the former child state execution and prepare for the next state execution in the forward execution case. :return: a flag to indicate if normal child state execution should abort
5.620065
5.44152
1.032812
if self.last_child: self.last_child.state_execution_status = StateExecutionStatus.INACTIVE if not self.backward_execution: if self.last_error: self.output_data['error'] = copy.deepcopy(self.last_error) self.write_output_data() self.check_output_data_type() self.execution_history.push_return_history_item(self, CallType.CONTAINER, self, self.output_data) # add error message from child_state to own output_data self.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE if self.preempted: self.final_outcome = Outcome(-2, "preempted") self.child_state = None self.last_child = None return self.finalize(self.final_outcome)
def _finalize_hierarchy(self)
This function finalizes the execution of a hierarchy state. It sets the correct status and manages the output data handling. :return:
6.565154
6.078319
1.080094
icons = {'Libraries': constants.SIGN_LIB, 'States Tree': constants.ICON_TREE, 'Global Variables': constants.ICON_GLOB, 'Modification History': constants.ICON_HIST, 'Execution History': constants.ICON_EHIST, 'network': constants.ICON_NET} for notebook in self.left_bar_notebooks: for i in range(notebook.get_n_pages()): child = notebook.get_nth_page(i) tab_label = notebook.get_tab_label(child) tab_label_text = tab_label.get_text() notebook.set_tab_label(child, gui_helper_label.create_tab_header_label(tab_label_text, icons)) notebook.set_tab_reorderable(child, True) notebook.set_tab_detachable(child, True)
def rotate_and_detach_tab_labels(self)
Rotates tab labels of a given notebook by 90 degrees and makes them detachable. :param notebook: GTK Notebook container, whose tab labels are to be rotated and made detachable
4.432632
4.363257
1.0159
found = False for notebook in self.left_bar_notebooks: for i in range(notebook.get_n_pages()): if gui_helper_label.get_notebook_tab_title(notebook, i) == gui_helper_label.get_widget_title(tab_label): found = True break if found: notebook.set_current_page(i) break
def bring_tab_to_the_top(self, tab_label)
Find tab with label tab_label in list of notebooks and set it to the current page. :param tab_label: String containing the label of the tab to be focused
4.058688
4.26171
0.952361
task_string = "create transition" sub_task_string = "to parent state" selected_state_m, msg = get_selected_single_state_model_and_check_for_its_parent() if selected_state_m is None: logger.warning("Can not {0} {1}: {2}".format(task_string, sub_task_string, msg)) return logger.debug("Check to {0} {1} ...".format(task_string, sub_task_string)) state = selected_state_m.state parent_state = state.parent # find all possible from outcomes from_outcomes = get_all_outcomes_except_of_abort_and_preempt(state) # find lowest valid outcome id possible_oc_ids = [oc_id for oc_id in state.parent.outcomes.keys() if oc_id >= 0] possible_oc_ids.sort() to_outcome = state.parent.outcomes[possible_oc_ids[0]] oc_connected_to_parent = [oc for oc in from_outcomes if is_outcome_connect_to_state(oc, parent_state.state_id)] oc_not_connected = [oc for oc in from_outcomes if not state.parent.get_transition_for_outcome(state, oc)] if all(oc in oc_connected_to_parent for oc in from_outcomes): logger.info("Remove transition {0} because all outcomes are connected to it.".format(sub_task_string)) for from_outcome in oc_connected_to_parent: transition = parent_state.get_transition_for_outcome(state, from_outcome) parent_state.remove(transition) elif oc_not_connected: logger.debug("Create transition {0} ... ".format(sub_task_string)) for from_outcome in from_outcomes: parent_state.add_transition(state.state_id, from_outcome.outcome_id, parent_state.state_id, to_outcome.outcome_id) else: if remove_transitions_if_target_is_the_same(from_outcomes): logger.info("Removed transitions origin from outcomes of selected state {0}" "because all point to the same target.".format(sub_task_string)) return add_transitions_from_selected_state_to_parent() logger.info("Will not create transition {0}: Not clear situation of connected transitions." "There will be no transitions to other states be touched.".format(sub_task_string)) return True
def add_transitions_from_selected_state_to_parent()
Generates the default success transition of a state to its parent success port :return:
3.856989
3.930946
0.981186
task_string = "create transition" sub_task_string = "to closest sibling state" selected_state_m, msg = get_selected_single_state_model_and_check_for_its_parent() if selected_state_m is None: logger.warning("Can not {0} {1}: {2}".format(task_string, sub_task_string, msg)) return logger.debug("Check to {0} {1} ...".format(task_string, sub_task_string)) state = selected_state_m.state parent_state = state.parent # find closest other state to connect to -> to_state closest_sibling_state_tuple = gui_helper_meta_data.get_closest_sibling_state(selected_state_m, 'outcome') if closest_sibling_state_tuple is None: logger.info("Can not {0} {1}: There is no other sibling state.".format(task_string, sub_task_string)) return distance, sibling_state_m = closest_sibling_state_tuple to_state = sibling_state_m.state # find all possible from outcomes from_outcomes = get_all_outcomes_except_of_abort_and_preempt(state) from_oc_not_connected = [oc for oc in from_outcomes if not state.parent.get_transition_for_outcome(state, oc)] # all ports not connected connect to next state income if from_oc_not_connected: logger.debug("Create transition {0} ...".format(sub_task_string)) for from_outcome in from_oc_not_connected: parent_state.add_transition(state.state_id, from_outcome.outcome_id, to_state.state_id, None) # no transitions are removed if not all connected to the same other state else: target = remove_transitions_if_target_is_the_same(from_outcomes) if target: target_state_id, _ = target if not target_state_id == to_state.state_id: logger.info("Removed transitions from outcomes {0} " "because all point to the same target.".format(sub_task_string.replace('closest ', ''))) add_transitions_to_closest_sibling_state_from_selected_state() else: logger.info("Removed transitions from outcomes {0} " "because all point to the same target.".format(sub_task_string)) return True logger.info("Will not {0} {1}: Not clear situation of connected transitions." "There will be no transitions to other states be touched.".format(task_string, sub_task_string)) return True
def add_transitions_to_closest_sibling_state_from_selected_state()
Generates the outcome transitions from outcomes with positive outcome_id to the closest next state :return:
4.154297
4.149578
1.001137
for idx, row in network.buses.iterrows(): wkt_geom = to_shape(row['geom']) network.buses.loc[idx, 'x'] = wkt_geom.x network.buses.loc[idx, 'y'] = wkt_geom.y return network
def add_coordinates(network)
Add coordinates to nodes based on provided geom Parameters ---------- network : PyPSA network container Returns ------- Altered PyPSA network container ready for plotting
2.918243
3.207088
0.909936
cmap = plt.cm.jet array_line = [['Line'] * len(networkA.lines), networkA.lines.index] extension_lines = pd.Series(100 *\ ((networkA.lines.s_nom_opt - \ networkB.lines.s_nom_opt)/\ networkA.lines.s_nom_opt ).values,\ index=array_line) array_link = [['Link'] * len(networkA.links), networkA.links.index] extension_links = pd.Series(100 * ((networkA.links.p_nom_opt -\ networkB.links.p_nom_opt)/\ networkA.links.p_nom_opt).values,\ index=array_link) extension = extension_lines.append(extension_links) ll = networkA.plot( line_colors=extension, line_cmap=cmap, bus_sizes=0, title="Derivation of AC- and DC-line extension", line_widths=2) if not boundaries: v = np.linspace(min(extension), max(extension), 101) boundaries = [min(extension).round(0), max(extension).round(0)] else: v = np.linspace(boundaries[0], boundaries[1], 101) if not extension_links.empty: cb_Link = plt.colorbar(ll[2], boundaries=v, ticks=v[0:101:10]) cb_Link.set_clim(vmin=boundaries[0], vmax=boundaries[1]) cb_Link.remove() cb = plt.colorbar(ll[1], boundaries=v, ticks=v[0:101:10], fraction=0.046, pad=0.04) cb.set_clim(vmin=boundaries[0], vmax=boundaries[1]) cb.set_label('line extension derivation in %') if filename is None: plt.show() else: plt.savefig(filename) plt.close()
def network_expansion_diff (networkA, networkB, filename=None, boundaries=[])
Plot relative network expansion derivation of AC- and DC-lines. Parameters ---------- networkA: PyPSA network container Holds topology of grid including results from powerflow analysis networkB: PyPSA network container Holds topology of grid including results from powerflow analysis filename: str or None Save figure in this direction boundaries: array Set boundaries of heatmap axis
3.328017
3.080211
1.080451
cmap = plt.cm.jet array_line = [['Line'] * len(network.lines), network.lines.index] load_lines = pd.Series(abs((network.lines_t.p0.mul( network.snapshot_weightings, axis=0).sum() / (network.lines.s_nom))).data, index=array_line) array_link = [['Link'] * len(network.links), network.links.index] load_links = pd.Series(abs((network.links_t.p0.mul( network.snapshot_weightings, axis=0).sum() / (network.links.p_nom))).data, index=array_link) load_hours = load_lines.append(load_links) ll = network.plot(line_colors=load_hours, line_cmap=cmap, bus_sizes=0, title="Full load-hours of lines", line_widths=2) if not boundaries: cb = plt.colorbar(ll[1]) cb_Link = plt.colorbar(ll[2]) elif boundaries: v = np.linspace(boundaries[0], boundaries[1], 101) cb_Link = plt.colorbar(ll[2], boundaries=v, ticks=v[0:101:10]) cb_Link.set_clim(vmin=boundaries[0], vmax=boundaries[1]) cb = plt.colorbar(ll[1], boundaries=v, ticks=v[0:101:10]) cb.set_clim(vmin=boundaries[0], vmax=boundaries[1]) if two_cb: cb_Link.set_label('Number of full-load hours of DC-lines') cb.set_label('Number of full-load hours of AC-lines') else: cb.set_label('Number of full-load hours') if filename is None: plt.show() else: plt.savefig(filename) plt.close()
def full_load_hours(network, boundaries=[], filename=None, two_cb=False)
Plot loading of lines in equivalten full load hours. Parameters ---------- network: PyPSA network container Holds topology of grid including results from powerflow analysis filename: str or None Save figure in this direction boundaries: array Set boundaries of heatmap axis two_cb: bool Choose if an extra colorbar for DC-lines is plotted
3.003042
2.882145
1.041947
cmap_line = plt.cm.jet q_flows_max = abs(network.lines_t.q0.abs().max()/(network.lines.s_nom)) ll = network.plot(line_colors = q_flows_max, line_cmap = cmap_line) boundaries = [min(q_flows_max), max(q_flows_max)] v = np.linspace(boundaries[0], boundaries[1], 101) cb = plt.colorbar(ll[1], boundaries=v, ticks=v[0:101:10]) cb.set_clim(vmin=boundaries[0], vmax=boundaries[1])
def plot_q_flows(network)
Plot maximal reactive line load. Parameters ---------- network: PyPSA network container Holds topology of grid including results from powerflow analysis
4.878423
4.782852
1.019982
cmap_line = plt.cm.jet cmap_link = plt.cm.jet array_line = [['Line'] * len(network.lines), network.lines.index] array_link = [['Link'] * len(network.links), network.links.index] if network.lines_t.q0.empty: load_lines = pd.Series((abs(network.lines_t.p0).max( ) / (network.lines.s_nom) * 100).data, index=array_line) else: load_lines = pd.Series(((network.lines_t.p0**2 + network.lines_t.q0 ** 2).max().apply(sqrt)/ (network.lines.s_nom) * 100).data, index=array_line) load_links = pd.Series((abs(network.links_t.p0.max( ) / (network.links.p_nom)) * 100).data, index=array_link) max_load = load_lines.append(load_links) ll = network.plot( line_colors=max_load, line_cmap={ 'Line': cmap_line, 'Link': cmap_link}, bus_sizes=0, title="Maximum of line loading", line_widths=2) if not boundaries: boundaries = [min(max_load), max(max_load)] v = np.linspace(boundaries[0], boundaries[1], 101) cb = plt.colorbar(ll[1], boundaries=v, ticks=v[0:101:10]) cb.set_clim(vmin=boundaries[0], vmax=boundaries[1]) cb_Link = plt.colorbar(ll[2], boundaries=v, ticks=v[0:101:10]) cb_Link.set_clim(vmin=boundaries[0], vmax=boundaries[1]) if two_cb: # cb_Link.set_label('Maximum load of DC-lines %') cb.set_label('Maximum load of AC-lines %') else: cb.set_label('Maximum load in %') if filename is None: plt.show() else: plt.savefig(filename) plt.close()
def max_load(network, boundaries=[], filename=None, two_cb=False)
Plot maximum loading of each line. Parameters ---------- network: PyPSA network container Holds topology of grid including results from powerflow analysis filename: str or None Save figure in this direction boundaries: array Set boundaries of heatmap axis two_cb: bool Choose if an extra colorbar for DC-lines is plotted
3.162608
3.006125
1.052055
cmap_line = plt.cm.jet cmap_link = plt.cm.jet array_line = [['Line'] * len(network.lines), network.lines.index] load_lines = pd.Series(((abs(network.lines_t.p0[( abs(network.lines_t.p0.mul(network.snapshot_weightings, axis=0)) / network.lines.s_nom_opt >= min_load) & ( abs(network.lines_t.p0.mul(network.snapshot_weightings, axis=0)) / network.lines.s_nom_opt <= max_load)]) / abs(network.lines_t.p0[( abs(network.lines_t.p0) / network.lines.s_nom_opt >= min_load) & (abs(network.lines_t.p0) / network.lines.s_nom_opt <= max_load)])) .sum()).data, index=array_line) array_link = [['Link'] * len(network.links), network.links.index] load_links = pd.Series(((abs(network.links_t.p0[( abs(network.links_t.p0.mul(network.snapshot_weightings, axis=0)) / network.links.p_nom_opt >= min_load) & ( abs(network.links_t.p0.mul(network.snapshot_weightings, axis=0)) / network.links.p_nom_opt <= max_load)]) / abs(network.links_t.p0[( abs(network.links_t.p0) / network.links.p_nom_opt >= min_load) & (abs(network.links_t.p0) / network.links.p_nom_opt <= max_load)])) .sum()).data, index=array_link) load_hours = load_lines.append(load_links) ll = network.plot( line_colors=load_hours, line_cmap={ 'Line': cmap_line, 'Link': cmap_link}, bus_sizes=0, title="Number of hours with more then 90% load", line_widths=2) v1 = np.linspace(boundaries[0], boundaries[1], 101) v = np.linspace(boundaries[0], boundaries[1], 101) cb_Link = plt.colorbar(ll[2], boundaries=v1, ticks=v[0:101:10]) cb_Link.set_clim(vmin=boundaries[0], vmax=boundaries[1]) cb = plt.colorbar(ll[1], boundaries=v, ticks=v[0:101:10]) cb.set_clim(vmin=boundaries[0], vmax=boundaries[1]) cb.set_label('Number of hours')
def load_hours(network, min_load=0.9, max_load=1, boundaries=[0, 8760])
Plot number of hours with line loading in selected range. Parameters ---------- network: PyPSA network container Holds topology of grid including results from powerflow analysis min_load: float Choose lower bound of relative load max_load: float Choose upper bound of relative load boundaries: array Set boundaries of heatmap axis
2.377888
2.323559
1.023382
renewables = network.generators[ network.generators.carrier.isin(['wind_onshore', 'wind_offshore', 'solar', 'run_of_river', 'wind'])] renewables_t = network.generators.p_nom[renewables.index] * \ network.generators_t.p_max_pu[renewables.index].mul( network.snapshot_weightings, axis=0) load = network.loads_t.p_set.mul(network.snapshot_weightings, axis=0).\ sum(axis=1) all_renew = renewables_t.sum(axis=1) residual_load = load - all_renew plot = residual_load.plot( title = 'Residual load', drawstyle='steps', lw=2, color='red', legend=False) plot.set_ylabel("MW") # sorted curve sorted_residual_load = residual_load.sort_values( ascending=False).reset_index() plot1 = sorted_residual_load.plot( title='Sorted residual load', drawstyle='steps', lw=2, color='red', legend=False) plot1.set_ylabel("MW")
def plot_residual_load(network)
Plots residual load summed of all exisiting buses. Parameters ---------- network : PyPSA network containter
3.351539
3.310589
1.012369
if resolution == 'GW': reso_int = 1e3 elif resolution == 'MW': reso_int = 1 elif resolution == 'KW': reso_int = 0.001 # sum for all buses if bus is None: p_by_carrier = pd.concat([network.generators_t.p[network.generators [network.generators.control != 'Slack'].index], network.generators_t.p.mul( network.snapshot_weightings, axis=0) [network.generators[network.generators.control == 'Slack'].index] .iloc[:, 0].apply(lambda x: x if x > 0 else 0)], axis=1)\ .groupby(network.generators.carrier, axis=1).sum() load = network.loads_t.p.sum(axis=1) if hasattr(network, 'foreign_trade'): trade_sum = network.foreign_trade.sum(axis=1) p_by_carrier['imports'] = trade_sum[trade_sum > 0] p_by_carrier['imports'] = p_by_carrier['imports'].fillna(0) # sum for a single bus elif bus is not None: filtered_gens = network.generators[network.generators['bus'] == bus] p_by_carrier = network.generators_t.p.mul(network.snapshot_weightings, axis=0).groupby(filtered_gens.carrier, axis=1).abs().sum() filtered_load = network.loads[network.loads['bus'] == bus] load = network.loads_t.p.mul(network.snapshot_weightings, axis=0)\ [filtered_load.index] colors = coloring() # TODO: column reordering based on available columns fig, ax = plt.subplots(1, 1) fig.set_size_inches(12, 6) colors = [colors[col] for col in p_by_carrier.columns] if len(colors) == 1: colors = colors[0] (p_by_carrier / reso_int).plot(kind="area", ax=ax, linewidth=0, color=colors) (load / reso_int).plot(ax=ax, legend='load', lw=2, color='darkgrey', style='--') ax.legend(ncol=4, loc="upper left") ax.set_ylabel(resolution) ax.set_xlabel("") matplotlib.rcParams.update({'font.size': 22}) if filename is None: plt.show() else: plt.savefig(filename) plt.close()
def plot_stacked_gen(network, bus=None, resolution='GW', filename=None)
Plot stacked sum of generation grouped by carrier type Parameters ---------- network : PyPSA network container bus: string Plot all generators at one specific bus. If none, sum is calulated for all buses resolution: string Unit for y-axis. Can be either GW/MW/KW Returns ------- Plot
3.187044
3.139623
1.015104
def gen_by_c(network): gen = pd.concat([network.generators_t.p.mul( network.snapshot_weightings, axis=0)[network.generators [network.generators.control != 'Slack'].index], network.generators_t.p.mul( network.snapshot_weightings, axis=0)[network.generators [network. generators.control == 'Slack'].index] .iloc[:, 0].apply(lambda x: x if x > 0 else 0)], axis=1)\ .groupby(network.generators.carrier,axis=1).sum() return gen gen = gen_by_c(networkB) gen_switches = gen_by_c(networkA) diff = gen_switches - gen colors = coloring() diff.drop(leave_out_carriers, axis=1, inplace=True) colors = [colors[col] for col in diff.columns] plot = diff.plot(kind='line', color=colors, use_index=False) plot.legend(loc='upper left', ncol=5, prop={'size': 8}) x = [] for i in range(0, len(diff)): x.append(i) plt.xticks(x, x) plot.set_xlabel('Timesteps') plot.set_ylabel('Difference in Generation in MW') plot.set_title('Difference in Generation') plt.tight_layout()
def plot_gen_diff( networkA, networkB, leave_out_carriers=[ 'geothermal', 'oil', 'other_non_renewable', 'reservoir', 'waste'])
Plot difference in generation between two networks grouped by carrier type Parameters ---------- networkA : PyPSA network container with switches networkB : PyPSA network container without switches leave_out_carriers : list of carriers to leave out (default to all small carriers) Returns ------- Plot
3.318051
3.264224
1.01649
x = np.array(network.buses['x']) y = np.array(network.buses['y']) alpha = np.array(network.buses_t.v_mag_pu.loc[network.snapshots[0]]) fig, ax = plt.subplots(1, 1) fig.set_size_inches(6, 4) cmap = plt.cm.jet if not boundaries: plt.hexbin(x, y, C=alpha, cmap=cmap, gridsize=100) cb = plt.colorbar() elif boundaries: v = np.linspace(boundaries[0], boundaries[1], 101) norm = matplotlib.colors.BoundaryNorm(v, cmap.N) plt.hexbin(x, y, C=alpha, cmap=cmap, gridsize=100, norm=norm) cb = plt.colorbar(boundaries=v, ticks=v[0:101:10], norm=norm) cb.set_clim(vmin=boundaries[0], vmax=boundaries[1]) cb.set_label('Voltage Magnitude per unit of v_nom') network.plot( ax=ax, line_widths=pd.Series(0.5, network.lines.index), bus_sizes=0) plt.show()
def plot_voltage(network, boundaries=[])
Plot voltage at buses as hexbin Parameters ---------- network : PyPSA network container boundaries: list of 2 values, setting the lower and upper bound of colorbar Returns ------- Plot
2.777469
2.73457
1.015688