code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
p_by_carrier = network.generators_t.p.groupby\ (network.generators.carrier, axis=1).sum() capacity = network.generators.groupby("carrier").sum().at[carrier, "p_nom"] p_available = network.generators_t.p_max_pu.multiply( network.generators["p_nom"]) p_available_by_carrier = p_available.groupby( network.generators.carrier, axis=1).sum() p_curtailed_by_carrier = p_available_by_carrier - p_by_carrier print(p_curtailed_by_carrier.sum()) p_df = pd.DataFrame({carrier + " available": p_available_by_carrier[carrier], carrier + " dispatched": p_by_carrier[carrier], carrier + " curtailed": p_curtailed_by_carrier[carrier]}) p_df[carrier + " capacity"] = capacity p_df[carrier + " curtailed"][p_df[carrier + " curtailed"] < 0.] = 0. fig, ax = plt.subplots(1, 1) fig.set_size_inches(12, 6) p_df[[carrier + " dispatched", carrier + " curtailed"] ].plot(kind="area", ax=ax, linewidth=3) p_df[[carrier + " available", carrier + " capacity"] ].plot(ax=ax, linewidth=3) ax.set_xlabel("") ax.set_ylabel("Power [MW]") ax.set_ylim([0, capacity * 1.1]) ax.legend() if filename is None: plt.show() else: plt.savefig(filename) plt.close()
def curtailment(network, carrier='solar', filename=None)
Plot curtailment of selected carrier Parameters ---------- network : PyPSA network container Holds topology of grid including results from powerflow analysis carrier: str Plot curtailemt of this carrier filename: str or None Save figure in this direction Returns ------- Plot
2.522874
2.571408
0.981126
stores = network.storage_units storage_distribution = network.storage_units.p_nom_opt[stores.index]\ .groupby(network.storage_units.bus)\ .sum().reindex(network.buses.index, fill_value=0.) fig, ax = plt.subplots(1, 1) fig.set_size_inches(6, 6) msd_max = storage_distribution.max() msd_median = storage_distribution[storage_distribution != 0].median() msd_min = storage_distribution[storage_distribution > 1].min() if msd_max != 0: LabelVal = int(log10(msd_max)) else: LabelVal = 0 if LabelVal < 0: LabelUnit = 'kW' msd_max, msd_median, msd_min = msd_max * \ 1000, msd_median * 1000, msd_min * 1000 storage_distribution = storage_distribution * 1000 elif LabelVal < 3: LabelUnit = 'MW' else: LabelUnit = 'GW' msd_max, msd_median, msd_min = msd_max / \ 1000, msd_median / 1000, msd_min / 1000 storage_distribution = storage_distribution / 1000 if sum(storage_distribution) == 0: network.plot(bus_sizes=0, ax=ax, title="No storages") else: network.plot( bus_sizes=storage_distribution * scaling, ax=ax, line_widths=0.3, title="Storage distribution") # Here we create a legend: # we'll plot empty lists with the desired size and label for area in [msd_max, msd_median, msd_min]: plt.scatter([], [], c='white', s=area * scaling, label='= ' + str(round(area, 0)) + LabelUnit + ' ') plt.legend(scatterpoints=1, labelspacing=1, title='Storage size') if filename is None: plt.show() else: plt.savefig(filename) plt.close()
def storage_distribution(network, scaling=1, filename=None)
Plot storage distribution as circles on grid nodes Displays storage size and distribution in network. Parameters ---------- network : PyPSA network container Holds topology of grid including results from powerflow analysis filename : str Specify filename If not given, figure will be show directly
2.900803
2.915169
0.995072
if techs is None: techs = networkA.generators.carrier.unique() else: techs = techs n_graphs = len(techs) n_cols = n_cols if n_graphs % n_cols == 0: n_rows = n_graphs // n_cols else: n_rows = n_graphs // n_cols + 1 fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols) size = 4 fig.set_size_inches(size * n_cols, size * n_rows) for i, tech in enumerate(techs): i_row = i // n_cols i_col = i % n_cols ax = axes[i_row, i_col] gensA = networkA.generators[networkA.generators.carrier == tech] gensB = networkB.generators[networkB.generators.carrier == tech] gen_distribution =\ networkA.generators_t.p.mul(networkA.snapshot_weightings, axis=0)\ [gensA.index].loc[networkA.snapshots[snapshot]].groupby( networkA.generators.bus).sum().reindex( networkA.buses.index, fill_value=0.) -\ networkB.generators_t.p.mul(networkB.snapshot_weightings, axis=0)\ [gensB.index].loc[networkB.snapshots[snapshot]].groupby( networkB.generators.bus).sum().reindex( networkB.buses.index, fill_value=0.) networkA.plot( ax=ax, bus_sizes=gen_size * abs(gen_distribution), bus_colors=gen_distribution, line_widths=0.1, bus_cmap=buscmap) ax.set_title(tech) if filename is None: plt.show() else: plt.savefig(filename) plt.close()
def gen_dist_diff( networkA, networkB, techs=None, snapshot=0, n_cols=3, gen_size=0.2, filename=None, buscmap=plt.cm.jet)
Difference in generation distribution Green/Yellow/Red colors mean that the generation at a location is bigger with switches than without Blue colors mean that the generation at a location is smaller with switches than without Parameters ---------- networkA : PyPSA network container Holds topology of grid with switches including results from powerflow analysis networkB : PyPSA network container Holds topology of grid without switches including results from powerflow analysis techs : dict type of technologies which shall be plotted snapshot : int snapshot n_cols : int number of columns of the plot gen_size : num size of generation bubbles at the buses filename : str Specify filename If not given, figure will be show directly
2.029393
2.083839
0.973872
if techs is None: techs = network.generators.carrier.unique() else: techs = techs n_graphs = len(techs) n_cols = n_cols if n_graphs % n_cols == 0: n_rows = n_graphs // n_cols else: n_rows = n_graphs // n_cols + 1 fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols) size = 4 fig.set_size_inches(size * n_cols, size * n_rows) for i, tech in enumerate(techs): i_row = i // n_cols i_col = i % n_cols ax = axes[i_row, i_col] gens = network.generators[network.generators.carrier == tech] gen_distribution = network.generators_t.p.mul(network. snapshot_weightings, axis=0)\ [gens.index].loc[network.snapshots[snapshot]].groupby( network.generators.bus).sum().reindex( network.buses.index, fill_value=0.) network.plot( ax=ax, bus_sizes=gen_size * gen_distribution, line_widths=0.1) ax.set_title(tech) if filename is None: plt.show() else: plt.savefig(filename) plt.close()
def gen_dist( network, techs=None, snapshot=1, n_cols=3, gen_size=0.2, filename=None)
Generation distribution Parameters ---------- network : PyPSA network container Holds topology of grid including results from powerflow analysis techs : dict type of technologies which shall be plotted snapshot : int snapshot n_cols : int number of columns of the plot gen_size : num size of generation bubbles at the buses filename : str Specify filename If not given, figure will be show directly
2.590289
2.620305
0.988545
if techs: gens = network.generators[network.generators.carrier.isin(techs)] elif techs is None: gens = network.generators techs = gens.carrier.unique() if item == 'capacity': dispatch = gens.p_nom.groupby([network.generators.bus, network.generators.carrier]).sum() elif item == 'energy': if networkB: dispatch_network =\ network.generators_t.p[gens.index].mul( network.snapshot_weightings, axis=0).groupby( [network.generators.bus, network.generators.carrier], axis=1).sum() dispatch_networkB =\ networkB.generators_t.p[gens.index].mul( networkB.snapshot_weightings, axis=0).groupby( [networkB.generators.bus, networkB.generators.carrier], axis=1).sum() dispatch = dispatch_network - dispatch_networkB if direction == 'positive': dispatch = dispatch[dispatch > 0].fillna(0) elif direction == 'negative': dispatch = dispatch[dispatch < 0].fillna(0) elif direction == 'absolute': pass else: return('No valid direction given.') dispatch = dispatch.sum() elif networkB is None: dispatch =\ network.generators_t.p[gens.index].mul( network.snapshot_weightings, axis=0).sum().groupby( [network.generators.bus, network.generators.carrier]).sum() fig, ax = plt.subplots(1, 1) scaling = 1/(max(abs(dispatch.groupby(level=0).sum())))*scaling if direction != 'absolute': colors = coloring() subcolors = {a: colors[a] for a in techs} dispatch = dispatch.abs() + 1e-9 else: dispatch = dispatch.sum(level=0) colors = {s[0]: 'green' if s[1] > 0 else 'red' for s in dispatch.iteritems()} dispatch = dispatch.abs() subcolors = {'negative': 'red', 'positive': 'green'} network.plot( bus_sizes=dispatch * scaling, bus_colors=colors, line_widths=0.2, margin=0.01, ax=ax) fig.subplots_adjust(right=0.8) plt.subplots_adjust(wspace=0, hspace=0.001) patchList = [] for key in subcolors: data_key = mpatches.Patch(color=subcolors[key], label=key) patchList.append(data_key) ax.legend(handles=patchList, loc='upper left') ax.autoscale() if filename is None: plt.show() else: plt.savefig(filename) plt.close() return
def nodal_gen_dispatch( network, networkB=None, techs=['wind_onshore', 'solar'], item='energy', direction=None, scaling=1, filename=None)
Plot nodal dispatch or capacity. If networkB is given, difference in dispatch is plotted. Parameters ---------- network : PyPSA network container Holds topology of grid including results from powerflow analysis networkB : PyPSA network container If given and item is 'energy', difference in dispatch between network and networkB is plotted. If item is 'capacity', networkB is ignored. default None techs : None or list, Techs to plot. If None, all techs are plotted. default ['wind_onshore', 'solar'] item : str Specifies the plotted item. Options are 'energy' and 'capacity'. default 'energy' direction : str Only considered if networkB is given and item is 'energy'. Specifies the direction of change in dispatch between network and networkB. If 'positive', generation per tech which is higher in network than in networkB is plotted. If 'negative', generation per tech whcih is lower in network than in networkB is plotted. If 'absolute', total change per node is plotted. Green nodes have higher dispatch in network than in networkB. Red nodes have lower dispatch in network than in networkB. default None scaling : int Scaling to change plot sizes. default 1 filename : path to folder
2.822678
2.758387
1.023307
fig, ax = plt.subplots(1, 1) gen = network.generators_t.p.groupby(network.generators.bus, axis=1).sum() load = network.loads_t.p.groupby(network.loads.bus, axis=1).sum() if snapshot == 'all': diff = (gen - load).sum() else: timestep = network.snapshots[snapshot] diff = (gen - load).loc[timestep] colors = {s[0]: 'green' if s[1] > 0 else 'red' for s in diff.iteritems()} subcolors = {'Net Consumer': 'red', 'Net Producer': 'green'} diff = diff.abs() network.plot( bus_sizes=diff * scaling, bus_colors=colors, line_widths=0.2, margin=0.01, ax=ax) patchList = [] for key in subcolors: data_key = mpatches.Patch(color=subcolors[key], label=key) patchList.append(data_key) ax.legend(handles=patchList, loc='upper left') ax.autoscale() if filename: plt.savefig(filename) plt.close() return
def nodal_production_balance( network, snapshot='all', scaling=0.00001, filename=None)
Plots the nodal difference between generation and consumption. Parameters ---------- network : PyPSA network container Holds topology of grid including results from powerflow analysis snapshot : int or 'all' Snapshot to plot. default 'all' scaling : int Scaling to change plot sizes. default 0.0001 filename : path to folder
3.276385
3.349572
0.97815
sbatt = network.storage_units.index[(network.storage_units.p_nom_opt>1) & (network.storage_units.capital_cost>10) & (network.storage_units.max_hours==6)] shydr = network.storage_units.index[(network.storage_units.p_nom_opt>1) & (network.storage_units.capital_cost>10) & (network.storage_units.max_hours==168)] cap_batt = (network.storage_units.max_hours[sbatt] * network.storage_units.p_nom_opt[sbatt]).sum() cap_hydr = (network.storage_units.max_hours[shydr] * network.storage_units.p_nom_opt[shydr]).sum() fig, ax = plt.subplots(1, 1) if network.storage_units.p_nom_opt[sbatt].sum() < 1 and \ network.storage_units.p_nom_opt[shydr].sum() < 1: print("No storage unit to plot") elif network.storage_units.p_nom_opt[sbatt].sum() > 1 and \ network.storage_units.p_nom_opt[shydr].sum() < 1: (network.storage_units_t.p[sbatt].sum(axis=1).sort_values( ascending=False).reset_index() / \ network.storage_units.p_nom_opt[sbatt].sum())[0].plot( ax=ax, label="Battery storage", color='orangered') elif network.storage_units.p_nom_opt[sbatt].sum() < 1 and \ network.storage_units.p_nom_opt[shydr].sum() > 1: (network.storage_units_t.p[shydr].sum(axis=1).sort_values( ascending=False).reset_index() / \ network.storage_units.p_nom_opt[shydr].sum())[0].plot( ax=ax, label="Hydrogen storage", color='teal') else: (network.storage_units_t.p[sbatt].sum(axis=1).sort_values( ascending=False).reset_index() / \ network.storage_units.p_nom_opt[sbatt].sum())[0].plot( ax=ax, label="Battery storage", color='orangered') (network.storage_units_t.p[shydr].sum(axis=1).sort_values( ascending=False).reset_index() / \ network.storage_units.p_nom_opt[shydr].sum())[0].plot( ax=ax, label="Hydrogen storage", color='teal') ax.set_xlabel("") ax.set_ylabel("Storage dispatch in p.u. \n <- charge - discharge ->") ax.set_ylim([-1.05,1.05]) ax.legend() ax.set_title("Sorted duration curve of storage dispatch") if filename is None: plt.show() else: plt.savefig(filename,figsize=(3,4),bbox_inches='tight') plt.close() return
def storage_soc_sorted(network, filename = None)
Plots the soc (state-pf-charge) of extendable storages Parameters ---------- network : PyPSA network container Holds topology of grid including results from powerflow analysis filename : path to folder
1.991628
2.011743
0.990001
# logger.debug("StateMachineEditionChangeHistory register state_machine old/new sm_id %s/%s" % # (self.__my_selected_sm_id, self.model.selected_state_machine_id)) # relieve old models if self.__my_selected_sm_id is not None: # no old models available self.relieve_model(self._selected_sm_model.history) if self.model.selected_state_machine_id is not None and global_gui_config.get_config_value('HISTORY_ENABLED'): # set own selected state machine id self.__my_selected_sm_id = self.model.selected_state_machine_id # observe new models self._selected_sm_model = self.model.state_machines[self.__my_selected_sm_id] if self._selected_sm_model.history: self.observe_model(self._selected_sm_model.history) self.update(None, None, None) else: logger.warning("The history is enabled but not generated {0}. {1}" "".format(self._selected_sm_model.state_machine.state_machine_id, self._selected_sm_model.state_machine.file_system_path)) else: if self.__my_selected_sm_id is not None: self.doing_update = True self.history_tree_store.clear() self.doing_update = False self.__my_selected_sm_id = None self._selected_sm_model = None
def register(self)
Change the state machine that is observed for new selected states to the selected state machine. :return:
4.10946
3.845108
1.06875
shortcut_manager.add_callback_for_action("undo", self.undo) shortcut_manager.add_callback_for_action("redo", self.redo)
def register_actions(self, shortcut_manager)
Register callback methods for triggered actions :param rafcon.gui.shortcut_manager.ShortcutManager shortcut_manager:
3.688374
3.062398
1.204407
# TODO re-organize as request to controller which holds source-editor-view or any parent to it for key, tab in gui_singletons.main_window_controller.get_controller('states_editor_ctrl').tabs.items(): if tab['controller'].get_controller('source_ctrl') is not None and \ react_to_event(self.view, tab['controller'].get_controller('source_ctrl').view.textview, (key_value, modifier_mask)) or \ tab['controller'].get_controller('description_ctrl') is not None and \ react_to_event(self.view, tab['controller'].get_controller('description_ctrl').view.textview, (key_value, modifier_mask)): return False if self._selected_sm_model is not None: self._selected_sm_model.history.undo() return True else: logger.debug("Undo is not possible now as long as no state_machine is selected.")
def undo(self, key_value, modifier_mask)
Undo for selected state-machine if no state-source-editor is open and focused in states-editor-controller. :return: True if a undo was performed, False if focus on source-editor. :rtype: bool
6.222304
5.263705
1.182115
if self.has_error: exception_data = self._raw.get("exception", {}) return exception_data.get("message") return None
def exception_message(self) -> Union[str, None]
On Lavalink V3, if there was an exception during a load or get tracks call this property will be populated with the error message. If there was no error this property will be ``None``.
4.908858
4.834388
1.015404
self.__check_node_ready() url = self._uri + quote(str(query)) data = await self._get(url) if isinstance(data, dict): return LoadResult(data) elif isinstance(data, list): modified_data = { "loadType": LoadType.V2_COMPAT, "tracks": data } return LoadResult(modified_data)
async def load_tracks(self, query) -> LoadResult
Executes a loadtracks request. Only works on Lavalink V3. Parameters ---------- query : str Returns ------- LoadResult
5.04823
5.616357
0.898844
if not self._warned: log.warn("get_tracks() is now deprecated. Please switch to using load_tracks().") self._warned = True result = await self.load_tracks(query) return result.tracks
async def get_tracks(self, query) -> Tuple[Track, ...]
Gets tracks from lavalink. Parameters ---------- query : str Returns ------- Tuple[Track, ...]
4.483798
4.900452
0.914976
rel_path = os.path.join(*rel_path) target_path = os.path.join("share", *rel_path.split(os.sep)[1:]) # remove source/ (package_dir) if "path_to_file" in kwargs and kwargs["path_to_file"]: source_files = [rel_path] target_path = os.path.dirname(target_path) else: source_files = [os.path.join(rel_path, filename) for filename in os.listdir(rel_path)] return target_path, source_files
def get_data_files_tuple(*rel_path, **kwargs)
Return a tuple which can be used for setup.py's data_files :param tuple path: List of path elements pointing to a file or a directory of files :param dict kwargs: Set path_to_file to True is `path` points to a file :return: tuple of install directory and list of source files :rtype: tuple(str, [str])
3.147067
2.813834
1.118427
result_list = list() rel_root_dir = os.path.join(*rel_root_path) share_target_root = os.path.join("share", kwargs.get("share_target_root", "rafcon")) distutils.log.debug("recursively generating data files for folder '{}' ...".format( rel_root_dir)) for dir_, _, files in os.walk(rel_root_dir): relative_directory = os.path.relpath(dir_, rel_root_dir) file_list = list() for fileName in files: rel_file_path = os.path.join(relative_directory, fileName) abs_file_path = os.path.join(rel_root_dir, rel_file_path) file_list.append(abs_file_path) if len(file_list) > 0: # this is a valid path in ~/.local folder: e.g. share/rafcon/libraries/generic/wait target_path = os.path.join(share_target_root, relative_directory) result_list.append((target_path, file_list)) return result_list
def get_data_files_recursively(*rel_root_path, **kwargs)
Adds all files of the specified path to a data_files compatible list :param tuple rel_root_path: List of path elements pointing to a directory of files :return: list of tuples of install directory and list of source files :rtype: list(tuple(str, [str]))
3.211619
3.237546
0.991992
assets_folder = path.join('source', 'rafcon', 'gui', 'assets') share_folder = path.join(assets_folder, 'share') themes_folder = path.join(share_folder, 'themes', 'RAFCON') examples_folder = path.join('share', 'examples') libraries_folder = path.join('share', 'libraries') gui_data_files = [ get_data_files_tuple(assets_folder, 'splashscreens'), get_data_files_tuple(assets_folder, 'fonts', 'FontAwesome'), get_data_files_tuple(assets_folder, 'fonts', 'Source Sans Pro'), get_data_files_tuple(themes_folder, 'gtk-3.0', 'gtk.css', path_to_file=True), get_data_files_tuple(themes_folder, 'gtk-3.0', 'gtk-dark.css', path_to_file=True), get_data_files_tuple(themes_folder, 'assets'), get_data_files_tuple(themes_folder, 'sass'), get_data_files_tuple(themes_folder, 'gtk-sourceview'), get_data_files_tuple(themes_folder, 'colors.json', path_to_file=True), get_data_files_tuple(themes_folder, 'colors-dark.json', path_to_file=True) ] # print("gui_data_files", gui_data_files) icon_data_files = get_data_files_recursively(path.join(share_folder, 'icons'), share_target_root="icons") # print("icon_data_files", icon_data_files) locale_data_files = create_mo_files() # example tuple # locale_data_files = [('share/rafcon/locale/de/LC_MESSAGES', ['source/rafcon/locale/de/LC_MESSAGES/rafcon.mo'])] # print("locale_data_files", locale_data_files) version_data_file = [("./", ["VERSION"])] desktop_data_file = [("share/applications", [path.join('share', 'applications', 'de.dlr.rm.RAFCON.desktop')])] examples_data_files = get_data_files_recursively(examples_folder, share_target_root=path.join("rafcon", "examples")) libraries_data_files = get_data_files_recursively(libraries_folder, share_target_root=path.join("rafcon", "libraries")) generated_data_files = gui_data_files + icon_data_files + locale_data_files + version_data_file + \ desktop_data_file + examples_data_files + libraries_data_files # for elem in generated_data_files: # print(elem) return generated_data_files
def generate_data_files()
Generate the data_files list used in the setup function :return: list of tuples of install directory and list of source files :rtype: list(tuple(str, [str]))
2.764007
2.734873
1.010653
# Allow to handle a subset of events while having a grabbed tool (between a button press & release event) suppressed_grabbed_tool = None if event.type in (Gdk.EventType.SCROLL, Gdk.EventType.KEY_PRESS, Gdk.EventType.KEY_RELEASE): suppressed_grabbed_tool = self._grabbed_tool self._grabbed_tool = None rt = super(ToolChain, self).handle(event) if suppressed_grabbed_tool: self._grabbed_tool = suppressed_grabbed_tool return rt
def handle(self, event)
Handle the event by calling each tool until the event is handled or grabbed. If a tool is returning True on a button press event, the motion and button release events are also passed to this
4.644051
4.386493
1.058716
view = self.view if self._move_name_v: yield InMotion(self._item, view) else: selected_items = set(view.selected_items) for item in selected_items: if not isinstance(item, Item): continue yield InMotion(item, view)
def movable_items(self)
Filter selection Filter items of selection that cannot be moved (i.e. are not instances of `Item`) and return the rest.
6.515059
5.83957
1.115674
if event.get_button()[1] not in self._buttons: return False # Only handle events for registered buttons (left mouse clicks) if event.get_state()[1] & constants.RUBBERBAND_MODIFIER: return False # Mouse clicks with pressed shift key are handled in another tool # Special case: moving the NameView # This is only allowed, if the hovered item is a NameView and the Ctrl-key is pressed and the only selected # item is the parental StateView. In this case, the selection and _item will no longer be looked at, # but only _move_name_v self._item = self.get_item() if isinstance(self._item, NameView): selected_items = self.view.selected_items if event.get_state()[1] & Gdk.ModifierType.CONTROL_MASK and len(selected_items) == 1 and next(iter(selected_items)) is \ self._item.parent: self._move_name_v = True else: self._item = self._item.parent if not self._move_name_v: self._old_selection = self.view.selected_items if self._item not in self.view.selected_items: # When items are to be moved, a button-press should not cause any deselection. # However, the selection is stored, in case no move operation is performed. self.view.handle_new_selection(self._item) if not self.view.is_focus(): self.view.grab_focus() return True
def on_button_press(self, event)
Select items When the mouse button is pressed, the selection is updated. :param event: The button event
6.300363
6.429091
0.979977
affected_models = {} for inmotion in self._movable_items: inmotion.move((event.x, event.y)) rel_pos = gap_helper.calc_rel_pos_to_parent(self.view.canvas, inmotion.item, inmotion.item.handles()[NW]) if isinstance(inmotion.item, StateView): state_v = inmotion.item state_m = state_v.model self.view.canvas.request_update(state_v) if state_m.get_meta_data_editor()['rel_pos'] != rel_pos: state_m.set_meta_data_editor('rel_pos', rel_pos) affected_models[state_m] = ("position", True, state_v) elif isinstance(inmotion.item, NameView): state_v = inmotion.item state_m = self.view.canvas.get_parent(state_v).model self.view.canvas.request_update(state_v) if state_m.get_meta_data_editor()['name']['rel_pos'] != rel_pos: state_m.set_meta_data_editor('name.rel_pos', rel_pos) affected_models[state_m] = ("name_position", False, state_v) elif isinstance(inmotion.item, TransitionView): transition_v = inmotion.item transition_m = transition_v.model self.view.canvas.request_update(transition_v) current_waypoints = gap_helper.get_relative_positions_of_waypoints(transition_v) old_waypoints = transition_m.get_meta_data_editor()['waypoints'] if current_waypoints != old_waypoints: transition_m.set_meta_data_editor('waypoints', current_waypoints) affected_models[transition_m] = ("waypoints", False, transition_v) if len(affected_models) == 1: model = next(iter(affected_models)) change, affects_children, view = affected_models[model] self.view.graphical_editor.emit('meta_data_changed', model, change, affects_children) elif len(affected_models) > 1: # if more than one item has been moved, we need to call the meta_data_changed signal on a common parent common_parents = None for change, affects_children, view in affected_models.values(): parents_of_view = set(self.view.canvas.get_ancestors(view)) if common_parents is None: common_parents = parents_of_view else: common_parents = common_parents.intersection(parents_of_view) assert len(common_parents) > 0, "The selected elements do not have common 
parent element" for state_v in common_parents: # Find most nested state_v children_of_state_v = self.view.canvas.get_all_children(state_v) if any(common_parent in children_of_state_v for common_parent in common_parents): continue self.view.graphical_editor.emit('meta_data_changed', state_v.model, "positions", True) break if not affected_models and self._old_selection is not None: # The selection is handled differently depending on whether states were moved or not # If no move operation was performed, we reset the selection to that is was before the button-press event # and let the state machine selection handle the selection self.view.unselect_all() self.view.select_item(self._old_selection) self.view.handle_new_selection(self._item) self._move_name_v = False self._old_selection = None return super(MoveItemTool, self).on_button_release(event)
def on_button_release(self, event)
Write back changes If one or more items have been moved, the new position are stored in the corresponding meta data and a signal notifying the change is emitted. :param event: The button event
3.030632
3.005596
1.00833
if not items: return items top_most_item = items[0] # If the hovered item is e.g. a connection, we need to get the parental state top_most_state_v = top_most_item if isinstance(top_most_item, StateView) else top_most_item.parent state = top_most_state_v.model.state global_gui_config = gui_helper_state_machine.global_gui_config if global_gui_config.get_config_value('STATE_SELECTION_INSIDE_LIBRARY_STATE_ENABLED'): # select the library state instead of the library_root_state because it is hidden if state.is_root_state_of_library: new_topmost_item = self.view.canvas.get_view_for_core_element(state.parent) return self.dismiss_upper_items(items, new_topmost_item) return items else: # Find state_copy of uppermost LibraryState library_root_state = state.get_uppermost_library_root_state() # If the hovered element is a child of a library, make the library the hovered_item if library_root_state: library_state = library_root_state.parent library_state_v = self.view.canvas.get_view_for_core_element(library_state) return self.dismiss_upper_items(items, library_state_v) return items
def _filter_library_state(self, items)
Filters out child elements of library state when they cannot be hovered Checks if hovered item is within a LibraryState * if not, the list is returned unfiltered * if so, STATE_SELECTION_INSIDE_LIBRARY_STATE_ENABLED is checked * if enabled, the library is selected (instead of the state copy) * if not, the upper most library is selected :param list items: Sorted list of items beneath the cursor :return: filtered items :rtype: list
5.132707
4.227069
1.214247
items = self._filter_library_state(items) if not items: return items top_most_item = items[0] second_top_most_item = items[1] if len(items) > 1 else None # States/Names take precedence over connections if the connections are on the same hierarchy and if there is # a port beneath the cursor first_state_v = next(filter(lambda item: isinstance(item, (NameView, StateView)), items)) first_state_v = first_state_v.parent if isinstance(first_state_v, NameView) else first_state_v if first_state_v: # There can be several connections above the state/name skip those and find the first non-connection-item for item in items: if isinstance(item, ConnectionView): # connection is on the same hierarchy level as the state/name, thus we dismiss it if self.view.canvas.get_parent(top_most_item) is not first_state_v: continue break # Connections are only dismissed, if there is a port beneath the cursor. Search for ports here: port_beneath_cursor = False state_ports = first_state_v.get_all_ports() position = self.view.get_matrix_v2i(first_state_v).transform_point(event.x, event.y) i2v_matrix = self.view.get_matrix_i2v(first_state_v) for port_v in state_ports: item_distance = port_v.port.glue(position)[1] view_distance = i2v_matrix.transform_distance(item_distance, 0)[0] if view_distance == 0: port_beneath_cursor = True break if port_beneath_cursor: items = self.dismiss_upper_items(items, item) top_most_item = items[0] second_top_most_item = items[1] if len(items) > 1 else None # NameView can only be hovered if it or its parent state is selected if isinstance(top_most_item, NameView): state_v = second_top_most_item # second item in the list must be the parent state of the NameView if state_v not in self.view.selected_items and top_most_item not in self.view.selected_items: items = items[1:] return items
def _filter_hovered_items(self, items, event)
Filters out items that cannot be hovered :param list items: Sorted list of items beneath the cursor :param Gtk.Event event: Motion event :return: filtered items :rtype: list
4.152774
4.124651
1.006818
self.queue_draw(self.view) x0, y0, x1, y1 = self.x0, self.y0, self.x1, self.y1 rectangle = (min(x0, x1), min(y0, y1), abs(x1 - x0), abs(y1 - y0)) selected_items = self.view.get_items_in_rectangle(rectangle, intersect=False) self.view.handle_new_selection(selected_items) return True
def on_button_release(self, event)
Select or deselect rubber banded groups of items The selection of elements is prior and never items are selected or deselected at the same time.
2.944931
2.834742
1.038871
if not event.get_button()[1] == 1: # left mouse button return False view = self.view if isinstance(view.hovered_item, StateView): distance = view.hovered_item.border_width / 2. item, handle = HandleFinder(view.hovered_item, view).get_handle_at_point((event.x, event.y), distance) else: item, handle = HandleFinder(view.hovered_item, view).get_handle_at_point((event.x, event.y)) if not handle: return False # Only move ports when the MOVE_PORT_MODIFIER key is pressed if isinstance(item, (StateView, PortView)) and \ handle in [port.handle for port in item.get_all_ports()] and \ not (event.get_state()[1] & constants.MOVE_PORT_MODIFIER): return False # Do not move from/to handles of connections (only their waypoints) if isinstance(item, ConnectionView) and handle in item.end_handles(include_waypoints=True): return False if handle: view.hovered_item = item self.motion_handle = None self.grab_handle(item, handle) return True
def on_button_press(self, event)
Handle button press events. If the (mouse) button is pressed on top of a Handle (item.Handle), that handle is grabbed and can be dragged around.
4.718287
4.486949
1.051558
item = self.grabbed_item handle = self.grabbed_handle pos = event.x, event.y self.motion_handle = HandleInMotion(item, handle, self.view) self.motion_handle.GLUE_DISTANCE = self._parent_state_v.border_width self.motion_handle.start_move(pos)
def _set_motion_handle(self, event)
Sets motion handle to currently grabbed handle
7.341442
6.443148
1.139418
if self._is_transition: self._connection_v = TransitionPlaceholderView(self._parent_state_v.hierarchy_level) else: self._connection_v = DataFlowPlaceholderView(self._parent_state_v.hierarchy_level) self.view.canvas.add(self._connection_v, self._parent_state_v)
def _create_temporary_connection(self)
Creates a placeholder connection view :return: New placeholder connection :rtype: rafcon.gui.mygaphas.items.connection.ConnectionPlaceholderView
6.550096
4.986767
1.313495
def sink_set_and_differs(sink_a, sink_b): if not sink_a: return False if not sink_b: return True if sink_a.port != sink_b.port: return True return False if sink_set_and_differs(old_sink, new_sink): sink_port_v = old_sink.port.port_v self._disconnect_temporarily(sink_port_v, target=of_target) if sink_set_and_differs(new_sink, old_sink): sink_port_v = new_sink.port.port_v self._connect_temporarily(sink_port_v, target=of_target)
def _handle_temporary_connection(self, old_sink, new_sink, of_target=True)
Connect connection to new_sink If new_sink is set, the connection origin or target will be set to new_sink. The connection to old_sink is being removed. :param gaphas.aspect.ConnectionSink old_sink: Old sink (if existing) :param gaphas.aspect.ConnectionSink new_sink: New sink (if existing) :param bool of_target: Whether the origin or target will be reconnected :return:
2.567623
2.520963
1.018509
if target: handle = self._connection_v.to_handle() else: handle = self._connection_v.from_handle() port_v.add_connected_handle(handle, self._connection_v, moving=True) port_v.tmp_connect(handle, self._connection_v) self._connection_v.set_port_for_handle(port_v, handle) # Redraw state of port to make hover state visible self._redraw_port(port_v)
def _connect_temporarily(self, port_v, target=True)
Set a connection between the current connection and the given port :param rafcon.gui.mygaphas.items.ports.PortView port_v: The port to be connected :param bool target: Whether the connection origin or target should be connected
5.21373
5.215847
0.999594
if target: handle = self._connection_v.to_handle() else: handle = self._connection_v.from_handle() port_v.remove_connected_handle(handle) port_v.tmp_disconnect() self._connection_v.reset_port_for_handle(handle) # Redraw state of port to make hover state visible self._redraw_port(port_v)
def _disconnect_temporarily(self, port_v, target=True)
Removes a connection between the current connection and the given port :param rafcon.gui.mygaphas.items.ports.PortView port_v: The port that was connected :param bool target: Whether the connection origin or target should be disconnected
6.231379
6.295002
0.989893
if not event.get_button()[1] == 1: # left mouse button return False view = self.view item, handle = HandleFinder(view.hovered_item, view).get_handle_at_point((event.x, event.y)) if not handle: # Require a handle return False # Connection handle must belong to a port and the MOVE_PORT_MODIFIER must not be pressed if not isinstance(item, StateView) or handle not in [port.handle for port in item.get_all_ports()] or ( event.get_state()[1] & constants.MOVE_PORT_MODIFIER): return False for port in item.get_all_ports(): if port.handle is handle: self._start_port_v = port if port in item.get_logic_ports(): self._is_transition = True if port is item.income or isinstance(port, InputPortView) or port in item.scoped_variables: self._parent_state_v = port.parent elif port.parent.parent: self._parent_state_v = port.parent.parent else: # Outgoing port of the root state was clicked on, no connection can be drawn here self._parent_state_v = None return True
def on_button_press(self, event)
Handle button press events. If the (mouse) button is pressed on top of a Handle (item.Handle), that handle is grabbed and can be dragged around.
6.199263
6.060274
1.022935
if not event.get_button()[1] == 1: # left mouse button return False view = self.view item, handle = HandleFinder(view.hovered_item, view).get_handle_at_point((event.x, event.y)) # Handle must be the end handle of a connection if not handle or not isinstance(item, ConnectionView) or handle not in item.end_handles(): return False if handle is item.from_handle(): self._start_port_v = item.from_port else: self._start_port_v = item.to_port self._parent_state_v = item.parent self._end_handle = handle if isinstance(item, TransitionView): self._is_transition = True self._connection_v = item return True
def on_button_press(self, event)
Handle button press events. If the (mouse) button is pressed on top of a Handle (item.Handle), that handle is grabbed and can be dragged around.
5.199651
5.085186
1.022509
super(ToolBarController, self).register_view(view) self.view['button_new'].connect('clicked', self.on_button_new_clicked) self.view['button_open'].connect('clicked', self.on_button_open_clicked) self.view['button_save'].connect('clicked', self.on_button_save_clicked) self.view['button_refresh'].connect('clicked', self.on_button_refresh_clicked) self.view['button_refresh_selected'].connect('clicked', self.on_button_refresh_selected_clicked) self.view['button_refresh_libs'].connect('clicked', self.on_button_refresh_libs_clicked) self.view['button_bake_state_machine'].connect('clicked', self.on_button_bake_state_machine_clicked)
def register_view(self, view)
Called when the View was registered
1.899487
1.87476
1.013189
logger.debug("Initializing LibraryManager: Loading libraries ... ") self._libraries = {} self._library_root_paths = {} self._replaced_libraries = {} self._skipped_states = [] self._skipped_library_roots = [] # 1. Load libraries from config.yaml for library_root_key, library_root_path in config.global_config.get_config_value("LIBRARY_PATHS").items(): library_root_path = self._clean_path(library_root_path) if os.path.exists(library_root_path): logger.debug("Adding library root key '{0}' from path '{1}'".format( library_root_key, library_root_path)) self._load_libraries_from_root_path(library_root_key, library_root_path) else: logger.warning("Configured path for library root key '{}' does not exist: {}".format( library_root_key, library_root_path)) # 2. Load libraries from RAFCON_LIBRARY_PATH library_path_env = os.environ.get('RAFCON_LIBRARY_PATH', '') library_paths = set(library_path_env.split(os.pathsep)) for library_root_path in library_paths: if not library_root_path: continue library_root_path = self._clean_path(library_root_path) if not os.path.exists(library_root_path): logger.warning("The library specified in RAFCON_LIBRARY_PATH does not exist: {}".format(library_root_path)) continue _, library_root_key = os.path.split(library_root_path) if library_root_key in self._libraries: if os.path.realpath(self._library_root_paths[library_root_key]) == os.path.realpath(library_root_path): logger.info("The library root key '{}' and root path '{}' exists multiple times in your environment" " and will be skipped.".format(library_root_key, library_root_path)) else: logger.warning("The library '{}' is already existing and will be overridden with '{}'".format( library_root_key, library_root_path)) self._load_libraries_from_root_path(library_root_key, library_root_path) else: self._load_libraries_from_root_path(library_root_key, library_root_path) logger.debug("Adding library '{1}' from {0}".format(library_root_path, library_root_key)) self._libraries = 
OrderedDict(sorted(self._libraries.items())) logger.debug("Initialization of LibraryManager done")
def initialize(self)
Initializes the library manager It searches through all library paths given in the config file for libraries, and loads the states. This cannot be done in the __init__ function as the library_manager can be compiled and executed by singleton.py before the state*.pys are loaded
2.384995
2.326799
1.025011
path = path.replace('"', '') path = path.replace("'", '') # Replace ~ with /home/user path = os.path.expanduser(path) # Replace environment variables path = os.path.expandvars(path) # If the path is relative, assume it is relative to the config file directory if not os.path.isabs(path): path = os.path.join(config.global_config.path, path) # Clean path, e.g. replace /./ with / path = os.path.abspath(path) # Eliminate symbolic links path = os.path.realpath(path) return path
def _clean_path(path)
Create a fully fissile absolute system path with no symbolic links and environment variables
2.880808
2.767839
1.040815
for library_name in os.listdir(library_path): library_folder_path, library_name = self.check_clean_path_of_library(library_path, library_name) full_library_path = os.path.join(library_path, library_name) if os.path.isdir(full_library_path) and library_name[0] != '.': if os.path.exists(os.path.join(full_library_path, storage.STATEMACHINE_FILE)) \ or os.path.exists(os.path.join(full_library_path, storage.STATEMACHINE_FILE_OLD)): target_dict[library_name] = full_library_path else: target_dict[library_name] = {} self._load_nested_libraries(full_library_path, target_dict[library_name]) target_dict[library_name] = OrderedDict(sorted(target_dict[library_name].items()))
def _load_nested_libraries(self, library_path, target_dict)
Recursively load libraries within path Adds all libraries specified in a given path and stores them into the provided library dictionary. The library entries in the dictionary consist only of the path to the library in the file system. :param library_path: the path to add all libraries from :param target_dict: the target dictionary to store all loaded libraries to
2.134261
2.230484
0.95686
if library_path is None or library_name is None: return None path_list = library_path.split(os.sep) target_lib_dict = self.libraries # go down the path to the correct library for path_element in path_list: if path_element not in target_lib_dict: # Library cannot be found target_lib_dict = None break target_lib_dict = target_lib_dict[path_element] return None if target_lib_dict is None or library_name not in target_lib_dict else target_lib_dict[library_name]
def _get_library_os_path_from_library_dict_tree(self, library_path, library_name)
Hand verified library os path from libraries dictionary tree.
2.471174
2.298716
1.075023
path = os.path.realpath(path) library_root_key = None for library_root_key, library_root_path in self._library_root_paths.items(): rel_path = os.path.relpath(path, library_root_path) if rel_path.startswith('..'): library_root_key = None continue else: break return library_root_key
def _get_library_root_key_for_os_path(self, path)
Return library root key if path is within library root paths
1.990038
1.777869
1.119339
library_path = None library_name = None library_root_key = self._get_library_root_key_for_os_path(path) if library_root_key is not None: library_root_path = self._library_root_paths[library_root_key] path_elements_without_library_root = path[len(library_root_path)+1:].split(os.sep) library_name = path_elements_without_library_root[-1] sub_library_path = '' if len(path_elements_without_library_root[:-1]): sub_library_path = os.sep + os.sep.join(path_elements_without_library_root[:-1]) library_path = library_root_key + sub_library_path return library_path, library_name
def get_library_path_and_name_for_os_path(self, path)
Generate valid library_path and library_name The method checks if the given os path is in the list of loaded library root paths and use respective library root key/mounting point to concatenate the respective library_path and separate respective library_name. :param str path: A library os path a library is situated in. :return: library path library name :rtype: str, str
2.012537
1.983921
1.014424
if self.is_library_in_libraries(library_path, library_name): from rafcon.core.states.library_state import LibraryState return LibraryState(library_path, library_name, "0.1") else: logger.warning("Library manager will not create a library instance which is not in the mounted libraries.")
def get_library_instance(self, library_path, library_name)
Generate a Library instance from within libraries dictionary tree.
6.63076
6.00289
1.104595
# originally libraries were called like this; DO NOT DELETE; interesting for performance tests # state_machine = storage.load_state_machine_from_path(lib_os_path) # return state_machine.version, state_machine.root_state # TODO observe changes on file system and update data if lib_os_path in self._loaded_libraries: # this list can also be taken to open library state machines TODO -> implement it -> because faster state_machine = self._loaded_libraries[lib_os_path] # logger.info("Take copy of {0}".format(lib_os_path)) # as long as the a library state root state is never edited so the state first has to be copied here state_copy = copy.deepcopy(state_machine.root_state) return state_machine.version, state_copy else: state_machine = storage.load_state_machine_from_path(lib_os_path) self._loaded_libraries[lib_os_path] = state_machine if config.global_config.get_config_value("NO_PROGRAMMATIC_CHANGE_OF_LIBRARY_STATES_PERFORMED", False): return state_machine.version, state_machine.root_state else: state_copy = copy.deepcopy(state_machine.root_state) return state_machine.version, state_copy
def get_library_state_copy_instance(self, lib_os_path)
A method to get a state copy of the library specified via the lib_os_path. :param lib_os_path: the location of the library to get a copy for :return:
5.376646
5.554215
0.96803
library_file_system_path = self.get_os_path_to_library(library_path, library_name)[0] shutil.rmtree(library_file_system_path) self.refresh_libraries()
def remove_library_from_file_system(self, library_path, library_name)
Remove library from hard disk.
3.392913
3.125491
1.085562
mask = network.buses.v_nom.isin(voltage_level) df = network.buses[mask] return df.index
def buses_of_vlvl(network, voltage_level)
Get bus-ids of given voltage level(s). Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA voltage_level: list Returns ------- list List containing bus-ids.
6.216528
6.58532
0.943998
mask = ((network.buses.index.isin(network.lines.bus0) | (network.buses.index.isin(network.lines.bus1))) & (network.buses.v_nom.isin(voltage_level))) df = network.buses[mask] return df.index
def buses_grid_linked(network, voltage_level)
Get bus-ids of a given voltage level connected to the grid. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA voltage_level: list Returns ------- list List containing bus-ids.
3.495089
3.614209
0.967041
# get foreign buses by country foreign_buses = network.buses[network.buses.country_code != 'DE'] network.buses = network.buses.drop( network.buses.loc[foreign_buses.index].index) # identify transborder lines (one bus foreign, one bus not) and the country # it is coming from # drop foreign components network.lines = network.lines.drop(network.lines[ (network.lines['bus0'].isin(network.buses.index) == False) | (network.lines['bus1'].isin(network.buses.index) == False)].index) network.links = network.links.drop(network.links[ (network.links['bus0'].isin(network.buses.index) == False) | (network.links['bus1'].isin(network.buses.index) == False)].index) network.transformers = network.transformers.drop(network.transformers[ (network.transformers['bus0'].isin(network.buses.index) == False) | (network.transformers['bus1'].isin(network. buses.index) == False)].index) network.generators = network.generators.drop(network.generators[ (network.generators['bus'].isin(network.buses.index) == False)].index) network.loads = network.loads.drop(network.loads[ (network.loads['bus'].isin(network.buses.index) == False)].index) network.storage_units = network.storage_units.drop(network.storage_units[ (network.storage_units['bus'].isin(network. buses.index) == False)].index) components = ['loads', 'generators', 'lines', 'buses', 'transformers', 'links'] for g in components: # loads_t h = g + '_t' nw = getattr(network, h) # network.loads_t for i in nw.keys(): # network.loads_t.p cols = [j for j in getattr( nw, i).columns if j not in getattr(network, g).index] for k in cols: del getattr(nw, i)[k] return network
def clip_foreign(network)
Delete all components and timelines located outside of Germany. Add transborder flows divided by country of origin as network.foreign_trade. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA Returns ------- network : :class:`pypsa.Network Overall container of PyPSA
2.524439
2.412168
1.046544
foreign_buses = network.buses[network.buses.country_code != 'DE'] foreign_lines = network.lines[network.lines.bus0.astype(str).isin( foreign_buses.index) | network.lines.bus1.astype(str).isin( foreign_buses.index)] foreign_links = network.links[network.links.bus0.astype(str).isin( foreign_buses.index) | network.links.bus1.astype(str).isin( foreign_buses.index)] network.links = network.links.drop( network.links.index[network.links.index.isin(foreign_links.index) & network.links.bus0.isin(network.links.bus1) & (network.links.bus0 > network.links.bus1)]) foreign_links = network.links[network.links.bus0.astype(str).isin( foreign_buses.index) | network.links.bus1.astype(str).isin( foreign_buses.index)] network.links.loc[foreign_links.index, 'p_min_pu'] = -1 network.links.loc[foreign_links.index, 'efficiency'] = 1 network.import_components_from_dataframe( foreign_lines.loc[:, ['bus0', 'bus1', 'capital_cost', 'length']] .assign(p_nom=foreign_lines.s_nom).assign(p_min_pu=-1) .set_index('N' + foreign_lines.index), 'Link') network.lines = network.lines.drop(foreign_lines.index) return network
def foreign_links(network)
Change transmission technology of foreign lines from AC to DC (links). Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA Returns ------- network : :class:`pypsa.Network Overall container of PyPSA
2.584567
2.480592
1.041915
foreign_buses = network.buses[network.buses.country_code != 'DE'] network.loads_t['q_set'][network.loads.index[ network.loads.bus.astype(str).isin(foreign_buses.index)]] = \ network.loads_t['p_set'][network.loads.index[ network.loads.bus.astype(str).isin( foreign_buses.index)]] * math.tan(math.acos(cos_phi)) network.generators.control[network.generators.control == 'PQ'] = 'PV' return network
def set_q_foreign_loads(network, cos_phi=1)
Set reative power timeseries of loads in neighbouring countries Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA cos_phi: float Choose ration of active and reactive power of foreign loads Returns ------- network : :class:`pypsa.Network Overall container of PyPSA
3.654483
3.682743
0.992326
mask = network.lines.bus1.isin(busids) |\ network.lines.bus0.isin(busids) return network.lines[mask]
def connected_grid_lines(network, busids)
Get grid lines connected to given buses. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA busids : list List containing bus-ids. Returns ------- :class:`pandas.DataFrame PyPSA lines.
3.824444
5.321826
0.718634
mask = (network.transformers.bus0.isin(busids)) return network.transformers[mask]
def connected_transformer(network, busids)
Get transformer connected to given buses. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA busids : list List containing bus-ids. Returns ------- :class:`pandas.DataFrame PyPSA transformer.
7.901562
10.373949
0.761674
marginal_cost_def = 10000 # network.generators.marginal_cost.max()*2 p_nom_def = network.loads_t.p_set.max().max() marginal_cost = kwargs.get('marginal_cost', marginal_cost_def) p_nom = kwargs.get('p_nom', p_nom_def) network.add("Carrier", "load") start = network.generators.index.to_series().str.rsplit( ' ').str[0].astype(int).sort_values().max() + 1 index = list(range(start, start + len(network.buses.index))) network.import_components_from_dataframe( pd.DataFrame( dict(marginal_cost=marginal_cost, p_nom=p_nom, carrier='load shedding', bus=network.buses.index), index=index), "Generator" ) return
def load_shedding(network, **kwargs)
Implement load shedding in existing network to identify feasibility problems Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA marginal_cost : int Marginal costs for load shedding p_nom : int Installed capacity of load shedding generator Returns -------
4.059524
3.490202
1.16312
from shapely.geometry import Point, LineString, MultiLineString from geoalchemy2.shape import from_shape, to_shape # add connection from Luebeck to Siems new_bus = str(network.buses.index.astype(np.int64).max() + 1) new_trafo = str(network.transformers.index.astype(np.int64).max() + 1) new_line = str(network.lines.index.astype(np.int64).max() + 1) network.add("Bus", new_bus, carrier='AC', v_nom=220, x=10.760835, y=53.909745) network.add("Transformer", new_trafo, bus0="25536", bus1=new_bus, x=1.29960, tap_ratio=1, s_nom=1600) network.add("Line", new_line, bus0="26387", bus1=new_bus, x=0.0001, s_nom=1600) network.lines.loc[new_line, 'cables'] = 3.0 # bus geom point_bus1 = Point(10.760835, 53.909745) network.buses.set_value(new_bus, 'geom', from_shape(point_bus1, 4326)) # line geom/topo network.lines.set_value(new_line, 'geom', from_shape(MultiLineString( [LineString([to_shape(network. buses.geom['26387']), point_bus1])]), 4326)) network.lines.set_value(new_line, 'topo', from_shape(LineString( [to_shape(network.buses.geom['26387']), point_bus1]), 4326)) # trafo geom/topo network.transformers.set_value(new_trafo, 'geom', from_shape(MultiLineString( [LineString( [to_shape(network .buses.geom['25536']), point_bus1])]), 4326)) network.transformers.set_value(new_trafo, 'topo', from_shape( LineString([to_shape(network.buses.geom['25536']), point_bus1]), 4326)) return
def data_manipulation_sh(network)
Adds missing components to run calculations with SH scenarios. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA
2.662034
2.660653
1.000519
path = args['csv_export'] if path == False: return None if not os.path.exists(path): os.makedirs(path, exist_ok=True) network.export_to_csv_folder(path) data = pd.read_csv(os.path.join(path, 'network.csv')) data['time'] = network.results['Solver'].Time data = data.apply(_enumerate_row, axis=1) data.to_csv(os.path.join(path, 'network.csv'), index=False) with open(os.path.join(path, 'args.json'), 'w') as fp: json.dump(args, fp) if not isinstance(pf_solution, type(None)): pf_solution.to_csv(os.path.join(path, 'pf_solution.csv'), index=True) if hasattr(network, 'Z'): file = [i for i in os.listdir( path.strip('0123456789')) if i == 'Z.csv'] if file: print('Z already calculated') else: network.Z.to_csv(path.strip('0123456789') + '/Z.csv', index=False) return
def results_to_csv(network, args, pf_solution=None)
Function the writes the calaculation results in csv-files in the desired directory. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA args: dict Contains calculation settings of appl.py pf_solution: pandas.Dataframe or None If pf was calculated, df containing information of convergence else None.
2.705808
2.771883
0.976162
print("Performing linear OPF, {} snapshot(s) at a time:". format(group_size)) t = time.time() for i in range(int((args['end_snapshot'] - args['start_snapshot'] + 1) / group_size)): if i > 0: network.storage_units.state_of_charge_initial = network.\ storage_units_t.state_of_charge.loc[ network.snapshots[group_size * i - 1]] network.lopf(network.snapshots[ group_size * i:group_size * i + group_size], solver_name=args['solver_name'], solver_options=args['solver_options'], extra_functionality=extra_functionality) network.lines.s_nom = network.lines.s_nom_opt print(time.time() - t / 60) return
def parallelisation(network, args, group_size, extra_functionality=None)
Function that splits problem in selected number of snapshot groups and runs optimization successive for each group. Not useful for calculations with storage untis or extension. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA args: dict Contains calculation settings of appl.py Returns ------- network : :class:`pypsa.Network Overall container of PyPSA
4.250364
3.895481
1.091101
old_slack = network.generators.index[network. generators.control == 'Slack'][0] # check if old slack was PV or PQ control: if network.generators.p_nom[old_slack] > 50 and network.generators.\ carrier[old_slack] in ('solar', 'wind'): old_control = 'PQ' elif network.generators.p_nom[old_slack] > 50 and network.generators.\ carrier[old_slack] not in ('solar', 'wind'): old_control = 'PV' elif network.generators.p_nom[old_slack] < 50: old_control = 'PQ' old_gens = network.generators gens_summed = network.generators_t.p.sum() old_gens['p_summed'] = gens_summed max_gen_buses_index = old_gens.groupby(['bus']).agg( {'p_summed': np.sum}).p_summed.sort_values().index for bus_iter in range(1, len(max_gen_buses_index) - 1): if old_gens[(network. generators['bus'] == max_gen_buses_index[-bus_iter]) & (network.generators['control'] == 'PV')].empty: continue else: new_slack_bus = max_gen_buses_index[-bus_iter] break network.generators = network.generators.drop('p_summed', 1) new_slack_gen = network.generators.\ p_nom[(network.generators['bus'] == new_slack_bus) & ( network.generators['control'] == 'PV')].sort_values().index[-1] network.generators = network.generators.set_value( old_slack, 'control', old_control) network.generators = network.generators.set_value( new_slack_gen, 'control', 'Slack') return network
def set_slack(network)
Function that chosses the bus with the maximum installed power as slack Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA Returns ------- network : :class:`pypsa.Network Overall container of PyPSA
3.041092
2.898739
1.049109
network.allocation = allocation if allocation == 'p': p_sum = network.generators_t['p'].\ groupby(network.generators.bus, axis=1).sum().\ add(network.storage_units_t['p'].abs().groupby( network.storage_units.bus, axis=1).sum(), fill_value=0) q_sum = network.generators_t['q'].\ groupby(network.generators.bus, axis=1).sum() q_distributed = network.generators_t.p / \ p_sum[network.generators.bus.sort_index()].values * \ q_sum[network.generators.bus.sort_index()].values q_storages = network.storage_units_t.p / \ p_sum[network.storage_units.bus.sort_index()].values *\ q_sum[network.storage_units.bus.sort_index()].values if allocation == 'p_nom': q_bus = network.generators_t['q'].\ groupby(network.generators.bus, axis=1).sum().add( network.storage_units_t.q.groupby( network.storage_units.bus, axis = 1).sum(), fill_value=0) p_nom_dist = network.generators.p_nom_opt.sort_index() p_nom_dist[p_nom_dist.index.isin(network.generators.index [network.generators.carrier == 'load shedding'])] = 0 q_distributed = q_bus[ network.generators.bus].multiply(p_nom_dist.values) /\ (network.generators.p_nom_opt[network.generators.carrier != 'load shedding'].groupby( network.generators.bus).sum().add( network.storage_units.p_nom_opt.groupby (network.storage_units.bus).sum(), fill_value=0))[ network.generators.bus.sort_index()].values q_distributed.columns = network.generators.index q_storages = q_bus[network.storage_units.bus]\ .multiply(network.storage_units.p_nom_opt.values) / \ ((network.generators.p_nom_opt[network.generators.carrier != 'load shedding'].groupby( network.generators.bus).sum().add( network.storage_units.p_nom_opt. 
groupby(network.storage_units.bus).sum(), fill_value=0))[ network.storage_units.bus].values) q_storages.columns = network.storage_units.index q_distributed[q_distributed.isnull()] = 0 q_distributed[q_distributed.abs() == np.inf] = 0 q_storages[q_storages.isnull()] = 0 q_storages[q_storages.abs() == np.inf] = 0 network.generators_t.q = q_distributed network.storage_units_t.q = q_storages return network
def distribute_q(network, allocation='p_nom')
Function that distributes reactive power at bus to all installed generators and storages. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA allocation: str Choose key to distribute reactive power: 'p_nom' to dirstribute via p_nom 'p' to distribute via p_set Returns -------
2.190187
2.217782
0.987558
# Line losses # calculate apparent power S = sqrt(p² + q²) [in MW] s0_lines = ((network.lines_t.p0**2 + network.lines_t.q0**2). apply(np.sqrt)) # calculate current I = S / U [in A] i0_lines = np.multiply(s0_lines, 1000000) / \ np.multiply(network.lines.v_nom, 1000) # calculate losses per line and timestep network.\ # lines_t.line_losses = I² * R [in MW] network.lines_t.losses = np.divide(i0_lines**2 * network.lines.r, 1000000) # calculate total losses per line [in MW] network.lines = network.lines.assign( losses=np.sum(network.lines_t.losses).values) # Transformer losses # https://books.google.de/books?id=0glcCgAAQBAJ&pg=PA151&lpg=PA151&dq= # wirkungsgrad+transformator+1000+mva&source=bl&ots=a6TKhNfwrJ&sig= # r2HCpHczRRqdgzX_JDdlJo4hj-k&hl=de&sa=X&ved= # 0ahUKEwib5JTFs6fWAhVJY1AKHa1cAeAQ6AEIXjAI#v=onepage&q= # wirkungsgrad%20transformator%201000%20mva&f=false # Crastan, Elektrische Energieversorgung, p.151 # trafo 1000 MVA: 99.8 % network.transformers = network.transformers.assign( losses=np.multiply(network.transformers.s_nom, (1 - 0.998)).values) # calculate total losses (possibly enhance with adding these values # to network container) losses_total = sum(network.lines.losses) + sum(network.transformers.losses) print("Total lines losses for all snapshots [MW]:", round(losses_total, 2)) losses_costs = losses_total * np.average(network.buses_t.marginal_price) print("Total costs for these losses [EUR]:", round(losses_costs, 2)) return
def calc_line_losses(network)
Calculate losses per line with PF result data Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA s0 : series apparent power of line i0 : series current of line -------
7.069207
6.591951
1.0724
network.lines["v_nom"] = network.lines.bus0.map(network.buses.v_nom) network.lines.loc[(network.lines.v_nom == 110), 'capital_cost'] = cost110 * network.lines.length /\ args['branch_capacity_factor']['HV'] network.lines.loc[(network.lines.v_nom == 220), 'capital_cost'] = cost220 * network.lines.length/\ args['branch_capacity_factor']['eHV'] network.lines.loc[(network.lines.v_nom == 380), 'capital_cost'] = cost380 * network.lines.length/\ args['branch_capacity_factor']['eHV'] network.links.loc[network.links.p_nom_extendable, 'capital_cost'] = costDC * network.links.length return network
def set_line_costs(network, args, cost110=230, cost220=290, cost380=85, costDC=375)
Set capital costs for extendable lines in respect to PyPSA [€/MVA] Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA args: dict containing settings from appl.py cost110 : capital costs per km for 110kV lines and cables default: 230€/MVA/km, source: costs for extra circuit in dena Verteilnetzstudie, p. 146) cost220 : capital costs per km for 220kV lines and cables default: 280€/MVA/km, source: costs for extra circuit in NEP 2025, capactity from most used 220 kV lines in model cost380 : capital costs per km for 380kV lines and cables default: 85€/MVA/km, source: costs for extra circuit in NEP 2025, capactity from most used 380 kV lines in NEP costDC : capital costs per km for DC-lines default: 375€/MVA/km, source: costs for DC transmission line in NEP 2035 -------
2.744699
2.48159
1.106025
network.transformers["v_nom0"] = network.transformers.bus0.map( network.buses.v_nom) network.transformers["v_nom1"] = network.transformers.bus1.map( network.buses.v_nom) network.transformers.loc[(network.transformers.v_nom0 == 110) & ( network.transformers.v_nom1 == 220), 'capital_cost'] = cost110_220/\ args['branch_capacity_factor']['HV'] network.transformers.loc[(network.transformers.v_nom0 == 110) & ( network.transformers.v_nom1 == 380), 'capital_cost'] = cost110_380/\ args['branch_capacity_factor']['HV'] network.transformers.loc[(network.transformers.v_nom0 == 220) & ( network.transformers.v_nom1 == 380), 'capital_cost'] = cost220_380/\ args['branch_capacity_factor']['eHV'] return network
def set_trafo_costs(network, args, cost110_220=7500, cost110_380=17333, cost220_380=14166)
Set capital costs for extendable transformers in respect to PyPSA [€/MVA] Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA cost110_220 : capital costs for 110/220kV transformer default: 7500€/MVA, source: costs for extra trafo in dena Verteilnetzstudie, p. 146; S of trafo used in osmTGmod cost110_380 : capital costs for 110/380kV transformer default: 17333€/MVA, source: NEP 2025 cost220_380 : capital costs for 220/380kV transformer default: 14166€/MVA, source: NEP 2025
2.134501
2.015058
1.059275
# Add costs for DC-converter network.links.capital_cost = network.links.capital_cost + 400000 # Calculate present value of an annuity (PVA) PVA = (1 / p) - (1 / (p * (1 + p) ** T)) # Apply function on lines, links, trafos and storages # Storage costs are already annuized yearly network.lines.loc[network.lines.s_nom_extendable == True, 'capital_cost'] = (network.lines.capital_cost / (PVA * (8760 / (end_snapshot - start_snapshot + 1)))) network.links.loc[network.links.p_nom_extendable == True, 'capital_cost'] = network. links.capital_cost /\ (PVA * (8760 / (end_snapshot - start_snapshot + 1))) network.transformers.loc[network.transformers.s_nom_extendable == True, 'capital_cost'] = network.transformers.capital_cost / \ (PVA * (8760 / (end_snapshot - start_snapshot + 1))) network.storage_units.loc[network.storage_units.p_nom_extendable == True, 'capital_cost'] = network.storage_units.capital_cost / \ (8760 / (end_snapshot - start_snapshot + 1)) return network
def convert_capital_costs(network, start_snapshot, end_snapshot, p=0.05, T=40)
Convert capital_costs to fit to pypsa and caluculated time Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA p : interest rate, default 0.05 T : number of periods, default 40 years (source: StromNEV Anlage 1) -------
3.175518
3.078729
1.031438
if carrier == 'residual load': power_plants = network.generators[network.generators.carrier. isin(['solar', 'wind', 'wind_onshore'])] power_plants_t = network.generators.p_nom[power_plants.index] * \ network.generators_t.p_max_pu[power_plants.index] load = network.loads_t.p_set.sum(axis=1) all_renew = power_plants_t.sum(axis=1) all_carrier = load - all_renew if carrier in ('solar', 'wind', 'wind_onshore', 'wind_offshore', 'run_of_river'): power_plants = network.generators[network.generators.carrier == carrier] power_plants_t = network.generators.p_nom[power_plants.index] * \ network.generators_t.p_max_pu[power_plants.index] all_carrier = power_plants_t.sum(axis=1) if maximum and not minimum: times = all_carrier.sort_values().head(n=n) if minimum and not maximum: times = all_carrier.sort_values().tail(n=n) if maximum and minimum: times = all_carrier.sort_values().head(n=n) times = times.append(all_carrier.sort_values().tail(n=n)) calc_snapshots = all_carrier.index[all_carrier.index.isin(times.index)] return calc_snapshots
def find_snapshots(network, carrier, maximum = True, minimum = True, n = 3)
Function that returns snapshots with maximum and/or minimum feed-in of selected carrier. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA carrier: str Selected carrier of generators maximum: bool Choose if timestep of maximal feed-in is returned. minimum: bool Choose if timestep of minimal feed-in is returned. n: int Number of maximal/minimal snapshots Returns ------- calc_snapshots : 'pandas.core.indexes.datetimes.DatetimeIndex' List containing snapshots
2.63279
2.412617
1.091259
carrier = ['coal', 'biomass', 'gas', 'oil', 'waste', 'lignite', 'uranium', 'geothermal'] data = {'start_up_cost':[77, 57, 42, 57, 57, 77, 50, 57], #€/MW 'start_up_fuel':[4.3, 2.8, 1.45, 2.8, 2.8, 4.3, 16.7, 2.8], #MWh/MW 'min_up_time':[5, 2, 3, 2, 2, 5, 12, 2], 'min_down_time':[7, 2, 2, 2, 2, 7, 17, 2], # ============================================================================= # 'ramp_limit_start_up':[0.4, 0.4, 0.4, 0.4, 0.4, 0.6, 0.5, 0.4], # 'ramp_limit_shut_down':[0.4, 0.4, 0.4, 0.4, 0.4, 0.6, 0.5, 0.4] # ============================================================================= 'p_min_pu':[0.33, 0.38, 0.4, 0.38, 0.38, 0.5, 0.45, 0.38] } df = pd.DataFrame(data, index=carrier) fuel_costs = network.generators.marginal_cost.groupby( network.generators.carrier).mean()[carrier] df['start_up_fuel'] = df['start_up_fuel'] * fuel_costs df['start_up_cost'] = df['start_up_cost'] + df['start_up_fuel'] df.drop('start_up_fuel', axis=1, inplace=True) for tech in df.index: for limit in df.columns: network.generators.loc[network.generators.carrier == tech, limit] = df.loc[tech, limit] network.generators.start_up_cost = network.generators.start_up_cost\ *network.generators.p_nom network.generators.committable = True
def ramp_limits(network)
Add ramping constraints to thermal power plants. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA Returns -------
2.672175
2.637323
1.013215
if not jsonpath == None: with open(jsonpath) as f: args = json.load(f) return args
def get_args_setting(args, jsonpath='scenario_setting.json')
Get and open json file with scenaio settings of eTraGo ``args``. The settings incluedes all eTraGo specific settings of arguments and parameters for a reproducible calculation. Parameters ---------- json_file : str Default: ``scenario_setting.json`` Name of scenario setting json file Returns ------- args : dict Dictionary of json file
4.016329
6.283728
0.639163
transborder_lines_0 = network.lines[network.lines['bus0'].isin( network.buses.index[network.buses['country_code'] != 'DE'])].index transborder_lines_1 = network.lines[network.lines['bus1'].isin( network.buses.index[network.buses['country_code']!= 'DE'])].index #set country tag for lines network.lines.loc[transborder_lines_0, 'country'] = \ network.buses.loc[network.lines.loc[transborder_lines_0, 'bus0']\ .values, 'country_code'].values network.lines.loc[transborder_lines_1, 'country'] = \ network.buses.loc[network.lines.loc[transborder_lines_1, 'bus1']\ .values, 'country_code'].values network.lines['country'].fillna('DE', inplace=True) doubles = list(set(transborder_lines_0.intersection(transborder_lines_1))) for line in doubles: c_bus0 = network.buses.loc[network.lines.loc[line, 'bus0'], 'country'] c_bus1 = network.buses.loc[network.lines.loc[line, 'bus1'], 'country'] network.lines.loc[line, 'country'] = '{}{}'.format(c_bus0, c_bus1) transborder_links_0 = network.links[network.links['bus0'].isin( network.buses.index[network.buses['country_code']!= 'DE'])].index transborder_links_1 = network.links[network.links['bus1'].isin( network.buses.index[network.buses['country_code'] != 'DE'])].index #set country tag for links network.links.loc[transborder_links_0, 'country'] = \ network.buses.loc[network.links.loc[transborder_links_0, 'bus0']\ .values, 'country_code'].values network.links.loc[transborder_links_1, 'country'] = \ network.buses.loc[network.links.loc[transborder_links_1, 'bus1']\ .values, 'country_code'].values network.links['country'].fillna('DE', inplace=True) doubles = list(set(transborder_links_0.intersection(transborder_links_1))) for link in doubles: c_bus0 = network.buses.loc[network.links.loc[link, 'bus0'], 'country'] c_bus1 = network.buses.loc[network.links.loc[link, 'bus1'], 'country'] network.links.loc[link, 'country'] = '{}{}'.format(c_bus0, c_bus1)
def set_line_country_tags(network)
Set country tag for AC- and DC-lines. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA
1.508114
1.507689
1.000282
network.lines["s_nom_total"] = network.lines.s_nom.copy() network.transformers["s_nom_total"] = network.transformers.s_nom.copy() network.lines["v_nom"] = network.lines.bus0.map( network.buses.v_nom) network.transformers["v_nom0"] = network.transformers.bus0.map( network.buses.v_nom) network.lines.s_nom[network.lines.v_nom == 110] = \ network.lines.s_nom * args['branch_capacity_factor']['HV'] network.lines.s_nom[network.lines.v_nom > 110] = \ network.lines.s_nom * args['branch_capacity_factor']['eHV'] network.transformers.s_nom[network.transformers.v_nom0 == 110]\ = network.transformers.s_nom * args['branch_capacity_factor']['HV'] network.transformers.s_nom[network.transformers.v_nom0 > 110]\ = network.transformers.s_nom * args['branch_capacity_factor']['eHV']
def set_branch_capacity(network, args)
Set branch capacity factor of lines and transformers, different factors for HV (110kV) and eHV (220kV, 380kV). Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA args: dict Settings in appl.py
2.290634
1.819938
1.258633
lines_snom = network.lines.s_nom.sum() links_pnom = network.links.p_nom.sum() def _rule(m): lines_opt = sum(m.passive_branch_s_nom[index] for index in m.passive_branch_s_nom_index) links_opt = sum(m.link_p_nom[index] for index in m.link_p_nom_index) return (lines_opt + links_opt) <= (lines_snom + links_pnom) * share network.model.max_line_ext = Constraint(rule=_rule)
def max_line_ext(network, snapshots, share=1.01)
Sets maximal share of overall network extension as extra functionality in LOPF Parameters ---------- share: float Maximal share of network extension in p.u.
3.899347
4.03535
0.966297
renewables = ['wind_onshore', 'wind_offshore', 'biomass', 'solar', 'run_of_river'] res = list(network.generators.index[ network.generators.carrier.isin(renewables)]) total = list(network.generators.index) snapshots = network.snapshots def _rule(m): renewable_production = sum(m.generator_p[gen, sn] for gen in res for sn in snapshots) total_production = sum(m.generator_p[gen, sn] for gen in total for sn in snapshots) return (renewable_production >= total_production * share) network.model.min_renewable_share = Constraint(rule=_rule)
def min_renewable_share(network, snapshots, share=0.72)
Sets minimal renewable share of generation as extra functionality in LOPF Parameters ---------- share: float Minimal share of renewable generation in p.u.
3.988125
3.975637
1.003141
renewables = ['wind_onshore', 'wind_offshore', 'solar'] res = list(network.generators.index[ (network.generators.carrier.isin(renewables)) & (network.generators.bus.astype(str).isin(network.buses.index[network.buses.country_code == 'DE']))]) # network.import_series_from_dataframe(pd.DataFrame( # index=network.generators_t.p_set.index, # columns=network.generators.index[ # network.generators.carrier=='biomass'], # data=1), "Generator", "p_max_pu") res_potential = (network.generators.p_nom[res]*network.generators_t.p_max_pu[res]).sum() snapshots = network.snapshots for gen in res: def _rule(m, gen): #import pdb; pdb.set_trace() re_n = sum(m.generator_p[gen, sn] for sn in snapshots) potential_n = res_potential[gen] return (re_n >= (1-curtail_max) * potential_n) setattr(network.model, "max_curtailment"+gen, Constraint(res, rule=_rule))
def max_curtailment(network, snapshots, curtail_max=0.03)
each RE can only be curtailed (over all snapshots) with respect to curtail_max Parameters ---------- curtail_max: float maximal curtailment per power plant in p.u.
4.724497
4.670208
1.011625
guild_count = 1e10 least_used = None for node in _nodes: guild_ids = node.player_manager.guild_ids if ignore_ready_status is False and not node.ready.is_set(): continue elif len(guild_ids) < guild_count: guild_count = len(guild_ids) least_used = node if guild_id in guild_ids: return node if least_used is None: raise IndexError("No nodes found.") return least_used
def get_node(guild_id: int, ignore_ready_status: bool = False) -> Node
Gets a node based on a guild ID, useful for noding separation. If the guild ID does not already have a node association, the least used node is returned. Skips over nodes that are not yet ready. Parameters ---------- guild_id : int ignore_ready_status : bool Returns ------- Node
3.258406
3.665847
0.888855
node = get_node(guild_id) voice_ws = node.get_voice_ws(guild_id) await voice_ws.voice_state(guild_id, channel_id)
async def join_voice(guild_id: int, channel_id: int)
Joins a voice channel by ID's. Parameters ---------- guild_id : int channel_id : int
3.777473
3.876988
0.974332
self._is_shutdown = False combo_uri = "ws://{}:{}".format(self.host, self.rest) uri = "ws://{}:{}".format(self.host, self.port) log.debug( "Lavalink WS connecting to %s or %s with headers %s", combo_uri, uri, self.headers ) tasks = tuple({self._multi_try_connect(u) for u in (combo_uri, uri)}) for task in asyncio.as_completed(tasks, timeout=timeout): with contextlib.suppress(Exception): if await cast(Awaitable[Optional[websockets.WebSocketClientProtocol]], task): break else: raise asyncio.TimeoutError log.debug("Creating Lavalink WS listener.") self._listener_task = self.loop.create_task(self.listener()) for data in self._queue: await self.send(data) self.ready.set() self.update_state(NodeState.READY)
async def connect(self, timeout=None)
Connects to the Lavalink player event websocket. Parameters ---------- timeout : int Time after which to timeout on attempting to connect to the Lavalink websocket, ``None`` is considered never, but the underlying code may stop trying past a certain point. Raises ------ asyncio.TimeoutError If the websocket failed to connect after the given time.
4.574697
4.415573
1.036037
while self._ws.open and self._is_shutdown is False: try: data = json.loads(await self._ws.recv()) except websockets.ConnectionClosed: break raw_op = data.get("op") try: op = LavalinkIncomingOp(raw_op) except ValueError: socket_log.debug("Received unknown op: %s", data) else: socket_log.debug("Received known op: %s", data) self.loop.create_task(self._handle_op(op, data)) self.ready.clear() log.debug("Listener exited: ws %s SHUTDOWN %s.", self._ws.open, self._is_shutdown) self.loop.create_task(self._reconnect())
async def listener(self)
Listener task for receiving ops from Lavalink.
3.863347
3.479713
1.110249
voice_ws = self.get_voice_ws(guild_id) await voice_ws.voice_state(guild_id, channel_id)
async def join_voice_channel(self, guild_id, channel_id)
Alternative way to join a voice channel if node is known.
3.850003
3.239176
1.188575
self._is_shutdown = True self.ready.clear() self.update_state(NodeState.DISCONNECTING) await self.player_manager.disconnect() if self._ws is not None and self._ws.open: await self._ws.close() if self._listener_task is not None and not self.loop.is_closed(): self._listener_task.cancel() self._state_handlers = [] _nodes.remove(self) log.debug("Shutdown Lavalink WS.")
async def disconnect(self)
Shuts down and disconnects the websocket.
4.889569
4.790731
1.020631
# prepare State Type Change ComboBox super(StateOverviewController, self).register_view(view) self.allowed_state_classes = self.get_allowed_state_classes(self.model.state) view['entry_name'].connect('focus-out-event', self.on_focus_out) view['entry_name'].connect('key-press-event', self.check_for_enter) if self.model.state.name: view['entry_name'].set_text(self.model.state.name) view['label_id_value'].set_text(self.model.state.state_id) l_store = Gtk.ListStore(GObject.TYPE_STRING) combo = Gtk.ComboBoxText() combo.set_name("state_type_combo") combo.set_focus_on_click(False) combo.set_model(l_store) combo.show_all() view['type_viewport'].add(combo) view['type_viewport'].show() # Prepare label for state_name -> Library states cannot be changed if isinstance(self.model, LibraryStateModel): l_store.prepend(['LIBRARY']) combo.set_sensitive(False) self.view['library_path'].set_text(self.model.state.library_path + "/" + self.model.state.library_name) self.view['library_path'].set_sensitive(True) self.view['library_path'].set_editable(False) view['show_content_checkbutton'].set_active(self.model.meta['gui']['show_content'] is True) view['show_content_checkbutton'].connect('toggled', self.on_toggle_show_content) # self.view['properties_widget'].remove(self.view['show_content_checkbutton']) else: self.view['properties_widget'].remove(self.view['label_library_path']) self.view['properties_widget'].remove(self.view['library_path']) self.view['properties_widget'].remove(self.view['label_show_content']) self.view['properties_widget'].remove(self.view['show_content_checkbutton']) self.view['properties_widget'].resize(2, 5) for state_class in self.allowed_state_classes: if isinstance(self.model.state, state_class): l_store.prepend([state_class.__name__]) else: l_store.append([state_class.__name__]) combo.set_active(0) view['type_combobox'] = combo view['type_combobox'].connect('changed', self.change_type) # Prepare "is start state check button" has_no_start_state_state_types = 
[BarrierConcurrencyState, PreemptiveConcurrencyState] if not self.with_is_start_state_check_box or isinstance(self.model.state, DeciderState) or \ self.model.state.is_root_state or type(self.model.parent.state) in has_no_start_state_state_types: view['is_start_state_checkbutton'].destroy() else: view['is_start_state_checkbutton'].set_active(bool(self.model.is_start)) view['is_start_state_checkbutton'].connect('toggled', self.on_toggle_is_start_state) if isinstance(self.model.state, DeciderState): combo.set_sensitive(False) # in case the state is inside of a library if self.model.state.get_next_upper_library_root_state(): view['entry_name'].set_editable(False) combo.set_sensitive(False) view['is_start_state_checkbutton'].set_sensitive(False) if isinstance(self.model, LibraryStateModel): self.view['show_content_checkbutton'].set_sensitive(False)
def register_view(self, view)
Called when the View was registered Can be used e.g. to connect signals. Here, the destroy signal is connected to close the application :param rafcon.gui.views.state_editor.overview.StateOverviewView view: A state overview view instance
3.006519
2.978143
1.009528
logger.debug("Starting execution of {0}{1}".format(self, " (backwards)" if self.backward_execution else "")) self.setup_run() # data to be accessed by the decider state child_errors = {} final_outcomes_dict = {} decider_state = self.states[UNIQUE_DECIDER_STATE_ID] try: concurrency_history_item = self.setup_forward_or_backward_execution() self.start_child_states(concurrency_history_item, decider_state) # print("bcs1") ####################################################### # wait for all child threads to finish ####################################################### for history_index, state in enumerate(self.states.values()): # skip the decider state if state is not decider_state: self.join_state(state, history_index, concurrency_history_item) self.add_state_execution_output_to_scoped_data(state.output_data, state) self.update_scoped_variables_with_output_dictionary(state.output_data, state) # save the errors of the child state executions for the decider state if 'error' in state.output_data: child_errors[state.state_id] = (state.name, state.output_data['error']) final_outcomes_dict[state.state_id] = (state.name, state.final_outcome) # print("bcs2") ####################################################### # handle backward execution case ####################################################### if self.backward_execution: # print("bcs2.1.") return self.finalize_backward_execution() else: # print("bcs2.2.") self.backward_execution = False # print("bcs3") ####################################################### # execute decider state ####################################################### decider_state_error = self.run_decider_state(decider_state, child_errors, final_outcomes_dict) # print("bcs4") ####################################################### # handle no transition ####################################################### transition = self.get_transition_for_outcome(decider_state, decider_state.final_outcome) if transition is None: # final outcome is set here 
transition = self.handle_no_transition(decider_state) # if the transition is still None, then the child_state was preempted or aborted, in this case return decider_state.state_execution_status = StateExecutionStatus.INACTIVE # print("bcs5") if transition is None: self.output_data["error"] = RuntimeError("state aborted") else: if decider_state_error: self.output_data["error"] = decider_state_error self.final_outcome = self.outcomes[transition.to_outcome] # print("bcs6") return self.finalize_concurrency_state(self.final_outcome) except Exception as e: logger.error("{0} had an internal error: {1}\n{2}".format(self, str(e), str(traceback.format_exc()))) self.output_data["error"] = e self.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE return self.finalize(Outcome(-1, "aborted"))
def run(self)
This defines the sequence of actions that are taken when the barrier concurrency state is executed :return:
3.865596
3.861499
1.001061
decider_state.state_execution_status = StateExecutionStatus.ACTIVE # forward the decider specific data decider_state.child_errors = child_errors decider_state.final_outcomes_dict = final_outcomes_dict # standard state execution decider_state.input_data = self.get_inputs_for_state(decider_state) decider_state.output_data = self.create_output_dictionary_for_state(decider_state) decider_state.start(self.execution_history, backward_execution=False) decider_state.join() decider_state_error = None if decider_state.final_outcome.outcome_id == -1: if 'error' in decider_state.output_data: decider_state_error = decider_state.output_data['error'] # standard output data processing self.add_state_execution_output_to_scoped_data(decider_state.output_data, decider_state) self.update_scoped_variables_with_output_dictionary(decider_state.output_data, decider_state) return decider_state_error
def run_decider_state(self, decider_state, child_errors, final_outcomes_dict)
Runs the decider state of the barrier concurrency state. The decider state decides on which outcome the barrier concurrency is left. :param decider_state: the decider state of the barrier concurrency state :param child_errors: error of the concurrent branches :param final_outcomes_dict: dictionary of all outcomes of the concurrent branches :return:
3.265498
3.549058
0.920103
valid, message = super(BarrierConcurrencyState, self)._check_transition_validity(check_transition) if not valid: return False, message # Only the following transitions are allowed in barrier concurrency states: # - Transitions from the decider state to the parent state\n" # - Transitions from not-decider states to the decider state\n" # - Transitions from not_decider states from aborted/preempted outcomes to the # aborted/preempted outcome of the parent from_state_id = check_transition.from_state to_state_id = check_transition.to_state from_outcome_id = check_transition.from_outcome to_outcome_id = check_transition.to_outcome if from_state_id == UNIQUE_DECIDER_STATE_ID: if to_state_id != self.state_id: return False, "Transition from the decider state must go to the parent state" else: if to_state_id != UNIQUE_DECIDER_STATE_ID: if from_outcome_id not in [-2, -1] or to_outcome_id not in [-2, -1]: return False, "Transition from this state must go to the decider state. The only exception are " \ "transition from aborted/preempted to the parent aborted/preempted outcomes" return True, message
def _check_transition_validity(self, check_transition)
Transition of BarrierConcurrencyStates must least fulfill the condition of a ContainerState. Start transitions are forbidden in the ConcurrencyState. :param check_transition: the transition to check for validity :return:
3.64205
3.404873
1.069658
state_id = super(BarrierConcurrencyState, self).add_state(state) if not storage_load and not self.__init_running and not state.state_id == UNIQUE_DECIDER_STATE_ID: # the transitions must only be created for the initial add_state call and not during each load procedure for o_id, o in list(state.outcomes.items()): if not o_id == -1 and not o_id == -2: self.add_transition(state.state_id, o_id, self.states[UNIQUE_DECIDER_STATE_ID].state_id, None) return state_id
def add_state(self, state, storage_load=False)
Overwrite the parent class add_state method Add automatic transition generation for the decider_state. :param state: The state to be added :return:
6.831786
6.517438
1.048232
# First safely remove all existing states (recursively!), as they will be replaced state_ids = list(self.states.keys()) for state_id in state_ids: # Do not remove decider state, if teh new list of states doesn't contain an alternative one if state_id == UNIQUE_DECIDER_STATE_ID and UNIQUE_DECIDER_STATE_ID not in states: continue self.remove_state(state_id) if states is not None: if not isinstance(states, dict): raise TypeError("states must be of type dict") # Ensure that the decider state is added first, as transition to this states will automatically be # created when adding further states decider_state = states.pop(UNIQUE_DECIDER_STATE_ID, None) if decider_state is not None: self.add_state(decider_state) for state in states.values(): self.add_state(state)
def states(self, states)
Overwrite the setter of the container state base class as special handling for the decider state is needed. :param states: the dictionary of new states :raises exceptions.TypeError: if the states parameter is not of type dict
4.354705
3.969538
1.097031
if state_id == UNIQUE_DECIDER_STATE_ID and force is False: raise AttributeError("You are not allowed to delete the decider state.") else: return ContainerState.remove_state(self, state_id, recursive=recursive, force=force, destroy=destroy)
def remove_state(self, state_id, recursive=True, force=False, destroy=True)
Overwrite the parent class remove state method by checking if the user tries to delete the decider state :param state_id: the id of the state to remove :param recursive: a flag to indicate a recursive disassembling of all substates :param force: a flag to indicate forcefully deletion of all states (important of the decider state in the barrier concurrency state) :param destroy: a flag which indicates if the state should not only be disconnected from the state but also destroyed, including all its state elements :raises exceptions.AttributeError: if the state_id parameter is the decider state
4.691826
3.779212
1.241483
return_value = None for state_id, name_outcome_tuple in self.final_outcomes_dict.items(): if name_outcome_tuple[0] == name: return_value = name_outcome_tuple[1] break return return_value
def get_outcome_for_state_name(self, name)
Returns the final outcome of the child state specified by name. Note: This is utility function that is used by the programmer to make a decision based on the final outcome of its child states. A state is not uniquely specified by the name, but as the programmer normally does not want to use state-ids in his code this utility function was defined. :param name: The name of the state to get the final outcome for. :return:
2.881663
3.218688
0.895291
return_value = None for s_id, name_outcome_tuple in self.final_outcomes_dict.items(): if s_id == state_id: return_value = name_outcome_tuple[1] break return return_value
def get_outcome_for_state_id(self, state_id)
Returns the final outcome of the child state specified by the state_id. :param state_id: The id of the state to get the final outcome for. :return:
3.421376
4.010911
0.853017
return_value = None for state_id, name_outcome_tuple in self.child_errors.items(): if name_outcome_tuple[0] == name: return_value = name_outcome_tuple[1] break return return_value
def get_errors_for_state_name(self, name)
Returns the error message of the child state specified by name. Note: This is utility function that is used by the programmer to make a decision based on the final outcome of its child states. A state is not uniquely specified by the name, but as the programmer normally does not want to use state-ids in his code this utility function was defined. :param name: The name of the state to get the error message for :return:
3.729596
4.032992
0.924771
assert isinstance(controller, ExtendedController) controller.parent = self self.__child_controllers[key] = controller if self.__shortcut_manager is not None and controller not in self.__action_registered_controllers: controller.register_actions(self.__shortcut_manager) self.__action_registered_controllers.append(controller)
def add_controller(self, key, controller)
Add child controller The passed controller is registered as child of self. The register_actions method of the child controller is called, allowing the child controller to register shortcut callbacks. :param key: Name of the controller (unique within self), to later access it again :param ExtendedController controller: Controller to be added as child
4.094392
3.134997
1.306028
# Get name of controller if isinstance(controller, ExtendedController): # print(self.__class__.__name__, " remove ", controller.__class__.__name__) for key, child_controller in self.__child_controllers.items(): if controller is child_controller: break else: return False else: key = controller # print(self.__class__.__name__, " remove key ", key, self.__child_controllers.keys()) if key in self.__child_controllers: if self.__shortcut_manager is not None: self.__action_registered_controllers.remove(self.__child_controllers[key]) self.__child_controllers[key].unregister_actions(self.__shortcut_manager) self.__child_controllers[key].destroy() del self.__child_controllers[key] # print("removed", controller.__class__.__name__ if not isinstance(controller, str) else controller) return True # print("do not remove", controller.__class__.__name__) return False
def remove_controller(self, controller)
Remove child controller and destroy it Removes all references to the child controller and calls destroy() on the controller. :param str | ExtendedController controller: Either the child controller object itself or its registered name :return: Whether the controller was existing :rtype: bool
3.072303
2.814634
1.091546
assert isinstance(shortcut_manager, ShortcutManager) self.__shortcut_manager = shortcut_manager for controller in list(self.__child_controllers.values()): if controller not in self.__action_registered_controllers: try: controller.register_actions(shortcut_manager) except Exception as e: logger.error("Error while registering action for {0}: {1}".format(controller.__class__.__name__, e)) self.__action_registered_controllers.append(controller)
def register_actions(self, shortcut_manager)
Register callback methods for triggered actions in all child controllers. :param rafcon.gui.shortcut_manager.ShortcutManager shortcut_manager: Shortcut Manager Object holding mappings between shortcuts and actions.
2.712991
2.625838
1.03319
self.disconnect_all_signals() controller_names = [key for key in self.__child_controllers] for controller_name in controller_names: self.remove_controller(controller_name) self.relieve_all_models() if self.parent: self.__parent = None if self._view_initialized: # print(self.__class__.__name__, "destroy view", self.view, self) self.view.get_top_widget().destroy() self.view = None self._Observer__PROP_TO_METHS.clear() # prop name --> set of observing methods self._Observer__METH_TO_PROPS.clear() # method --> set of observed properties self._Observer__PAT_TO_METHS.clear() # like __PROP_TO_METHS but only for pattern names (to optimize search) self._Observer__METH_TO_PAT.clear() # method --> pattern self._Observer__PAT_METH_TO_KWARGS.clear() # (pattern, method) --> info self.observe = None else: logger.warning("The controller {0} seems to be destroyed before the view was fully initialized. {1} " "Check if you maybe do not call {2} or there exist most likely threading problems." "".format(self.__class__.__name__, self.model, ExtendedController.register_view))
def destroy(self)
Recursively destroy all Controllers The method remove all controllers, which calls the destroy method of the child controllers. Then, all registered models are relieved and and the widget hand by the initial view argument is destroyed.
6.296669
5.793082
1.086929
def observe_model(self, model):
    """Start observing *model* and remember it for later bulk relief.

    The model is recorded in the internal set of tracked models so that
    ``relieve_all_models`` can release it again.

    :param gtkmvc3.Model model: The model to be observed
    """
    # Track first, then delegate to the gtkmvc3 base implementation.
    self.__registered_models.add(model)
    outcome = super(ExtendedController, self).observe_model(model)
    return outcome
7.207209
8.322705
0.86597
def relieve_model(self, model):
    """Stop observing *model* and drop it from the tracked set.

    Raises ``KeyError`` if the model was never registered (``set.remove`` semantics,
    kept deliberately to surface bookkeeping errors).

    :param gtkmvc3.Model model: The model to be relieved
    """
    self.__registered_models.remove(model)
    outcome = super(ExtendedController, self).relieve_model(model)
    return outcome
6.726155
7.644793
0.879835
def relieve_all_models(self):
    """Relieve all registered models.

    Iterates over a copy of the tracked-model set (``relieve_model`` removes each
    model from the set while we iterate) and relieves every model, then clears the
    set as a final safety measure.

    Bug fix: the previous implementation used ``map(self.relieve_model, ...)``.
    Under Python 3 ``map`` returns a lazy iterator, so ``relieve_model`` was never
    actually invoked and the models stayed observed; an explicit loop is required.
    """
    for model in list(self.__registered_models):
        self.relieve_model(model)
    self.__registered_models.clear()
6.045024
4.621396
1.308052
def change_data_type(self, data_type, default_value=None):
    """Change the data type and the default value together.

    The data type is changed first (the ``data_type`` property setter presumably
    validates it and may raise -- TODO confirm), then the default value is adapted:
    if the (possibly inherited) current default fits the new type it is kept,
    int/float defaults are converted between each other, and anything else is reset
    to ``None``.

    :param data_type: The new data type
    :param default_value: The new default value
    :return:
    """
    old_data_type = self.data_type
    self.data_type = data_type
    if default_value is None:
        # No explicit new default handed in: try to carry over the current one.
        default_value = self.default_value

    if type_helpers.type_inherits_of_type(type(default_value), self._data_type):
        self._default_value = default_value
    else:
        # NOTE(review): `data_type` is compared against the strings "int"/"float"
        # here, while above it is assigned to the property -- the parameter is
        # presumably passed as a type-name string; verify against callers.
        if old_data_type.__name__ == "float" and data_type == "int":
            if self.default_value:
                self._default_value = int(default_value)
            else:
                # Falsy default (None, 0.0, ...) becomes the int zero default.
                self._default_value = 0
        elif old_data_type.__name__ == "int" and data_type == "float":
            if self.default_value:
                self._default_value = float(default_value)
            else:
                self._default_value = 0.0
        else:
            # Incompatible change: no sensible conversion, drop the default.
            self._default_value = None
2.050031
2.176552
0.941871
def check_default_value(self, default_value, data_type=None):
    """Check whether the passed default value suits the passed data type.

    If no data type is passed, the data type of the data port is used. If the default
    value is a string it is converted to the data type; strings starting with ``'$'``
    (references) are returned untouched, and the literal string ``"None"`` maps to
    ``None``.

    :param default_value: The default value to check
    :param data_type: The data type to use
    :raises exceptions.AttributeError: if a string default cannot be converted
    :raises exceptions.TypeError: if a non-string default has the wrong type and
        type-error exceptions are enabled
    :return: The converted default value
    """
    if data_type is None:
        data_type = self.data_type

    if default_value is not None:
        # If the default value is passed as string, we have to convert it to the data type
        if isinstance(default_value, string_types):
            if len(default_value) > 1 and default_value[0] == '$':
                # '$'-prefixed values are references, returned verbatim.
                return default_value
            if default_value == "None":
                return None

            # Bug fix: keep the original string for the error message -- the
            # converter returns None on failure, so formatting the converted
            # value would always print 'None' instead of the offending input.
            original_value = default_value
            default_value = type_helpers.convert_string_value_to_type_value(default_value, data_type)
            if default_value is None:
                raise AttributeError("Could not convert default value '{0}' to data type '{1}'.".format(
                    original_value, data_type))
        else:
            # NOTE(review): the isinstance check uses self.data_type while the
            # messages use the (possibly different) `data_type` argument -- confirm
            # this asymmetry is intended.
            if not isinstance(default_value, self.data_type):
                if self._no_type_error_exceptions:
                    logger.warning("Handed default value '{0}' is of type '{1}' but data port data type is {2} {3}."
                                   "".format(default_value, type(default_value), data_type, self))
                else:
                    raise TypeError("Handed default value '{0}' is of type '{1}' but data port data type is {2}"
                                    "{3} of {4}.".format(default_value, type(default_value), data_type, self,
                                                         self.parent.get_path() if self.parent is not None else ""))
    return default_value
2.796844
2.732151
1.023678
def open_folder_cmd_line(query, default_path=None):
    """Queries the user for a path to open.

    :param str query: Query that asks the user for a specific folder path to be opened
    :param str default_path: Path to use if the user doesn't specify a path
    :return: Input path from the user, or `default_path` if nothing is specified,
        or None if the resulting path is not an existing directory
    :rtype: str
    """
    answer = input(query + ': ')
    # An empty answer falls back to the default path.
    chosen = answer if answer else default_path
    if chosen and os.path.isdir(chosen):
        return chosen
    return None
2.372576
2.573534
0.921913
def create_folder_cmd_line(query, default_name=None, default_path=None):
    """Queries the user for a path to be created.

    :param str query: Query that asks the user for a specific folder path to be created
    :param str default_name: Default name of the folder to be created
    :param str default_path: Path in which the folder is created if the user doesn't specify a path
    :return: Input path from the user, or the default if nothing is specified,
        or None if the directory could not be created
    :rtype: str
    """
    default = os.path.join(default_path, default_name) if default_name and default_path else None
    answer = input(query + ' [default {}]: '.format(default))
    target = answer if answer else default
    if not target:
        return None
    # Create the directory on demand; creation failure maps to None.
    if not os.path.isdir(target):
        try:
            os.makedirs(target)
        except OSError:
            return None
    return target
2.08986
2.307019
0.905871
def save_folder_cmd_line(query, default_name=None, default_path=None):
    """Queries the user for a path or file to be saved into.

    The folder or file does not have to exist and is NOT created by this function,
    but its parent directory must already exist.

    :param str query: Query that asks the user for a specific folder/file path to be created
    :param str default_name: Default name of the folder to be created
    :param str default_path: Path in which the folder is created if the user doesn't specify a path
    :return: Input path from the user, or the default if nothing is specified,
        or None if the parent directory does not exist
    :rtype: str
    """
    default = os.path.join(default_path, default_name) if default_name and default_path else None
    answer = input(query + ' [default {}]: '.format(default))
    target = answer if answer else default
    # Only the parent directory has to exist; the target itself may be new.
    if target and os.path.isdir(os.path.dirname(target)):
        return target
    return None
2.303898
2.54501
0.905261
def register_view(self, view):
    """Called when the View was registered.

    Wires per-row combo-box models for the four transition columns, makes the combos
    read-only inside library states, and connects the edit callbacks otherwise.
    """
    super(StateTransitionsListController, self).register_view(view)

    def cell_text(column, cell_renderer, model, iter, data):
        # Look up which combo model belongs to this row: the transition id and
        # whether it is an internal or an external transition.
        t_id = model.get_value(iter, self.ID_STORAGE_ID)
        in_external = 'external' if model.get_value(iter, self.IS_EXTERNAL_STORAGE_ID) else 'internal'
        # print(t_id, in_external, self.combo[in_external])
        if column.get_title() == 'Source State':
            cell_renderer.set_property("model", self.combo[in_external][t_id]['from_state'])
            cell_renderer.set_property("text-column", 0)
            cell_renderer.set_property("has-entry", False)
        elif column.get_title() == 'Source Outcome':
            cell_renderer.set_property("model", self.combo[in_external][t_id]['from_outcome'])
            cell_renderer.set_property("text-column", 0)
            cell_renderer.set_property("has-entry", False)
        elif column.get_title() == 'Target State':
            cell_renderer.set_property("model", self.combo[in_external][t_id]['to_state'])
            cell_renderer.set_property("text-column", 0)
            cell_renderer.set_property("has-entry", False)
        elif column.get_title() == 'Target Outcome':
            cell_renderer.set_property("model", self.combo[in_external][t_id]['to_outcome'])
            cell_renderer.set_property("text-column", 0)
            cell_renderer.set_property("has-entry", False)
        else:
            logger.warning("Column has no cell_data_func %s %s" % (column.get_name(), column.get_title()))

    # Install the same data func on all four columns; it dispatches on the title.
    view['from_state_col'].set_cell_data_func(view['from_state_combo'], cell_text)
    view['to_state_col'].set_cell_data_func(view['to_state_combo'], cell_text)
    view['from_outcome_col'].set_cell_data_func(view['from_outcome_combo'], cell_text)
    view['to_outcome_col'].set_cell_data_func(view['to_outcome_combo'], cell_text)

    if self.model.state.get_next_upper_library_root_state():
        # Inside a library state the transitions must not be editable.
        view['from_state_combo'].set_property("editable", False)
        view['from_outcome_combo'].set_property("editable", False)
        view['to_state_combo'].set_property("editable", False)
        view['to_outcome_combo'].set_property("editable", False)
    else:
        self.connect_signal(view['from_state_combo'], "edited", self.on_combo_changed_from_state)
        self.connect_signal(view['from_outcome_combo'], "edited", self.on_combo_changed_from_outcome)
        self.connect_signal(view['to_state_combo'], "edited", self.on_combo_changed_to_state)
        self.connect_signal(view['to_outcome_combo'], "edited", self.on_combo_changed_to_outcome)

    view.tree_view.connect("grab-focus", self.on_focus)
    self.update(initiator='"register view"')
2.000536
1.998949
1.000794
def remove_core_element(self, model):
    """Remove the core element of the handed transition model.

    The transition must belong either to the controller's own state or to its
    parent state; this invariant is asserted before delegating the deletion.

    :param TransitionModel model: Transition model which core element should be removed
    :return:
    """
    owner = model.transition.parent
    assert owner is self.model.state or owner is self.model.parent.state
    gui_helper_state_machine.delete_core_element_of_model(model)
10.297846
10.124716
1.0171
def _update_internal_data_base(self):
    """Rebuild the internal combo-box data base for all current transitions.

    Calls ``get_possible_combos_for_transition`` for every internal transition (if
    this is a container state) and every external transition touching this state,
    storing the resulting combo models per transition id in ``self.combo``.
    """
    model = self.model

    # print("clean data base")
    ### FOR COMBOS
    # internal transitions
    # - take all internal states
    # - take all not used internal outcomes of this states
    # external transitions
    # - take all external states
    # - take all external outcomes
    # - take all not used own outcomes
    ### LINKING
    # internal -> transition_id -> from_state = outcome combos
    #          -> ...
    # external -> state -> outcome combos
    self.combo['internal'] = {}
    self.combo['external'] = {}

    self.combo['free_from_states'] = {}
    self.combo['free_from_outcomes_dict'] = {}

    # NOTE(review): the next line is duplicated; one occurrence was presumably
    # meant to initialize 'free_ext_from_states' -- TODO confirm.
    self.combo['free_ext_from_outcomes_dict'] = {}
    self.combo['free_ext_from_outcomes_dict'] = {}

    if isinstance(model, ContainerStateModel):
        # check for internal combos
        for transition_id, transition in model.state.transitions.items():
            self.combo['internal'][transition_id] = {}

            [from_state_combo, from_outcome_combo, to_state_combo, to_outcome_combo,
             free_from_states, free_from_outcomes_dict] = \
                self.get_possible_combos_for_transition(transition, self.model, self.model)

            self.combo['internal'][transition_id]['from_state'] = from_state_combo
            self.combo['internal'][transition_id]['from_outcome'] = from_outcome_combo
            self.combo['internal'][transition_id]['to_state'] = to_state_combo
            self.combo['internal'][transition_id]['to_outcome'] = to_outcome_combo

            # Overwritten on every iteration; only the last transition's free
            # states/outcomes are kept.
            self.combo['free_from_states'] = free_from_states
            self.combo['free_from_outcomes_dict'] = free_from_outcomes_dict

        if not model.state.transitions:
            # No transitions at all: still compute the free states/outcomes.
            [x, y, z, v, free_from_states, free_from_outcomes_dict] = \
                self.get_possible_combos_for_transition(None, self.model, self.model)
            self.combo['free_from_states'] = free_from_states
            self.combo['free_from_outcomes_dict'] = free_from_outcomes_dict

    # TODO check why the can happen should not be handed always the LibraryStateModel
    if not (self.model.state.is_root_state or self.model.state.is_root_state_of_library):
        # check for external combos
        for transition_id, transition in model.parent.state.transitions.items():
            # Only transitions that start or end at this state are relevant.
            if transition.from_state == model.state.state_id or transition.to_state == model.state.state_id:
                self.combo['external'][transition_id] = {}

                [from_state_combo, from_outcome_combo, to_state_combo, to_outcome_combo,
                 free_from_states, free_from_outcomes_dict] = \
                    self.get_possible_combos_for_transition(transition, self.model.parent, self.model, True)

                self.combo['external'][transition_id]['from_state'] = from_state_combo
                self.combo['external'][transition_id]['from_outcome'] = from_outcome_combo
                self.combo['external'][transition_id]['to_state'] = to_state_combo
                self.combo['external'][transition_id]['to_outcome'] = to_outcome_combo

                self.combo['free_ext_from_states'] = free_from_states
                self.combo['free_ext_from_outcomes_dict'] = free_from_outcomes_dict

        if not model.parent.state.transitions:
            [x, y, z, v, free_from_states, free_from_outcomes_dict] = \
                self.get_possible_combos_for_transition(None, self.model.parent, self.model, True)
            self.combo['free_ext_from_states'] = free_from_states
            self.combo['free_ext_from_outcomes_dict'] = free_from_outcomes_dict
2.209808
2.128923
1.037993
def register_actions(self, shortcut_manager):
    """Hook the list controller's add/delete callbacks into the shortcut manager.

    :param rafcon.gui.shortcut_manager.ShortcutManager shortcut_manager:
    """
    list_ctrl = self.trans_list_ctrl
    shortcut_manager.add_callback_for_action("delete", list_ctrl.remove_action_callback)
    shortcut_manager.add_callback_for_action("add", list_ctrl.add_action_callback)
4.755438
4.105953
1.158181
def move_dirty_lock_file(dirty_lock_file, sm_path):
    """Move the dirty-lock file into the state machine folder.

    Once moved, the file is no longer found by the auto-recovery scan of the backup
    root folder. Nothing happens if no lock file is given or the file already lives
    at the target location.
    """
    if dirty_lock_file is None:
        return
    target = os.path.join(sm_path, dirty_lock_file.split(os.sep)[-1])
    if dirty_lock_file == target:
        return
    logger.debug("Move dirty lock from root tmp folder {0} to state machine folder {1}"
                 "".format(dirty_lock_file, target))
    os.rename(dirty_lock_file, target)
2.296429
2.238973
1.025662
def write_backup_meta_data(self):
    """Dump the auto-backup meta data as JSON into the current tmp-storage path."""
    meta_file_path = os.path.join(self._tmp_storage_path, FILE_NAME_AUTO_BACKUP)
    storage.storage_utils.write_dict_to_json(self.meta, meta_file_path)
5.336219
3.804453
1.402625