sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def selection_notification(self, model, property, info):
    """Open the tab of a state as soon as it is the only selected element"""
    sm_m = self.current_state_machine_m
    if model is not sm_m or len(sm_m.ongoing_complex_actions) > 0:
        return
    assert isinstance(model.selection, Selection)
    selection = model.selection
    # exactly one element selected, and that element is a state
    if len(selection) == 1 and len(selection.states) == 1:
        self.activate_state_tab(selection.get_selected_state())
def notify_state_name_change(self, model, prop_name, info):
    """Update the tab label whenever the name (or script text) of a state changed"""
    # ignore notifications that merely reflect execution status updates
    if is_execution_status_update_notification_from_state_machine_model(prop_name, info):
        return
    overview = NotificationOverview(info, False, self.__class__.__name__)
    model_of_change = overview['model'][-1]
    cause = overview['method_name'][-1]
    if cause in ['name', 'script_text'] and isinstance(model_of_change, AbstractStateModel):
        self.update_tab_label(model_of_change)
def update_tab_label(self, state_m):
    """Refresh the tab label belonging to the given state model

    Looks the state up in the open tabs first, then in the closed tabs;
    does nothing if it is tracked in neither.

    :param rafcon.state_machine.states.state.State state_m: State model whose tab label is to be updated
    """
    state_identifier = self.get_state_identifier(state_m)
    if state_identifier in self.tabs:
        tab_info = self.tabs[state_identifier]
    elif state_identifier in self.closed_tabs:
        tab_info = self.closed_tabs[state_identifier]
    else:
        return
    set_tab_label_texts(tab_info['page'].title_label, state_m, tab_info['source_code_view_is_dirty'])
def get_state_identifier_for_page(self, page):
    """Return the state identifier for a given page (None if the page is unknown)"""
    # reference comparison on purpose: we look for this exact page object
    matches = (identifier for identifier, page_info in self.tabs.items()
               if page_info["page"] is page)
    return next(matches, None)
def rename_selected_state(self, key_value, modifier_mask):
    """Shortcut callback that starts a rename of the single selected state

    Opens (and creates, if necessary) the page of the only selected state and
    triggers the rename routine of its state controller.

    :param key_value:
    :param modifier_mask:
    """
    selection = self.current_state_machine_m.selection
    # only act when exactly one element is selected and it is a state
    if len(selection) != 1 or len(selection.states) != 1:
        return
    state_m = selection.get_selected_state()
    self.activate_state_tab(state_m)
    _, state_identifier = self.find_page_of_state_m(state_m)
    self.tabs[state_identifier]['controller'].rename()
def generate_semantic_data_key(used_semantic_keys):
    """Create a new and unique semantic data key

    Returns the first key of the form "semantic data key <n>" (n = 0, 1, 2, ...)
    that is not contained in the handed list of keys.

    :param list used_semantic_keys: Handed list of keys already in use
    :rtype: str
    :return: semantic_data_id
    """
    # a set gives O(1) membership tests instead of scanning the list per probe
    used = set(used_semantic_keys)
    counter = 0
    while "semantic data key " + str(counter) in used:
        counter += 1
    return "semantic data key " + str(counter)
def state_id_generator(size=STATE_ID_LENGTH, chars=string.ascii_uppercase, used_state_ids=None):
    """Create a new and unique state id

    Draws `size` random characters from `chars` and concatenates them; redraws
    as long as the result collides with an id in `used_state_ids`.

    :param size: the length of the generated keys
    :param chars: the set of characters a sample draws from
    :param list used_state_ids: Handed list of ids already in use
    :rtype: str
    :return: new_state_id
    """
    def draw():
        return ''.join(random.choice(chars) for _ in range(size))

    new_state_id = draw()
    if used_state_ids is not None:
        while new_state_id in used_state_ids:
            new_state_id = draw()
    return new_state_id
def global_variable_id_generator(size=10, chars=string.ascii_uppercase):
    """Create a new and unique global variable id

    Samples `size` characters from `chars` until an id is found that is not yet
    contained in the module-level `used_global_variable_ids`; the new id is
    registered there before being returned.

    :param size: the length of the generated keys
    :param chars: the set of characters a sample draws from
    """
    while True:
        candidate = ''.join(random.choice(chars) for _ in range(size))
        if candidate not in used_global_variable_ids:
            break
    used_global_variable_ids.append(candidate)
    return candidate
def set(self):
    """Make this color the current OpenGL draw color"""
    glColor4f(*(self.r, self.g, self.b, self.a))
def _configure(self, *args):
    """Configure viewport

    This method is called when the widget is resized or something triggers a redraw. The method configures the
    view to show all elements in an orthogonal perspective.
    """
    # Obtain a reference to the OpenGL drawable
    # and rendering context.
    gldrawable = self.get_gl_drawable()
    glcontext = self.get_gl_context()
    # logger.debug("configure")
    # OpenGL begin
    if not gldrawable or not gldrawable.gl_begin(glcontext):
        return False
    # Draw on the full viewport
    glViewport(0, 0, self.get_allocation().width, self.get_allocation().height)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    # Orthogonal view with correct aspect ratio
    self._apply_orthogonal_view()
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    # OpenGL end
    gldrawable.gl_end()
    # False: presumably lets further handlers of the signal run -- TODO confirm signal semantics
    return False
def pixel_to_size_ratio(self):
    """Calculate the ratio between pixel and OpenGL distances

    OpenGL keeps its own coordinate system. This method can be used to transform
    between pixel and OpenGL coordinates.

    :return: pixel/size ratio
    """
    left, right = self.get_view_coordinates()[0:2]
    view_width = float(right - left)
    return self.get_allocation().width / view_width
def expose_init(self, *args):
    """Process the drawing routine

    Initializes the drawing of a new frame: clears the buffers and prepares the
    OpenGL name stack used for object picking.
    """
    # Obtain a reference to the OpenGL drawable
    # and rendering context.
    gldrawable = self.get_gl_drawable()
    glcontext = self.get_gl_context()
    # OpenGL begin
    if not gldrawable or not gldrawable.gl_begin(glcontext):
        return False
    # logger.debug("expose_init")
    # Reset buffers
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    # Prepare name stack (names identify drawn objects during selection)
    glInitNames()
    glPushName(0)
    # name 0 is pushed as the base entry; drawn objects get ids starting at 1
    self.name_counter = 1
    return False
def expose_finish(self, *args):
    """Finish drawing process

    Makes the drawn frame visible by swapping buffers (double-buffered contexts)
    or flushing the pipeline, then releases the GL context.
    """
    # Obtain a reference to the OpenGL drawable
    # and rendering context.
    gldrawable = self.get_gl_drawable()
    # glcontext = self.get_gl_context()
    if not gldrawable:
        return
    # Put the buffer on the screen!
    if gldrawable.is_double_buffered():
        gldrawable.swap_buffers()
    else:
        glFlush()
    # OpenGL end
    gldrawable.gl_end()
def draw_state(self, name, pos, size, outcomes=None, input_ports_m=None, output_ports_m=None, selected=False,
               active=False, depth=0):
    """Draw a state with the given properties

    This method is called by the controller to draw the specified (container) state.

    :param name: Name of the state
    :param pos: position (x, y) of the state (upper left corner)
    :param size: size (width, height) of the state
    :param outcomes: outcomes of the state (mapping from outcome id to outcome object)
    :param input_ports_m: input ports of the state
    :param output_ports_m: output ports of the state
    :param selected: whether to display the state as selected
    :param active: in which color to display the state (a StateExecutionStatus value)
    :param depth: The z layer
    :return: The OpenGL id, income position, the positions of the outcomes (as dictionary with outcome id as
        key), the outcome radius and the resize-handle length
    """
    if not outcomes:
        outcomes = []
    if not input_ports_m:
        input_ports_m = []
    if not output_ports_m:
        output_ports_m = []
    # "Generate" unique ID for each object
    opengl_id = self.name_counter
    self.name_counter += 1
    glPushName(opengl_id)
    self._set_closest_stroke_width(1.5)
    width = size[0]
    height = size[1]
    # First draw the face of the rectangle, then the outline
    fill_color = self.state_color
    border_color = self.border_color
    # local import -- presumably avoids a circular import at module load time; TODO confirm
    from rafcon.core.states.state import StateExecutionStatus
    border_width = min(size) / 10.
    if active is StateExecutionStatus.ACTIVE:
        fill_color = self.state_active_color
    elif active is StateExecutionStatus.EXECUTE_CHILDREN:
        fill_color = self.state_child_active_color
    elif active is StateExecutionStatus.WAIT_FOR_NEXT_STATE:
        fill_color = self.state_waiting_for_next_state_color
    if selected:
        border_width *= 2
        border_color = self.border_selected_color
    self._draw_rect(pos[0], pos[0] + width, pos[1] - height, pos[1], depth, border_width,
                    fill_color, border_color)
    # Put the name of the state in the upper left corner of the state
    margin = min(size) / 12.
    font_size = min(size) / 8.
    name = self._shorten_string(name, font_size, width - 2 * margin)
    self._write_string(name, pos[0] + margin, pos[1] - margin, font_size, self.state_name_color, True,
                       False, depth=depth + 0.01)
    # Resize handle: a triangle in the lower right corner of the state
    resize_length = min(width, height) / 8.
    p1 = (pos[0] + width, pos[1] - height)
    p2 = (p1[0] - resize_length, p1[1])
    p3 = (p1[0], p1[1] + resize_length)
    self._draw_triangle(p1, p2, p3, depth + 0.01, border_width, fill_color, border_color)
    # Draw outcomes as circles on the right side of the state
    # Every state has at least the default outcomes "aborted" and "preempted"
    num_outcomes = max(0, len(outcomes))
    if num_outcomes < 2:
        # logger.warning("Expecting at least 2 outcomes, found {num:d}".format(num=num_outcomes))
        pass
    else:
        # the two default outcomes are not distributed along the right edge
        num_outcomes -= 2
    i = 0
    outcome_pos = {}
    outcome_radius = min(min(height, width) / 23., min(height, width) / (2. * num_outcomes + 3))
    for key in outcomes:
        # Color of outcome is defined by its type, "aborted", "preempted" or else
        outcome_name = outcomes[key].name
        color = self.outcome_plain_color
        # Distribute outcomes (as circles) on the right edge of the state
        outcome_x = pos[0] + width
        outcome_y = pos[1] - height / (num_outcomes + 1) * (i + 1)
        if outcome_name in ["aborted", "preempted"]:
            # default outcomes sit at fixed spots near the upper right corner instead
            step = min(size) / 10.
            outcome_y = pos[1]
            if outcome_name == "aborted":
                outcome_x -= step
                color = self.outcome_aborted_color
            else:
                outcome_x -= 3 * step
                color = self.outcome_preempted_color
        else:
            # non-default outcomes additionally get a (shortened) text label
            outcome_font_size = font_size * 0.5
            max_outcome_name_width = width / 2.
            outcome_name = self._shorten_string(outcome_name, outcome_font_size, max_outcome_name_width)
            outcome_name_pos_x = outcome_x - margin
            outcome_name_pos_y = outcome_y + outcome_font_size * 0.65
            self._write_string(outcome_name, outcome_name_pos_x, outcome_name_pos_y, outcome_font_size,
                               self.state_name_color, align_right=True, depth=depth + 0.1)
            # only non-default outcomes advance the distribution index
            i += 1
        color.set()
        outcome_pos[key] = (outcome_x, outcome_y)
        self._draw_circle(outcome_x, outcome_y, outcome_radius, depth + 0.1, fill_color=color)
    # Draw "income" as a half circle on the left center
    income_pos = (pos[0], pos[1] - height / 2)
    self._draw_circle(income_pos[0], income_pos[1], outcome_radius, depth + 0.1, fill_color=self.income_color,
                      from_angle=1.5 * pi, to_angle=0.5 * pi)
    # Draw input and output data ports (in a box below the state)
    port_radius = margin / 4.
    num_ports = len(input_ports_m) + len(output_ports_m)
    if num_ports > 0:
        max_name_width = 0
        margin = min(size) / 10.0
        max_allowed_name_width = 0.7 * width - margin
        str_height = height / 12.0
        # Determine the maximum width of all port labels
        for ports in [input_ports_m, output_ports_m]:
            for port_m in ports:
                str_width = self._string_width(port_m.data_port.name, str_height)
                if str_width > max_name_width:
                    max_name_width = str_width
        # fill_color = self.port_color if not selected else self.state_selected_color
        port_width = min(max_name_width, max_allowed_name_width)
        port_pos_left_x = pos[0] + (width - port_width - margin) / 2
        port_pos_right_x = port_pos_left_x + port_width + margin
        port_pos_bottom_y = pos[1] - height - num_ports * (str_height + margin)
        self._draw_rect(port_pos_left_x, port_pos_right_x, port_pos_bottom_y, pos[1] - height, depth, border_width,
                        fill_color, border_color)

        def draw_port(port_m, num, is_input):
            # Draw label and connector circle of one data port; returns the connector position
            port_name = self._shorten_string(port_m.data_port.name, str_height, port_width)
            string_pos_x = port_pos_left_x + margin / 2.
            if not is_input:
                string_pos_x += port_width
            string_pos_y = pos[1] - height - margin / 2. - num * (str_height + margin)
            self._write_string(port_name, string_pos_x, string_pos_y,
                               str_height, self.port_name_color, False, not is_input, depth + 0.01)
            # input connectors sit on the left edge of the box, output connectors on the right
            circle_pos_x = port_pos_left_x if is_input else port_pos_right_x
            circle_pos_y = string_pos_y - margin / 2.
            self._draw_circle(circle_pos_x, circle_pos_y, port_radius, depth + 0.02, stroke_width=margin / 5.,
                              border_color=self.port_name_color, fill_color=self.port_connector_fill_color)
            return circle_pos_x, circle_pos_y

        output_num = 0
        for port_m in input_ports_m:
            con_pos_x, con_pos_y = draw_port(port_m, output_num, True)
            # store connector geometry so it can be used for hit testing later
            port_m.temp['gui']['editor']['outer_connector_radius'] = port_radius
            port_m.temp['gui']['editor']['outer_connector_pos'] = (con_pos_x, con_pos_y)
            output_num += 1
        for port_m in output_ports_m:
            con_pos_x, con_pos_y = draw_port(port_m, output_num, False)
            port_m.temp['gui']['editor']['outer_connector_radius'] = port_radius
            port_m.temp['gui']['editor']['outer_connector_pos'] = (con_pos_x, con_pos_y)
            output_num += 1
    glPopName()
    return opengl_id, income_pos, outcome_pos, outcome_radius, resize_length
def draw_transition(self, from_pos, to_pos, width, waypoints=None, selected=False, depth=0):
    """Draw a transition with the given properties

    This method is called by the controller to draw the specified transition as a poly-line
    with an arrow head at its end.

    :param tuple from_pos: Starting position
    :param tuple to_pos: Ending position
    :param float width: A measure for the width of a transition line
    :param list waypoints: A list of optional waypoints to connect in between
    :param bool selected: Whether the transition shall be shown as active/selected
    :param float depth: The Z layer
    :return: The OpenGL id of the transition
    :rtype: int
    """
    if not waypoints:
        waypoints = []
    # "Generate" unique ID for each object (named opengl_id: do not shadow the builtin `id`)
    opengl_id = self.name_counter
    self.name_counter += 1
    glPushName(opengl_id)
    self._set_closest_stroke_width(width)
    color = self.transition_color if not selected else self.transition_selected_color
    color.set()
    points = [from_pos]
    points.extend(waypoints)
    last_p = to_pos  # Transition endpoint
    sec_last_p = points[len(points) - 1]  # Point before endpoint
    # Calculate max possible arrow length
    length = min(width, dist(sec_last_p, last_p) / 2.)
    mid, p2, p3 = self._calculate_arrow_points(last_p, sec_last_p, length)
    self._draw_triangle(last_p, p2, p3, depth, fill_color=color)
    points.append(mid)
    # Draw the transition as simple straight lines connecting start-, way- and endpoints
    glBegin(GL_LINE_STRIP)
    for point in points:
        glVertex3f(point[0], point[1], depth)
    glEnd()
    # Waypoints are drawn as small circles on the line
    self._set_closest_stroke_width(width / 1.5)
    for waypoint in waypoints:
        self._draw_circle(waypoint[0], waypoint[1], width / 6., depth + 1, fill_color=color)
    glPopName()
    return opengl_id
def _write_string(self, string, pos_x, pos_y, height, color, bold=False, align_right=False, depth=0.):
    """Write a string

    Writes a string with a simple OpenGL method in the given size at the given position.

    :param string: The string to draw
    :param pos_x: x starting position
    :param pos_y: y starting position (top edge of the text)
    :param height: desired height
    :param color: color used for the text
    :param bold: flag whether to use a bold font (emulated via a thicker stroke)
    :param align_right: if True, the text ends at pos_x instead of starting there
    :param depth: the Z layer
    """
    # bold text is emulated with a thicker stroke width
    stroke_width = height / 8.
    if bold:
        stroke_width = height / 5.
    color.set()
    self._set_closest_stroke_width(stroke_width)
    glMatrixMode(GL_MODELVIEW)
    glPushMatrix()
    # glutStrokeCharacter draws above the current origin, so shift down by one text height
    pos_y -= height
    if not align_right:
        glTranslatef(pos_x, pos_y, depth)
    else:
        width = self._string_width(string, height)
        glTranslatef(pos_x - width, pos_y, depth)
    font_height = 119.5  # According to https://www.opengl.org/resources/libraries/glut/spec3/node78.html
    scale_factor = height / font_height
    glScalef(scale_factor, scale_factor, scale_factor)
    for c in string:
        # glTranslatef(0, 0, 0)
        # glutStrokeCharacter advances the drawing position by the character width itself
        glutStrokeCharacter(GLUT_STROKE_ROMAN, ord(c))
        # width = glutStrokeWidth(GLUT_STROKE_ROMAN, ord(c))
    glPopMatrix()
def prepare_selection(self, pos_x, pos_y, width, height):
    """Prepares the selection rendering

    In order to find out the object being clicked on, the scene has to be rendered again around the clicked
    position.

    :param pos_x: x coordinate of the clicked position
    :param pos_y: y coordinate of the clicked position
    :param width: width of the picking region (clamped to at least 1)
    :param height: height of the picking region (clamped to at least 1)
    """
    # size the hit buffer proportionally to the number of named objects drawn
    glSelectBuffer(self.name_counter * 6)
    viewport = glGetInteger(GL_VIEWPORT)
    glMatrixMode(GL_PROJECTION)
    # pushed matrix is popped again after the selection render pass
    glPushMatrix()
    glRenderMode(GL_SELECT)
    glLoadIdentity()
    # picking regions must have a positive extent
    if width < 1:
        width = 1
    if height < 1:
        height = 1
    # gluPickMatrix expects the center of the picking region
    pos_x += width / 2.
    pos_y += height / 2.
    # The system y axis is inverse to the OpenGL y axis
    gluPickMatrix(pos_x, viewport[3] - pos_y + viewport[1], width, height, viewport)
    self._apply_orthogonal_view()
def find_selection():
    """Finds the selected ids

    After the scene has been rendered again in selection mode, this method gathers and returns the ids of the
    selected objects and restores the matrices.

    :return: The selection (hit) stack
    """
    # NOTE(review): defined without `self` -- if this is meant to be an instance
    # method, calling it on an instance will fail; confirm against callers
    # switching back to GL_RENDER returns the hit records collected in GL_SELECT mode
    hits = glRenderMode(GL_RENDER)
    glMatrixMode(GL_PROJECTION)
    glPopMatrix()
    glMatrixMode(GL_MODELVIEW)
    return hits
def _set_closest_stroke_width(self, width):
    """Sets the line width to the closest supported one

    Not all line widths are supported. This function queries both minimum and maximum as well as the step size of
    the line width and calculates the width, which is closest to the given one. This width is then set.

    :param width: The desired line width
    """
    # Adapt line width to zooming level
    width *= self.pixel_to_size_ratio() / 6.
    stroke_width_range = glGetFloatv(GL_LINE_WIDTH_RANGE)
    stroke_width_granularity = glGetFloatv(GL_LINE_WIDTH_GRANULARITY)
    # clamp to the supported range
    if width < stroke_width_range[0]:
        glLineWidth(stroke_width_range[0])
        return
    if width > stroke_width_range[1]:
        glLineWidth(stroke_width_range[1])
        return
    # round to the nearest multiple of the supported granularity
    glLineWidth(round(width / stroke_width_granularity) * stroke_width_granularity)
def _draw_circle(self, pos_x, pos_y, radius, depth, stroke_width=1., fill_color=None, border_color=None,
                 from_angle=0., to_angle=2 * pi):
    """Draws a circle

    Draws a circle (or circular arc) approximated by line segments at the desired position with the desired size.

    :param float pos_x: Center x position
    :param float pos_y: Center y position
    :param float radius: Radius of the circle
    :param float depth: The Z layer
    :param float stroke_width: Width of the border line
    :param fill_color: Fill color; the interior is only drawn if this is not None
    :param border_color: Border color; the outline is only drawn if this is not None
    :param float from_angle: Start angle of the arc in radians
    :param float to_angle: End angle of the arc in radians
    :return: True if the circle was (at least partially) visible and thus drawn, False otherwise
    """
    visible = False
    # Check whether circle center is in the viewport
    if not self.point_outside_view((pos_x, pos_y)):
        visible = True
    # Check whether at least one point on the border of the circle is within the viewport
    if not visible:
        for i in range(0, 8):
            angle = 2 * pi / 8. * i
            x = pos_x + cos(angle) * radius
            y = pos_y + sin(angle) * radius
            if not self.point_outside_view((x, y)):
                visible = True
                break
    if not visible:
        return False
    angle_sum = to_angle - from_angle
    if angle_sum < 0:
        # arc wraps around 0: use the complementary positive angle
        angle_sum = float(to_angle + 2 * pi - from_angle)
    # more segments for larger on-screen radii, but at least 4; scaled by the arc fraction
    segments = self.pixel_to_size_ratio() * radius * 1.5
    segments = max(4, segments)
    segments = int(round(segments * angle_sum / (2. * pi)))
    types = []
    if fill_color is not None:
        types.append(GL_POLYGON)
    if border_color is not None:
        types.append(GL_LINE_LOOP)
    # draw the interior (GL_POLYGON) and/or the outline (GL_LINE_LOOP)
    for type in types:
        if type == GL_POLYGON:
            fill_color.set()
        else:
            self._set_closest_stroke_width(stroke_width)
            border_color.set()
        glBegin(type)
        angle = from_angle
        for i in range(0, segments):
            x = pos_x + cos(angle) * radius
            y = pos_y + sin(angle) * radius
            glVertex3f(x, y, depth)
            angle += angle_sum / (segments - 1)
            if angle > 2 * pi:
                angle -= 2 * pi
            # force the last vertex to land exactly on to_angle
            if i == segments - 2:
                angle = to_angle
        glEnd()
    return True
def _apply_orthogonal_view(self):
    """Orthogonal view with respect to current aspect ratio

    Applies a glOrtho projection using the current view coordinates; the z
    clipping range is fixed to [-10, 0].
    """
    left, right, bottom, top = self.get_view_coordinates()
    glOrtho(left, right, bottom, top, -10, 0)
def tsam_cluster(timeseries_df, typical_periods=10, how='daily'):
    """Cluster a timeseries DataFrame into typical periods using tsam.

    Parameters
    ----------
    timeseries_df : pd.DataFrame
        DataFrame with timeseries to cluster
    typical_periods : int
        Number of typical periods to aggregate to
    how : str
        Period length, either 'daily' (24 h) or 'weekly' (168 h)

    Returns
    -------
    timeseries : pd.DataFrame
        Clustered timeseries
    cluster_weights : dict
        Number of occurrences of each cluster
    dates : pd.DatetimeIndex
        Original timestamps of all hours belonging to the cluster medoids
    hours : int
        Number of hours per period
    """
    if how == 'daily':
        hours = 24
    elif how == 'weekly':
        hours = 168
    else:
        # previously an unknown value crashed later with a NameError on `hours`
        raise ValueError("Unknown clustering period {0!r}; use 'daily' or 'weekly'".format(how))
    aggregation = tsam.TimeSeriesAggregation(
        timeseries_df,
        noTypicalPeriods=typical_periods,
        rescaleClusterPeriods=False,
        hoursPerPeriod=hours,
        clusterMethod='hierarchical')
    timeseries = aggregation.createTypicalPeriods()
    cluster_weights = aggregation.clusterPeriodNoOccur
    # The medoids (clusterCenterIndices) are 0-based period indices; expand each
    # one to the row positions of all hours contained in that period.
    nrhours = []
    for center in aggregation.clusterCenterIndices:
        first_hour = center * hours
        nrhours.extend(range(first_hour, first_hour + hours))
    # map the row positions back to the original DatetimeIndex
    dates = timeseries_df.iloc[nrhours].index
    return timeseries, cluster_weights, dates, hours
def update_data_frames(network, cluster_weights, dates, hours):
    """Update snapshots and snapshot weightings of the network from the clustering result.

    Restricts the network's snapshots to the medoid timestamps, assigns each
    snapshot the weight (number of occurrences) of its cluster and sorts both
    snapshots and weightings chronologically.

    Parameters
    ----------
    network : pyPSA network object
    cluster_weights : dict
        Number of occurrences per cluster
    dates : pd.DatetimeIndex
        Timestamps of the medoid hours
    hours : int
        Number of hours per clustered period

    Returns
    -------
    network
    """
    network.snapshot_weightings = network.snapshot_weightings.loc[dates]
    network.snapshots = network.snapshot_weightings.index
    # expand each cluster weight to every hour of its period
    snapshot_weightings = []
    for weight in cluster_weights.values():
        snapshot_weightings.extend([weight] * hours)
    for i in range(len(network.snapshot_weightings)):
        # .iloc: positional assignment (plain `[i]` on a DatetimeIndex-ed Series is ambiguous)
        network.snapshot_weightings.iloc[i] = snapshot_weightings[i]
    # sort_index/sort_values return new objects -- assign them back
    # (previously the results were discarded, so no reordering ever happened)
    network.snapshot_weightings = network.snapshot_weightings.sort_index()
    network.snapshots = network.snapshots.sort_values()
    return network
def daily_bounds(network, snapshots):
    """Couple the storage level at the start and the end of each clustered day.

    For every storage unit, an equality constraint forces the state of charge in
    the first hour of each clustered day to equal the state of charge 23 hours
    later, i.e. in the last hour of that day.
    """
    sus = network.storage_units
    # take every first hour of the clustered days
    network.model.period_starts = network.snapshot_weightings.index[0::24]
    network.model.storages = sus.index
    def day_rule(m, s, p):
        """
        Sets the soc of the every first hour to the soc of the last hour
        of the day (i.e. + 23 hours)
        """
        return (
            m.state_of_charge[s, p] ==
            m.state_of_charge[s, p + pd.Timedelta(hours=23)])
    network.model.period_bound = po.Constraint(
        network.model.storages, network.model.period_starts, rule=day_rule)
def setup_installation():
    """Install necessary GUI resources

    By default, RAFCON should be installed via `setup.py` (`pip install rafcon`). Thereby, all resources are being
    installed. However, if this is not the case, one can set the `RAFCON_CHECK_INSTALLATION` env variable to `True`.
    Then, the installation will be performed before starting the GUI.
    """
    # only act when the env variable is set to the literal string "True"
    if os.environ.get("RAFCON_CHECK_INSTALLATION", False) != "True":
        return
    rafcon_root = os.path.dirname(rafcon.__file__)
    installation.assets_folder = os.path.join(rafcon_root, 'gui', 'assets')
    installation.share_folder = os.path.join(os.path.dirname(os.path.dirname(rafcon_root)), 'share')
    installation.install_fonts(logger, restart=True)
    installation.install_gtk_source_view_styles(logger)
    installation.install_libraries(logger, overwrite=False)
def setup_argument_parser():
    """Sets up the parser with the required arguments

    :return: The parser object
    """
    default_config_path = filesystem.get_default_config_path()
    # make sure the default config directory exists before it is used as a default below
    filesystem.create_path(default_config_path)
    parser = core_singletons.argument_parser
    parser.add_argument('-n', '--new', action='store_true', help=_("whether to create a new state-machine"))
    parser.add_argument('-o', '--open', action='store', nargs='*', type=parse_state_machine_path,
                        dest='state_machine_paths', metavar='path',
                        help=_("specify directories of state-machines that shall be opened. Paths must contain a "
                               "statemachine.json file"))
    parser.add_argument('-c', '--config', action='store', type=config_path, metavar='path', dest='config_path',
                        default=default_config_path, nargs='?', const=default_config_path,
                        help=_("path to the configuration file config.yaml. Use 'None' to prevent the generation of a "
                               "config file and use the default configuration. Default: {0}"
                               "").format(default_config_path))
    parser.add_argument('-g', '--gui_config', action='store', type=config_path, metavar='path', dest='gui_config_path',
                        default=default_config_path, nargs='?', const=default_config_path,
                        help=_("path to the configuration file gui_config.yaml. "
                               "Use 'None' to prevent the generation of a config file and use the default "
                               "configuration. Default: {0}").format(default_config_path))
    parser.add_argument('-ss', '--start_state_machine', dest='start_state_machine_flag', action='store_true',
                        help=_("a flag to specify if the first state machine of -o should be started after opening"))
    parser.add_argument('-s', '--start_state_path', metavar='path', dest='start_state_path', default=None, nargs='?',
                        help=_("path within a state machine to the state that should be launched which consists of "
                               "state ids e.g. QPOXGD/YVWJKZ where QPOXGD is the root state and YVWJKZ its child states"
                               " to start from."))
    parser.add_argument('-q', '--quit', dest='quit_flag', action='store_true',
                        help=_("a flag to specify if the gui should quit after launching a state machine"))
    return parser
def register_view(self, view):
    """Called when the View was registered

    Connects the tree view columns to their storage ids, makes the text cells
    editable where allowed and hooks up the edit-apply callbacks.
    """
    super(ScopedVariableListController, self).register_view(view)
    # cells are only editable for states that are neither library states nor
    # contained in a library state machine (condition was duplicated three times before)
    editable = not isinstance(self.model.state, LibraryState) and \
        self.model.state.get_next_upper_library_root_state() is None
    view['name_col'].add_attribute(view['name_text'], 'text', self.NAME_STORAGE_ID)
    if editable:
        view['name_text'].set_property("editable", True)
    view['data_type_col'].add_attribute(view['data_type_text'], 'text', self.DATA_TYPE_NAME_STORAGE_ID)
    if editable:
        view['data_type_text'].set_property("editable", True)
    if isinstance(view, ScopedVariablesListView):
        view['default_value_col'].add_attribute(view['default_value_text'], 'text', self.DEFAULT_VALUE_STORAGE_ID)
        if editable:
            view['default_value_text'].set_property("editable", True)
        self._apply_value_on_edited_and_focus_out(view['default_value_text'],
                                                  self.apply_new_scoped_variable_default_value)
    self._apply_value_on_edited_and_focus_out(view['name_text'], self.apply_new_scoped_variable_name)
    self._apply_value_on_edited_and_focus_out(view['data_type_text'], self.apply_new_scoped_variable_type)
    if isinstance(self.model, ContainerStateModel):
        self.reload_scoped_variables_list_store()
def register_actions(self, shortcut_manager):
    """Register callback methods for triggered actions

    :param rafcon.gui.shortcut_manager.ShortcutManager shortcut_manager: Shortcut Manager Object holding mappings
        between shortcuts and actions.
    """
    # Map every supported action name onto its handler and register them in order
    action_callbacks = (("delete", self.remove_action_callback),
                        ("add", self.add_action_callback),
                        ("copy", self.copy_action_callback),
                        ("cut", self.cut_action_callback),
                        ("paste", self.paste_action_callback))
    for action_name, callback in action_callbacks:
        shortcut_manager.add_callback_for_action(action_name, callback)
def paste_action_callback(self, *event):
    """Callback method for paste action

    Triggers a clipboard paste of the copied scoped variables. If the
    clipboard holds no scoped variables but does hold other port types, the
    paste is performed with the convert flag, which inserts scoped variables
    carrying the same names, data types and default values as the copied
    ports of differing type.
    """
    # Ignore the event while an entry widget is focused or the event does
    # not belong to this view
    if not react_to_event(self.view, self.tree_view, event) or self.active_entry_widget is not None:
        return

    model_copies = global_clipboard.model_copies
    other_port_types_copied = model_copies["input_data_ports"] or model_copies["output_data_ports"]
    if not model_copies["scoped_variables"] and other_port_types_copied:
        global_clipboard.paste(self.model, limited=['scoped_variables'], convert=True)
    else:
        global_clipboard.paste(self.model, limited=['scoped_variables'])
    return True
def on_add(self, widget, data=None):
    """Create a new scoped variable with default values"""
    # Scoped variables only exist on container states
    if not isinstance(self.model, ContainerStateModel):
        return None
    try:
        new_var_ids = gui_helper_state_machine.add_scoped_variable_to_selected_states(selected_states=[self.model])
        if new_var_ids:
            # Focus the freshly created variable in the tree view
            self.select_entry(new_var_ids[self.model.state])
    except ValueError as err:
        logger.warning("The scoped variable couldn't be added: {0}".format(err))
        return False
    return True
def remove_core_element(self, model):
    """Remove respective core element of handed scoped variable model

    The scoped variable must belong to the state this controller operates on;
    the actual removal is delegated to the state machine helper.

    :param ScopedVariableModel model: Scoped variable model which core element should be removed
    :return: None
    """
    # Guard against removing a variable that belongs to a different state
    assert model.scoped_variable.parent is self.model.state
    gui_helper_state_machine.delete_core_element_of_model(model)
def apply_new_scoped_variable_name(self, path, new_name):
    """Applies the new name of the scoped variable defined by path

    :param str path: The path identifying the edited variable
    :param str new_name: New name
    """
    variable_id = self.list_store[path][self.ID_STORAGE_ID]
    try:
        scoped_variable = self.model.state.scoped_variables[variable_id]
        # Only write the name if it actually changed, to avoid needless updates
        if new_name != scoped_variable.name:
            scoped_variable.name = new_name
    except TypeError as e:
        logger.error("Error while changing port name: {0}".format(e))
def apply_new_scoped_variable_type(self, path, new_variable_type_str):
    """Applies the new data type of the scoped variable defined by path

    :param str path: The path identifying the edited variable
    :param str new_variable_type_str: New data type as str
    """
    variable_id = self.list_store[path][self.ID_STORAGE_ID]
    try:
        scoped_variable = self.model.state.scoped_variables[variable_id]
        # Compare against the current type name to skip no-op changes
        if scoped_variable.data_type.__name__ != new_variable_type_str:
            scoped_variable.change_data_type(new_variable_type_str)
    except ValueError as e:
        logger.error("Error while changing data type: {0}".format(e))
def apply_new_scoped_variable_default_value(self, path, new_default_value_str):
    """Applies the new default value of the scoped variable defined by path

    :param str path: The path identifying the edited variable
    :param str new_default_value_str: New default value as string
    """
    # NOTE: the edited row is resolved via the current cursor selection,
    # not via the `path` argument — presumably intentional, TODO confirm
    variable_id = self.get_list_store_row_from_cursor_selection()[self.ID_STORAGE_ID]
    try:
        scoped_variable = self.model.state.scoped_variables[variable_id]
        # Only assign when the string representation actually differs
        if new_default_value_str != str(scoped_variable.default_value):
            scoped_variable.default_value = new_default_value_str
    except (TypeError, AttributeError) as e:
        logger.error("Error while changing default value: {0}".format(e))
def reload_scoped_variables_list_store(self):
    """Reloads the scoped variable list store from the data port models

    Builds a temporary list store from all scoped variable models of the
    container state, sorts it by variable name and copies the sorted rows
    into the controller's list store.

    :raises RuntimeError: if the controller's model is not a ContainerStateModel
    """
    if isinstance(self.model, ContainerStateModel):
        tmp = self.get_new_list_store()
        for sv_model in self.model.scoped_variables:
            data_type = sv_model.scoped_variable.data_type
            # get name of type (e.g. ndarray)
            data_type_name = data_type.__name__
            # get module of type, e.g. numpy
            data_type_module = data_type.__module__
            # if the type is not a builtin type, also show the module
            # ('__builtin__' covers Python 2, 'builtins' Python 3)
            if data_type_module not in ['__builtin__', 'builtins']:
                data_type_name = data_type_module + '.' + data_type_name
            tmp.append([sv_model.scoped_variable.name, data_type_name,
                        str(sv_model.scoped_variable.default_value), sv_model.scoped_variable.data_port_id,
                        sv_model])
        # Sort the temporary store by name (column 0) using the custom comparator
        tms = Gtk.TreeModelSort(model=tmp)
        tms.set_sort_column_id(0, Gtk.SortType.ASCENDING)
        tms.set_sort_func(0, compare_variables)
        tms.sort_column_changed()
        tmp = tms
        # Replace the visible store contents with the sorted rows
        self.list_store.clear()
        for elem in tmp:
            self.list_store.append(elem[:])
    else:
        raise RuntimeError("The reload_scoped_variables_list_store function should be never called for "
                           "a non Container State Model")
def store_session():
    """ Stores reference backup information for all open tabs into runtime config

    The backup of never stored tabs (state machines) and not stored state machine changes will be triggered a last
    time to secure data lose.
    """
    # Imports are local to avoid import cycles with the GUI singletons
    from rafcon.gui.singleton import state_machine_manager_model, global_runtime_config
    from rafcon.gui.models.auto_backup import AutoBackupModel
    from rafcon.gui.models import AbstractStateModel
    from rafcon.gui.singleton import main_window_controller

    # check if there are dirty state machines -> use backup file structure maybe it is already stored
    for sm_m in state_machine_manager_model.state_machines.values():
        if sm_m.auto_backup:
            if sm_m.state_machine.marked_dirty:
                # refresh the existing backup with the unsaved changes
                sm_m.auto_backup.perform_temp_storage()
        else:
            # generate a backup
            sm_m.auto_backup = AutoBackupModel(sm_m)

    # collect order of tab state machine ids from state machines editor and find selected state machine page number
    state_machines_editor_ctrl = main_window_controller.get_controller('state_machines_editor_ctrl')
    number_of_pages = state_machines_editor_ctrl.view['notebook'].get_n_pages()
    selected_page_number = None
    list_of_tab_meta = []
    for page_number in range(number_of_pages):
        page = state_machines_editor_ctrl.view['notebook'].get_nth_page(page_number)
        sm_id = state_machines_editor_ctrl.get_state_machine_id_for_page(page)
        if sm_id == state_machine_manager_model.selected_state_machine_id:
            selected_page_number = page_number

        # backup state machine selection; only state models are persisted
        selection_of_sm = []
        for model in state_machine_manager_model.state_machines[sm_id].selection.get_all():
            if isinstance(model, AbstractStateModel):
                # TODO extend to full range of selection -> see core_identifier action-module
                selection_of_sm.append(model.state.get_path())

        list_of_tab_meta.append({'backup_meta': state_machine_manager_model.state_machines[sm_id].auto_backup.meta,
                                 'selection': selection_of_sm})

    # store final state machine backup meta data to backup session tabs and selection for the next run
    global_runtime_config.set_config_value('open_tabs', list_of_tab_meta)
    global_runtime_config.set_config_value('selected_state_machine_page_number', selected_page_number)
def restore_session_from_runtime_config():
    """ Restore stored tabs from runtime config

    The method checks if the last status of a state machine is in the backup or in tis original path and loads it
    from there. The original path of these state machines are also insert into the recently opened state machines
    list.
    """
    # TODO add a dirty lock for a crashed rafcon instance also into backup session feature
    # TODO in case a dialog is needed to give the user control
    # TODO combine this and auto-backup in one structure/controller/observer
    from rafcon.gui.singleton import state_machine_manager_model, global_runtime_config, global_gui_config
    from rafcon.gui.models.auto_backup import recover_state_machine_from_backup
    from rafcon.gui.singleton import main_window_controller

    # check if session storage exists
    open_tabs = global_runtime_config.get_config_value('open_tabs', None)
    if open_tabs is None:
        logger.info("No session found for recovery")
        return

    # load and restore state machines like they were opened before
    open_sm = []
    for idx, tab_meta_dict in enumerate(open_tabs):
        start_time = time.time()
        backup_meta_dict = tab_meta_dict['backup_meta']
        from_backup_path = None
        # keep a placeholder so open_sm stays index-aligned with open_tabs
        open_sm.append(None)
        # TODO do this decision before storing or maybe store the last stored time in the auto backup?!
        # pick folder name dependent on time, and backup meta data existence
        # problem is that the backup time is maybe not the best choice
        if 'last_backup' in backup_meta_dict:
            last_backup_time = storage_utils.get_float_time_for_string(backup_meta_dict['last_backup']['time'])
            if 'last_saved' in backup_meta_dict:
                last_save_time = storage_utils.get_float_time_for_string(backup_meta_dict['last_saved']['time'])
                backup_marked_dirty = backup_meta_dict['last_backup']['marked_dirty']
                # prefer the backup only if it is newer and holds unsaved changes
                if last_backup_time > last_save_time and backup_marked_dirty:
                    from_backup_path = backup_meta_dict['last_backup']['file_system_path']
            else:
                from_backup_path = backup_meta_dict['last_backup']['file_system_path']
        elif 'last_saved' in backup_meta_dict:
            # print("### open last saved", sm_meta_dict['last_saved']['file_system_path'])
            pass
        else:
            logger.error("A tab was stored into session storage dictionary {0} without any recovery path"
                         "".format(backup_meta_dict))
            continue

        # check in case that the backup folder is valid or use last saved path
        if from_backup_path is not None and not os.path.isdir(from_backup_path):
            logger.warning("The backup of tab {0} from backup path {1} was not possible. "
                           "The last saved path will be used for recovery, which could result is loss of changes."
                           "".format(idx, from_backup_path))
            from_backup_path = None

        # open state machine
        if from_backup_path is not None:
            # open state machine, recover mark dirty flags, cleans dirty lock files
            logger.info("Restoring from backup {0}".format(from_backup_path))
            state_machine_m = recover_state_machine_from_backup(from_backup_path)
        else:
            if 'last_saved' not in backup_meta_dict or backup_meta_dict['last_saved']['file_system_path'] is None:
                continue
            path = backup_meta_dict['last_saved']['file_system_path']
            if not os.path.isdir(path):
                logger.warning("The tab can not be open. The backup of tab {0} from common path {1} was not "
                               "possible.".format(idx, path))
                continue
            # logger.info("backup from last saved", path, sm_meta_dict)
            state_machine = storage.load_state_machine_from_path(path)
            state_machine_manager_model.state_machine_manager.add_state_machine(state_machine)
            wait_for_gui()
            state_machine_m = state_machine_manager_model.state_machines[state_machine.state_machine_id]

        duration = time.time() - start_time
        stat = state_machine_m.state_machine.root_state.get_states_statistics(0)
        logger.info("It took {0:.3}s to restore {1} states with {2} hierarchy levels.".format(duration, stat[0], stat[1]))
        open_sm[idx] = state_machine_m

    global_runtime_config.extend_recently_opened_by_current_open_state_machines()
    wait_for_gui()

    # restore all state machine selections separate to avoid states-editor and state editor creation problems
    for idx, tab_meta_dict in enumerate(open_tabs):
        state_machine_m = open_sm[idx]
        if state_machine_m is None:  # state machine could not be open
            # NOTE(review): this `return` aborts restoring the selections of all
            # remaining tabs; presumably `continue` was intended — TODO confirm
            return
        # restore state machine selection
        selected_model_set = []
        for core_element_identifier in tab_meta_dict['selection']:
            selected_model_set.append(state_machine_m.get_state_model_by_path(core_element_identifier))
        state_machine_m.selection.set(selected_model_set)

    # restore backup-ed tab selection
    selected_page_number = global_runtime_config.get_config_value('selected_state_machine_page_number', None)
    if selected_page_number is not None:
        selected_state_machine_page_number = selected_page_number
    # NOTE(review): when no page number was stored, `selected_state_machine_page_number`
    # is unbound here and the following check raises NameError — TODO confirm intended flow
    if selected_state_machine_page_number is None:
        return
    state_machines_editor_ctrl = main_window_controller.get_controller('state_machines_editor_ctrl')
    if not state_machines_editor_ctrl.view['notebook'].get_n_pages() >= selected_page_number:
        logger.warning("Page id {0} does not exist so session restore can not re-create selection."
                       "".format(selected_page_number))
        return
    notebook = state_machines_editor_ctrl.view['notebook']
    page = state_machines_editor_ctrl.on_switch_page(notebook, None, selected_page_number)
    selected_sm_id = state_machine_manager_model.selected_state_machine_id
    if not selected_sm_id == state_machines_editor_ctrl.get_state_machine_id_for_page(page):
        logger.warning("Selection of page was not set correctly so session restore can not re-create selection.")
        return
def add_logging_level(level_name, level_num, method_name=None):
    """Add new logging level

    Comprehensively adds a new logging level to the `logging` module and the currently configured logging class.
    `method_name` becomes a convenience method for both `logging` itself and the class returned by
    `logging.getLoggerClass()` (usually just `logging.Logger`). If `method_name` is not specified, `level_name.lower()`
    is used.

    :param str level_name: the level name
    :param int level_num: the level number/value
    :raises AttributeError: if the level
        name is already an attribute of the `logging` module or if the method name is already present

    Example
    -------
    >>> add_logging_level('TRACE', logging.DEBUG - 5)
    >>> logging.getLogger(__name__).setLevel("TRACE")
    >>> logging.getLogger(__name__).trace('that worked')
    >>> logging.trace('so did this')
    >>> logging.TRACE
    5
    """
    method_name = method_name if method_name else level_name.lower()

    # Refuse to clobber anything that already exists on the module or the logger class
    for attribute in (level_name, method_name):
        if hasattr(logging, attribute):
            raise AttributeError('{} already defined in logging module'.format(attribute))
    if hasattr(logging.getLoggerClass(), method_name):
        raise AttributeError('{} already defined in logger class'.format(method_name))

    # This method was inspired by the answers to Stack Overflow post
    # http://stackoverflow.com/q/2183233/2988730, especially
    # http://stackoverflow.com/a/13638084/2988730
    def log_for_level(self, message, *args, **kwargs):
        # Mirror Logger.debug & co.: honour the effective level before logging
        if self.isEnabledFor(level_num):
            self._log(level_num, message, args, **kwargs)

    def log_to_root(message, *args, **kwargs):
        logging.log(level_num, message, *args, **kwargs)

    logging.addLevelName(level_num, level_name)
    setattr(logging, level_name, level_num)
    setattr(logging.getLoggerClass(), method_name, log_for_level)
    setattr(logging, method_name, log_to_root)
def get_logger(name):
    """Returns a logger for the given name

    The function is basically a wrapper for logging.getLogger and only ensures that the namespace is within "rafcon."
    and that the propagation is enabled.

    :param str name: The namespace of the new logger
    :return: Logger object with given namespace
    :rtype: logging.Logger
    """
    # Serve cached loggers first (EAFP instead of membership test)
    try:
        return existing_loggers[name]
    except KeyError:
        pass
    # Ensure that all logger are within the RAFCON root namespace
    prefix = rafcon_root + "."
    namespace = name if name.startswith(prefix) else prefix + name
    new_logger = logging.getLogger(namespace)
    new_logger.propagate = True
    existing_loggers[name] = new_logger
    return new_logger
def etrago(args):
    """The etrago function works with following arguments:

    Parameters
    ----------

    db : str
        ``'oedb'``,
        Name of Database session setting stored in *config.ini* of *.egoio*

    gridversion : NoneType or str
        ``'v0.2.11'``,
        Name of the data version number of oedb: state ``'None'`` for
        model_draft (sand-box) or an explicit version number
        (e.g. 'v0.2.10') for the grid schema.

    method : str
        ``'lopf'``,
        Choose between a non-linear power flow ('pf') or
        a linear optimal power flow ('lopf').

    pf_post_lopf : bool
        False,
        Option to run a non-linear power flow (pf) directly after the
        linear optimal power flow (and thus the dispatch) has finished.

    start_snapshot : int
        1,
        Start hour of the scenario year to be calculated.

    end_snapshot : int
        2,
        End hour of the scenario year to be calculated.

    solver : str
        'glpk',
        Choose your preferred solver. Current options: 'glpk' (open-source),
        'cplex' or 'gurobi'.

    solver_options : dict
        {},
        Settings handed to the chosen solver, e.g. tolerances or time limits.

    scn_name : str
        'Status Quo',
        Choose your scenario. Currently, there are three different
        scenarios: 'Status Quo', 'NEP 2035', 'eGo100'. If you do not
        want to use the full German dataset, you can use the excerpt of
        Schleswig-Holstein by adding the acronym SH to the scenario
        name (e.g. 'SH Status Quo').

    scn_extension : NoneType or list
        None,
        Choose extension-scenarios which will be added to the existing
        network container. Data of the extension scenarios are located in
        extension-tables (e.g. model_draft.ego_grid_pf_hv_extension_bus)
        with the prefix 'extension_'.
        Currently there are three overlay networks:
        'nep2035_confirmed' includes all planed new lines confirmed by the
        Bundesnetzagentur
        'nep2035_b2' includes all new lines planned by the
        Netzentwicklungsplan 2025 in scenario 2035 B2
        'BE_NO_NEP 2035' includes planned lines to Belgium and Norway and
        adds BE and NO as electrical neighbours

    scn_decommissioning : str
        None,
        Choose an extra scenario which includes lines you want to decommise
        from the existing network. Data of the decommissioning scenarios are
        located in extension-tables
        (e.g. model_draft.ego_grid_pf_hv_extension_bus) with the prefix
        'decommissioning_'.
        Currently, there are two decommissioning_scenarios which are linked to
        extension-scenarios:
        'nep2035_confirmed' includes all lines that will be replaced in
        confirmed projects
        'nep2035_b2' includes all lines that will be replaced in
        NEP-scenario 2035 B2

    lpfile : obj
        False,
        State if and where you want to save pyomo's lp file. Options:
        False or '/path/tofolder'.import numpy as np

    csv_export : obj
        False,
        State if and where you want to save results as csv files.Options:
        False or '/path/tofolder'.

    db_export : bool
        False,
        State if you want to export the results of your calculation
        back to the database.

    extendable : list
        ['network', 'storages'],
        Choose components you want to optimize.
        Settings can be added in /tools/extendable.py.
        The most important possibilities:
            'network': set all lines, links and transformers extendable
            'german_network': set lines and transformers in German grid
            extendable
            'foreign_network': set foreign lines and transformers extendable
            'transformers': set all transformers extendable
            'overlay_network': set all components of the 'scn_extension'
            extendable
            'storages': allow to install extendable storages
            (unlimited in size) at each grid node in order to meet
            the flexibility demand.
            'network_preselection': set only preselected lines extendable,
            method is chosen in function call

    generator_noise : bool or int
        State if you want to apply a small random noise to the marginal costs
        of each generator in order to prevent an optima plateau. To reproduce
        a noise, choose the same integer (seed number).

    minimize_loading : bool
        False,
        ...

    ramp_limits : bool
        False,
        State if you want to consider ramp limits of generators.
        Increases time for solving significantly.
        Only works when calculating at least 30 snapshots.

    extra_functionality : str or None
        None,
        Choose name of extra functionality described in etrago/utilities.py
        "min_renewable_share" to set a minimal share of renewable energy or
        "max_line_ext" to set an overall maximum of line expansion.
        When activating snapshot_clustering or minimize_loading these
        extra_funtionalities are overwritten and therefore neglected.

    network_clustering_kmeans : bool or int
        False,
        State if you want to apply a clustering of all network buses down to
        only ``'k'`` buses. The weighting takes place considering generation
        and load
        at each node. If so, state the number of k you want to apply. Otherwise
        put False. This function doesn't work together with
        ``'line_grouping = True'``.

    load_cluster : bool or obj
        state if you want to load cluster coordinates from a previous run:
        False or /path/tofile (filename similar to ./cluster_coord_k_n_result).

    network_clustering_ehv : bool
        False,
        Choose if you want to cluster the full HV/EHV dataset down to only the
        EHV buses. In that case, all HV buses are assigned to their closest EHV
        sub-station, taking into account the shortest distance on power lines.

    snapshot_clustering : bool or int
        False,
        State if you want to cluster the snapshots and run the optimization
        only on a subset of snapshot periods. The int value defines the number
        of periods (i.e. days) which will be clustered to.
        Move to PyPSA branch:features/snapshot_clustering

    skip_snapshots : bool or int
        False,
        State if you only want to consider every n-th timestep
        to reduce temporal complexity.

    parallelisation : bool
        False,
        Choose if you want to calculate a certain number of snapshots in
        parallel. If yes, define the respective amount in the if-clause
        execution below. Otherwise state False here.

    line_grouping : bool
        True,
        State if you want to group lines that connect the same two buses
        into one system.

    branch_capacity_factor : dict
        {'HV': 0.5, 'eHV' : 0.7},
        Add a factor here if you want to globally change line capacities
        (e.g. to "consider" an (n-1) criterion or for debugging purposes).

    load_shedding : bool
        False,
        State here if you want to make use of the load shedding function which
        is helpful when debugging: a very expensive generator is set to each
        bus and meets the demand when regular
        generators cannot do so.

    foreign_lines : dict
        {'carrier':'AC', 'capacity': 'osmTGmod}'
        Choose transmission technology and capacity of foreign lines:
            'carrier': 'AC' or 'DC'
            'capacity': 'osmTGmod', 'ntc_acer' or 'thermal_acer'

    comments : str
        None

    Returns
    -------
    network : `pandas.DataFrame<dataframe>`
        eTraGo result network based on `PyPSA network
        <https://www.pypsa.org/doc/components.html#network>`_
    """
    conn = db.connection(section=args['db'])
    Session = sessionmaker(bind=conn)
    session = Session()

    # additional arguments cfgpath, version, prefix
    if args['gridversion'] is None:
        args['ormcls_prefix'] = 'EgoGridPfHv'
    else:
        args['ormcls_prefix'] = 'EgoPfHv'

    scenario = NetworkScenario(session,
                               version=args['gridversion'],
                               prefix=args['ormcls_prefix'],
                               method=args['method'],
                               start_snapshot=args['start_snapshot'],
                               end_snapshot=args['end_snapshot'],
                               scn_name=args['scn_name'])

    network = scenario.build_network()

    # add coordinates
    network = add_coordinates(network)

    # Set countrytags of buses, lines, links and transformers
    network = geolocation_buses(network, session)

    # Set q_sets of foreign loads
    network = set_q_foreign_loads(network, cos_phi=1)

    # Change transmission technology and/or capacity of foreign lines
    if args['foreign_lines']['carrier'] == 'DC':
        foreign_links(network)
        network = geolocation_buses(network, session)

    if args['foreign_lines']['capacity'] != 'osmTGmod':
        crossborder_capacity(
            network, args['foreign_lines']['capacity'],
            args['branch_capacity_factor'])

    # TEMPORARY vague adjustment due to transformer bug in data processing
    if args['gridversion'] == 'v0.2.11':
        network.transformers.x = network.transformers.x * 0.0001

    # set SOC at the beginning and end of the period to equal values
    network.storage_units.cyclic_state_of_charge = True

    # set extra_functionality
    # SECURITY NOTE: eval() executes arbitrary code from the config value;
    # only trusted function names from etrago/utilities.py must be passed here.
    if args['extra_functionality'] is not None:
        extra_functionality = eval(args['extra_functionality'])
    elif args['extra_functionality'] is None:
        extra_functionality = args['extra_functionality']

    # set disaggregated_network to default
    disaggregated_network = None

    # set clustering to default
    clustering = None

    if args['generator_noise'] is not False:
        # add random noise to all generators
        s = np.random.RandomState(args['generator_noise'])
        network.generators.marginal_cost[network.generators.bus.isin(
            network.buses.index[network.buses.country_code == 'DE'])] += \
            abs(s.normal(0, 0.1, len(network.generators.marginal_cost[
                network.generators.bus.isin(network.buses.index[
                    network.buses.country_code == 'DE'])])))

    # for SH scenario run do data preperation:
    if (args['scn_name'] == 'SH Status Quo' or
            args['scn_name'] == 'SH NEP 2035'):
        data_manipulation_sh(network)

    # grouping of parallel lines
    if args['line_grouping']:
        group_parallel_lines(network)

    # Branch loading minimization
    if args['minimize_loading']:
        extra_functionality = loading_minimization

    # scenario extensions
    if args['scn_extension'] is not None:
        for i in range(len(args['scn_extension'])):
            network = extension(
                network,
                session,
                version=args['gridversion'],
                scn_extension=args['scn_extension'][i],
                start_snapshot=args['start_snapshot'],
                end_snapshot=args['end_snapshot'])
        network = geolocation_buses(network, session)

    # scenario decommissioning
    if args['scn_decommissioning'] is not None:
        network = decommissioning(
            network,
            session,
            args)

    # Add missing lines in Munich and Stuttgart
    network = add_missing_components(network)

    # set Branch capacity factor for lines and transformer
    if args['branch_capacity_factor']:
        set_branch_capacity(network, args)

    # investive optimization strategies
    if args['extendable'] != []:
        network = extendable(
            network,
            args,
            line_max=4)
        network = convert_capital_costs(
            network, args['start_snapshot'], args['end_snapshot'])

    # skip snapshots
    if args['skip_snapshots']:
        network.snapshots = network.snapshots[::args['skip_snapshots']]
        network.snapshot_weightings = network.snapshot_weightings[
            ::args['skip_snapshots']] * args['skip_snapshots']

    # snapshot clustering
    if not args['snapshot_clustering'] is False:
        network = snapshot_clustering(
            network, how='daily', clusters=args['snapshot_clustering'])
        extra_functionality = daily_bounds  # daily_bounds or other constraint

    # load shedding in order to hunt infeasibilities
    if args['load_shedding']:
        load_shedding(network)

    # ehv network clustering
    if args['network_clustering_ehv']:
        network.generators.control = "PV"
        busmap = busmap_from_psql(
            network,
            session,
            scn_name=(
                args['scn_name'] if args['scn_extension'] is None
                else args['scn_name'] + '_ext_' + '_'.join(
                    args['scn_extension'])))
        network = cluster_on_extra_high_voltage(
            network, busmap, with_time=True)

    # k-mean clustering
    if not args['network_clustering_kmeans'] == False:
        clustering = kmean_clustering(
            network,
            n_clusters=args['network_clustering_kmeans'],
            load_cluster=args['load_cluster'],
            line_length_factor=1,
            remove_stubs=False,
            use_reduced_coordinates=False,
            bus_weight_tocsv=None,
            bus_weight_fromcsv=None,
            n_init=10,
            max_iter=100,
            tol=1e-6,
            n_jobs=-1)
        disaggregated_network = (
            network.copy() if args.get('disaggregation') else None)
        network = clustering.network.copy()

    if args['ramp_limits']:
        ramp_limits(network)

    # preselection of extendable lines
    if 'network_preselection' in args['extendable']:
        extension_preselection(network, args, 'snapshot_clustering', 2)

    # parallelisation
    if args['parallelisation']:
        parallelisation(
            network,
            start_snapshot=args['start_snapshot'],
            end_snapshot=args['end_snapshot'],
            group_size=1,
            solver_name=args['solver'],
            solver_options=args['solver_options'],
            extra_functionality=extra_functionality)

    # start linear optimal powerflow calculations
    elif args['method'] == 'lopf':
        x = time.time()
        network.lopf(
            network.snapshots,
            solver_name=args['solver'],
            solver_options=args['solver_options'],
            extra_functionality=extra_functionality, formulation="angles")
        y = time.time()
        z = (y - x) / 60
        # z is time for lopf in minutes
        print("Time for LOPF [min]:", round(z, 2))

    # start non-linear powerflow simulation
    # FIX: was `args['method'] is 'pf'` — identity comparison on a string
    # literal only works by accident (CPython interning); use equality.
    elif args['method'] == 'pf':
        network.pf(scenario.timeindex)
        # calc_line_losses(network)

    if args['pf_post_lopf']:
        x = time.time()
        pf_solution = pf_post_lopf(network,
                                   args,
                                   extra_functionality,
                                   add_foreign_lopf=True)
        y = time.time()
        z = (y - x) / 60
        print("Time for PF [min]:", round(z, 2))
        calc_line_losses(network)
        network = distribute_q(network, allocation='p_nom')

    if not args['extendable'] == []:
        print_expansion_costs(network, args)

    if clustering:
        disagg = args.get('disaggregation')
        skip = () if args['pf_post_lopf'] else ('q',)
        t = time.time()
        if disagg:
            if disagg == 'mini':
                disaggregation = MiniSolverDisaggregation(
                    disaggregated_network,
                    network,
                    clustering,
                    skip=skip)
            elif disagg == 'uniform':
                disaggregation = UniformDisaggregation(disaggregated_network,
                                                       network,
                                                       clustering,
                                                       skip=skip)
            else:
                raise Exception('Invalid disaggregation command: ' + disagg)

            disaggregation.execute(scenario, solver=args['solver'])
            # temporal bug fix for solar generator which ar during night time
            # nan instead of 0
            disaggregated_network.generators_t.p.fillna(0, inplace=True)
            disaggregated_network.generators_t.q.fillna(0, inplace=True)

            disaggregated_network.results = network.results
            print("Time for overall desaggregation [min]: {:.2}"
                  .format((time.time() - t) / 60))

    # write lpfile to path
    if not args['lpfile'] is False:
        network.model.write(
            args['lpfile'], io_options={
                'symbolic_solver_labels': True})

    # write PyPSA results back to database
    if args['db_export']:
        username = str(conn.url).split('//')[1].split(':')[0]
        args['user_name'] = username

        results_to_oedb(
            session,
            network,
            dict([("disaggregated_results", False)] + list(args.items())),
            grid='hv',
            safe_results=False)

        if disaggregated_network:
            results_to_oedb(
                session,
                disaggregated_network,
                dict([("disaggregated_results", True)] + list(args.items())),
                grid='hv',
                safe_results=False)

    # write PyPSA results to csv to path
    if not args['csv_export'] is False:
        if not args['pf_post_lopf']:
            results_to_csv(network, args)
        else:
            results_to_csv(network, args, pf_solution=pf_solution)

        if disaggregated_network:
            results_to_csv(
                disaggregated_network,
                {k: os.path.join(v, 'disaggregated')
                 if k == 'csv_export' else v
                 for k, v in args.items()})

    # close session
    # session.close()

    return network, disaggregated_network
Parameters
----------
db : str
``'oedb'``,
Name of Database session setting stored in *config.ini* of *.egoio*
gridversion : NoneType or str
``'v0.2.11'``,
Name of the data version number of oedb: state ``'None'`` for
model_draft (sand-box) or an explicit version number
(e.g. 'v0.2.10') for the grid schema.
method : str
``'lopf'``,
Choose between a non-linear power flow ('pf') or
a linear optimal power flow ('lopf').
pf_post_lopf : bool
False,
Option to run a non-linear power flow (pf) directly after the
linear optimal power flow (and thus the dispatch) has finished.
start_snapshot : int
1,
Start hour of the scenario year to be calculated.
end_snapshot : int
2,
End hour of the scenario year to be calculated.
solver : str
'glpk',
Choose your preferred solver. Current options: 'glpk' (open-source),
'cplex' or 'gurobi'.
scn_name : str
'Status Quo',
Choose your scenario. Currently, there are three different
scenarios: 'Status Quo', 'NEP 2035', 'eGo100'. If you do not
want to use the full German dataset, you can use the excerpt of
Schleswig-Holstein by adding the acronym SH to the scenario
name (e.g. 'SH Status Quo').
scn_extension : NoneType or list
None,
Choose extension-scenarios which will be added to the existing
network container. Data of the extension scenarios are located in
extension-tables (e.g. model_draft.ego_grid_pf_hv_extension_bus)
with the prefix 'extension_'.
Currently there are three overlay networks:
'nep2035_confirmed' includes all planed new lines confirmed by the
Bundesnetzagentur
'nep2035_b2' includes all new lines planned by the
Netzentwicklungsplan 2025 in scenario 2035 B2
'BE_NO_NEP 2035' includes planned lines to Belgium and Norway and
adds BE and NO as electrical neighbours
scn_decommissioning : str
None,
Choose an extra scenario which includes lines you want to decommise
from the existing network. Data of the decommissioning scenarios are
located in extension-tables
(e.g. model_draft.ego_grid_pf_hv_extension_bus) with the prefix
'decommissioning_'.
Currently, there are two decommissioning_scenarios which are linked to
extension-scenarios:
'nep2035_confirmed' includes all lines that will be replaced in
confirmed projects
'nep2035_b2' includes all lines that will be replaced in
NEP-scenario 2035 B2
lpfile : obj
False,
State if and where you want to save pyomo's lp file. Options:
False or '/path/tofolder'.
csv_export : obj
False,
State if and where you want to save results as csv files. Options:
False or '/path/tofolder'.
db_export : bool
False,
State if you want to export the results of your calculation
back to the database.
extendable : list
['network', 'storages'],
Choose components you want to optimize.
Settings can be added in /tools/extendable.py.
The most important possibilities:
'network': set all lines, links and transformers extendable
'german_network': set lines and transformers in German grid
extendable
'foreign_network': set foreign lines and transformers extendable
'transformers': set all transformers extendable
'overlay_network': set all components of the 'scn_extension'
extendable
'storages': allow to install extendable storages
(unlimited in size) at each grid node in order to meet
the flexibility demand.
'network_preselection': set only preselected lines extendable,
method is chosen in function call
generator_noise : bool or int
State if you want to apply a small random noise to the marginal costs
of each generator in order to prevent an optima plateau. To reproduce
a noise, choose the same integer (seed number).
minimize_loading : bool
False,
...
ramp_limits : bool
False,
State if you want to consider ramp limits of generators.
Increases time for solving significantly.
Only works when calculating at least 30 snapshots.
extra_functionality : str or None
None,
Choose name of extra functionality described in etrago/utilities.py
"min_renewable_share" to set a minimal share of renewable energy or
"max_line_ext" to set an overall maximum of line expansion.
When activating snapshot_clustering or minimize_loading these
extra_funtionalities are overwritten and therefore neglected.
network_clustering_kmeans : bool or int
False,
State if you want to apply a clustering of all network buses down to
only ``'k'`` buses. The weighting takes place considering generation
and load
at each node. If so, state the number of k you want to apply. Otherwise
put False. This function doesn't work together with
``'line_grouping = True'``.
load_cluster : bool or obj
state if you want to load cluster coordinates from a previous run:
False or /path/tofile (filename similar to ./cluster_coord_k_n_result).
network_clustering_ehv : bool
False,
Choose if you want to cluster the full HV/EHV dataset down to only the
EHV buses. In that case, all HV buses are assigned to their closest EHV
sub-station, taking into account the shortest distance on power lines.
snapshot_clustering : bool or int
False,
State if you want to cluster the snapshots and run the optimization
only on a subset of snapshot periods. The int value defines the number
of periods (i.e. days) which will be clustered to.
Move to PyPSA branch:features/snapshot_clustering
parallelisation : bool
False,
Choose if you want to calculate a certain number of snapshots in
parallel. If yes, define the respective amount in the if-clause
execution below. Otherwise state False here.
line_grouping : bool
True,
State if you want to group lines that connect the same two buses
into one system.
branch_capacity_factor : dict
{'HV': 0.5, 'eHV' : 0.7},
Add a factor here if you want to globally change line capacities
(e.g. to "consider" an (n-1) criterion or for debugging purposes).
load_shedding : bool
False,
State here if you want to make use of the load shedding function which
is helpful when debugging: a very expensive generator is set to each
bus and meets the demand when regular
generators cannot do so.
foreign_lines : dict
{'carrier': 'AC', 'capacity': 'osmTGmod'}
Choose transmission technology and capacity of foreign lines:
'carrier': 'AC' or 'DC'
'capacity': 'osmTGmod', 'ntc_acer' or 'thermal_acer'
comments : str
None
Returns
-------
network : `pandas.DataFrame<dataframe>`
eTraGo result network based on `PyPSA network
<https://www.pypsa.org/doc/components.html#network>`_ | entailment |
def open_state_machine(path=None, recent_opened_notification=False):
""" Open a state machine from respective file system path
:param str path: file system path to the state machine
:param bool recent_opened_notification: flags that indicates that this call also should update recently open
:rtype rafcon.core.state_machine.StateMachine
:return: opened state machine
"""
start_time = time.time()
if path is None:
if interface.open_folder_func is None:
logger.error("No function defined for opening a folder")
return
load_path = interface.open_folder_func("Please choose the folder of the state machine")
# None means the user canceled the folder dialog
if load_path is None:
return
else:
load_path = path
# if the state machine is already open, only select the existing instance instead of loading it twice
if state_machine_manager.is_state_machine_open(load_path):
logger.info("State machine already open. Select state machine instance from path {0}.".format(load_path))
sm = state_machine_manager.get_open_state_machine_of_file_system_path(load_path)
gui_helper_state.gui_singletons.state_machine_manager_model.selected_state_machine_id = sm.state_machine_id
return state_machine_manager.get_open_state_machine_of_file_system_path(load_path)
state_machine = None
try:
state_machine = storage.load_state_machine_from_path(load_path)
state_machine_manager.add_state_machine(state_machine)
if recent_opened_notification:
global_runtime_config.update_recently_opened_state_machines_with(state_machine)
duration = time.time() - start_time
stat = state_machine.root_state.get_states_statistics(0)
logger.info("It took {0:.2}s to load {1} states with {2} hierarchy levels.".format(duration, stat[0], stat[1]))
except (AttributeError, ValueError, IOError) as e:
logger.error('Error while trying to open state machine: {0}'.format(e))
# state_machine stays None if loading failed above
return state_machine | Open a state machine from respective file system path
:param str path: file system path to the state machine
:param bool recent_opened_notification: flags that indicates that this call also should update recently open
:rtype rafcon.core.state_machine.StateMachine
:return: opened state machine | entailment |
def save_state_machine(delete_old_state_machine=False, recent_opened_notification=False, as_copy=False, copy_path=None):
""" Save selected state machine
The function checks if states of the state machine have not stored script data and triggers dialog windows to
take user input how to continue (ignoring or storing this script changes).
If the state machine file_system_path is None function save_state_machine_as is used to collect respective path and
to store the state machine.
The delete flag will remove all data in existing state machine folder (if plugins or feature use non-standard
RAFCON files this data will be removed)
:param bool delete_old_state_machine: Flag to delete existing state machine folder before storing current version
:param bool recent_opened_notification: Flag to insert path of state machine into recent opened state machine paths
:param bool as_copy: Store state machine as copy flag e.g. without assigning path to state_machine.file_system_path
:param copy_path: Target path used for the copy when as_copy is True
:return: True if the storing was successful, False if the storing process was canceled or stopped by condition fail
:rtype bool:
"""
state_machine_manager_model = rafcon.gui.singleton.state_machine_manager_model
states_editor_ctrl = rafcon.gui.singleton.main_window_controller.get_controller('states_editor_ctrl')
state_machine_m = state_machine_manager_model.get_selected_state_machine_model()
if state_machine_m is None:
logger.warning("Can not 'save state machine' because no state machine is selected.")
return False
previous_path = state_machine_m.state_machine.file_system_path
# NOTE(review): previous_marked_dirty appears unused in this function — confirm before removing
previous_marked_dirty = state_machine_m.state_machine.marked_dirty
# collect source editors (open and closed tabs) with unapplied script changes belonging to this state machine
all_tabs = list(states_editor_ctrl.tabs.values())
all_tabs.extend(states_editor_ctrl.closed_tabs.values())
dirty_source_editor_ctrls = [tab_dict['controller'].get_controller('source_ctrl') for tab_dict in all_tabs if
tab_dict['source_code_view_is_dirty'] is True and
tab_dict['state_m'].state.get_state_machine().state_machine_id ==
state_machine_m.state_machine.state_machine_id]
for dirty_source_editor_ctrl in dirty_source_editor_ctrls:
state = dirty_source_editor_ctrl.model.state
message_string = "The source code of the state '{}' (path: {}) has not been applied yet and would " \
"therefore not be saved.\n\nDo you want to apply the changes now?" \
"".format(state.name, state.get_path())
if global_gui_config.get_config_value("AUTO_APPLY_SOURCE_CODE_CHANGES", False):
dirty_source_editor_ctrl.apply_clicked(None)
else:
dialog = RAFCONButtonDialog(message_string, ["Apply", "Ignore changes"],
message_type=Gtk.MessageType.WARNING, parent=states_editor_ctrl.get_root_window())
response_id = dialog.run()
state = dirty_source_editor_ctrl.model.state
if response_id == 1: # Apply changes
logger.debug("Applying source code changes of state '{}'".format(state.name))
dirty_source_editor_ctrl.apply_clicked(None)
elif response_id == 2: # Ignore changes
logger.debug("Ignoring source code changes of state '{}'".format(state.name))
else:
logger.warning("Response id: {} is not considered".format(response_id))
return False
dialog.destroy()
save_path = state_machine_m.state_machine.file_system_path
# no target path known yet -> delegate to the "save as" routine to collect one and store there
if not as_copy and save_path is None or as_copy and copy_path is None:
if not save_state_machine_as(as_copy=as_copy):
return False
return True
logger.debug("Saving state machine to {0}".format(save_path))
state_machine_m = state_machine_manager_model.get_selected_state_machine_model()
sm_path = state_machine_m.state_machine.file_system_path
storage.save_state_machine_to_path(state_machine_m.state_machine, copy_path if as_copy else sm_path,
delete_old_state_machine=delete_old_state_machine, as_copy=as_copy)
if recent_opened_notification:
global_runtime_config.update_recently_opened_state_machines_with(state_machine_m.state_machine)
state_machine_m.store_meta_data(copy_path=copy_path if as_copy else None)
logger.debug("Saved state machine and its meta data.")
library_manager_model.state_machine_was_stored(state_machine_m, previous_path)
return True | Save selected state machine
The function checks if states of the state machine have not stored script data and triggers dialog windows to
take user input how to continue (ignoring or storing this script changes).
If the state machine file_system_path is None function save_state_machine_as is used to collect respective path and
to store the state machine.
The delete flag will remove all data in existing state machine folder (if plugins or feature use non-standard
RAFCON files this data will be removed)
:param bool delete_old_state_machine: Flag to delete existing state machine folder before storing current version
:param bool recent_opened_notification: Flag to insert path of state machine into recent opened state machine paths
:param bool as_copy: Store state machine as copy flag e.g. without assigning path to state_machine.file_system_path
:return: True if the storing was successful, False if the storing process was canceled or stopped by condition fail
:rtype bool: | entailment |
def save_state_machine_as(path=None, recent_opened_notification=False, as_copy=False):
""" Store selected state machine to path
If there is no handed path the interface dialog "create folder" is used to collect one. The state machine finally
is stored by the save_state_machine function.
:param str path: Path of state machine folder where selected state machine should be stored
:param bool recent_opened_notification: Flag to insert path of state machine into recent opened state machine paths
:param bool as_copy: Store state machine as copy flag e.g. without assigning path to state_machine.file_system_path
:return: True if successfully stored, False if the storing process was canceled or stopped by condition fail
:rtype bool:
"""
state_machine_manager_model = rafcon.gui.singleton.state_machine_manager_model
selected_state_machine_model = state_machine_manager_model.get_selected_state_machine_model()
if selected_state_machine_model is None:
logger.warning("Can not 'save state machine as' because no state machine is selected.")
return False
if path is None:
if interface.create_folder_func is None:
logger.error("No function defined for creating a folder")
return False
# default folder name is derived from the root state's name
folder_name = selected_state_machine_model.state_machine.root_state.name
path = interface.create_folder_func("Please choose a root folder and a folder name for the state-machine. "
"The default folder name is the name of the root state.",
format_default_folder_name(folder_name))
if path is None:
logger.warning("No valid path specified")
return False
previous_path = selected_state_machine_model.state_machine.file_system_path
if not as_copy:
# only register in "recently opened" if the path changed or there were unsaved modifications
marked_dirty = selected_state_machine_model.state_machine.marked_dirty
recent_opened_notification = recent_opened_notification and (not previous_path == path or marked_dirty)
selected_state_machine_model.state_machine.file_system_path = path
result = save_state_machine(delete_old_state_machine=True,
recent_opened_notification=recent_opened_notification,
as_copy=as_copy, copy_path=path)
library_manager_model.state_machine_was_stored(selected_state_machine_model, previous_path)
return result | Store selected state machine to path
If there is no handed path the interface dialog "create folder" is used to collect one. The state machine finally
is stored by the save_state_machine function.
:param str path: Path of state machine folder where selected state machine should be stored
:param bool recent_opened_notification: Flag to insert path of state machine into recent opened state machine paths
:param bool as_copy: Store state machine as copy flag e.g. without assigning path to state_machine.file_system_path
:return: True if successfully stored, False if the storing process was canceled or stopped by condition fail
:rtype bool: | entailment |
def save_selected_state_as():
"""Save selected state as separate state machine
:return True if successfully stored, False if the storing process was canceled or stopped by condition fail
:rtype bool:
:raises exceptions.ValueError: If dialog response ids are out of bounds
"""
state_machine_manager_model = rafcon.gui.singleton.state_machine_manager_model
selection = state_machine_manager_model.get_selected_state_machine_model().selection
selected_state = selection.get_selected_state()
state_machine_id = state_machine_manager_model.get_selected_state_machine_model().state_machine.state_machine_id
if len(selection.states) == 1:
# wrap a copy of the selected state into a fresh state machine model
state_m = copy.copy(selected_state)
sm_m = StateMachineModel(StateMachine(root_state=state_m.state))
sm_m.root_state = state_m
path = interface.create_folder_func("Please choose a root folder and a folder name for the state-machine your "
"state is saved in. The default folder name is the name of state.",
format_default_folder_name(selected_state.state.name))
if path:
storage.save_state_machine_to_path(sm_m.state_machine, base_path=path)
sm_m.store_meta_data()
else:
logger.warning("No valid path specified")
return False
def open_as_state_machine_saved_state_as_separate_state_machine():
# helper: open the just-saved state machine in a new tab
logger.debug("Open state machine.")
try:
open_state_machine(path=path, recent_opened_notification=True)
except (ValueError, IOError) as e:
logger.error('Error while trying to open state machine: {0}'.format(e))
# check if state machine is in library path
root_window = rafcon.gui.singleton.main_window_controller.get_root_window()
if library_manager.is_os_path_within_library_root_paths(path):
_library_path, _library_name = \
library_manager.get_library_path_and_name_for_os_path(sm_m.state_machine.file_system_path)
overwrote_old_lib = library_manager.is_library_in_libraries(_library_path, _library_name)
message_string = "You stored your state machine in a path that is within the library root paths. " \
"Thereby your state machine can be used as a library state.\n\n"\
"Do you want to:"
table_header = ["Option", "Description"]
table_data = [(True, "Substitute the original state by this new library state."),
(True, "Open the newly created library state machine.")]
if overwrote_old_lib:
table_data.append((False, "Refresh all open state machines, as an already existing library was "
"overwritten."))
dialog = RAFCONCheckBoxTableDialog(message_string,
button_texts=("Apply", "Cancel"),
table_header=table_header, table_data=table_data,
message_type=Gtk.MessageType.QUESTION,
parent=root_window,
width=800, standalone=False)
response_id = dialog.run()
if response_id == 1: # Apply pressed
if overwrote_old_lib and dialog.list_store[2][0]: # refresh all open state machine selected
logger.debug("Refresh all is triggered.")
refresh_all()
else: # if not all was refreshed at least the libraries are refreshed
logger.debug("Library refresh is triggered.")
refresh_libraries()
if dialog.list_store[0][0]: # Substitute saved state with Library selected
logger.debug("Substitute saved state with Library.")
if dialog.list_store[0][0] or dialog.list_store[0][1]:
refresh_libraries()
state_machine_manager_model.selected_state_machine_id = state_machine_id
[library_path, library_name] = library_manager.get_library_path_and_name_for_os_path(path)
state = library_manager.get_library_instance(library_path, library_name)
try:
substitute_selected_state(state, as_template=False)
except ValueError as e:
logger.error('Error while trying to open state machine: {0}'.format(e))
if dialog.list_store[1][0]: # Open as state machine saved state as separate state machine selected
open_as_state_machine_saved_state_as_separate_state_machine()
elif response_id in [2, -4]: # Cancel or Close pressed
pass
else:
raise ValueError("Response id: {} is not considered".format(response_id))
dialog.destroy()
else:
# Offer to open saved state machine dialog
message_string = "Should the newly created state machine be opened?"
dialog = RAFCONButtonDialog(message_string, ["Open", "Do not open"],
message_type=Gtk.MessageType.QUESTION,
parent=root_window)
response_id = dialog.run()
if response_id == 1: # Apply pressed
open_as_state_machine_saved_state_as_separate_state_machine()
elif response_id in [2, -4]: # Cancel or Close pressed
pass
else:
raise ValueError("Response id: {} is not considered".format(response_id))
dialog.destroy()
return True
else:
logger.warning("Multiple states can not be saved as state machine directly. Group them before.")
return False | Save selected state as separate state machine
:return True if successfully stored, False if the storing process was canceled or stopped by condition fail
:rtype bool:
:raises exceptions.ValueError: If dialog response ids are out of bounds | entailment |
def is_state_machine_stopped_to_proceed(selected_sm_id=None, root_window=None):
""" Check if state machine is stopped and in case request user by dialog how to proceed
The function checks if a specific state machine or by default all state machines have stopped or finished
execution. If a state machine is still running the user is asked by dialog window if those should be stopped or not.
:param selected_sm_id: Specific state machine to check for
:param root_window: Root window for dialog window
:return: True if execution is (now) stopped/finished, False if the user chose to keep it running
"""
# check if the/a state machine is still running
if not state_machine_execution_engine.finished_or_stopped():
if selected_sm_id is None or selected_sm_id == state_machine_manager.active_state_machine_id:
message_string = "A state machine is still running. This state machine can only be refreshed" \
"when not longer running."
dialog = RAFCONButtonDialog(message_string, ["Stop execution and refresh",
"Keep running and do not refresh"],
message_type=Gtk.MessageType.QUESTION,
parent=root_window)
response_id = dialog.run()
state_machine_stopped = False
if response_id == 1:
state_machine_execution_engine.stop()
state_machine_stopped = True
elif response_id == 2:
logger.debug("State machine will stay running and no refresh will be performed!")
dialog.destroy()
return state_machine_stopped
# nothing running (or another state machine runs) -> safe to proceed
return True | Check if state machine is stopped and in case request user by dialog how to proceed
The function checks if a specific state machine or by default all state machines have stopped or finished
execution. If a state machine is still running the user is asked by a dialog window if those should be stopped or not.
:param selected_sm_id: Specific state machine to check for
:param root_window: Root window for dialog window
:return: | entailment |
def refresh_selected_state_machine():
"""Reloads the selected state machine.
Asks for confirmation when the selected state machine or one of its source editors has unsaved changes.
"""
selected_sm_id = rafcon.gui.singleton.state_machine_manager_model.selected_state_machine_id
selected_sm = state_machine_manager.state_machines[selected_sm_id]
state_machines_editor_ctrl = rafcon.gui.singleton.main_window_controller.get_controller('state_machines_editor_ctrl')
states_editor_ctrl = rafcon.gui.singleton.main_window_controller.get_controller('states_editor_ctrl')
# abort if the state machine is still running and the user does not stop it
if not is_state_machine_stopped_to_proceed(selected_sm_id, state_machines_editor_ctrl.get_root_window()):
return
# check if the a dirty flag is still set
all_tabs = list(states_editor_ctrl.tabs.values())
all_tabs.extend(states_editor_ctrl.closed_tabs.values())
dirty_source_editor = [tab_dict['controller'] for tab_dict in all_tabs if
tab_dict['source_code_view_is_dirty'] is True]
if selected_sm.marked_dirty or dirty_source_editor:
message_string = "Are you sure you want to reload the currently selected state machine?\n\n" \
"The following elements have been modified and not saved. " \
"These changes will get lost:"
message_string = "%s\n* State machine #%s and name '%s'" % (
message_string, str(selected_sm_id), selected_sm.root_state.name)
for ctrl in dirty_source_editor:
if ctrl.model.state.get_state_machine().state_machine_id == selected_sm_id:
message_string = "%s\n* Source code of state with name '%s' and path '%s'" % (
message_string, ctrl.model.state.name, ctrl.model.state.get_path())
dialog = RAFCONButtonDialog(message_string, ["Reload anyway", "Cancel"],
message_type=Gtk.MessageType.WARNING, parent=states_editor_ctrl.get_root_window())
response_id = dialog.run()
dialog.destroy()
if response_id == 1: # Reload anyway
pass
else:
logger.debug("Refresh of selected state machine canceled")
return
library_manager.clean_loaded_libraries()
refresh_libraries()
states_editor_ctrl.close_pages_for_specific_sm_id(selected_sm_id)
state_machines_editor_ctrl.refresh_state_machine_by_id(selected_sm_id) | Reloads the selected state machine.
def refresh_all(force=False):
"""Remove/close all libraries and state machines and reloads them freshly from the file system
:param bool force: Force flag to avoid any checks
"""
state_machines_editor_ctrl = rafcon.gui.singleton.main_window_controller.get_controller('state_machines_editor_ctrl')
states_editor_ctrl = rafcon.gui.singleton.main_window_controller.get_controller('states_editor_ctrl')
if force:
pass # no checks direct refresh
else:
# check if a state machine is still running
if not is_state_machine_stopped_to_proceed(root_window=states_editor_ctrl.get_root_window()):
return
# check if the a dirty flag is still set
all_tabs = list(states_editor_ctrl.tabs.values())
all_tabs.extend(states_editor_ctrl.closed_tabs.values())
dirty_source_editor = [tab_dict['controller'] for tab_dict in all_tabs if
tab_dict['source_code_view_is_dirty'] is True]
if state_machine_manager.has_dirty_state_machine() or dirty_source_editor:
# list every unsaved element so the user knows what would be lost
message_string = "Are you sure you want to reload the libraries and all state machines?\n\n" \
"The following elements have been modified and not saved. " \
"These changes will get lost:"
for sm_id, sm in state_machine_manager.state_machines.items():
if sm.marked_dirty:
message_string = "%s\n* State machine #%s and name '%s'" % (
message_string, str(sm_id), sm.root_state.name)
for ctrl in dirty_source_editor:
message_string = "%s\n* Source code of state with name '%s' and path '%s'" % (
message_string, ctrl.model.state.name, ctrl.model.state.get_path())
dialog = RAFCONButtonDialog(message_string, ["Reload anyway", "Cancel"],
message_type=Gtk.MessageType.WARNING, parent=states_editor_ctrl.get_root_window())
response_id = dialog.run()
dialog.destroy()
if response_id == 1: # Reload anyway
pass
else:
logger.debug("Refresh canceled")
return
library_manager.clean_loaded_libraries()
refresh_libraries()
states_editor_ctrl.close_all_pages()
state_machines_editor_ctrl.refresh_all_state_machines() | Remove/close all libraries and state machines and reloads them freshly from the file system
:param bool force: Force flag to avoid any checks | entailment |
def delete_core_element_of_model(model, raise_exceptions=False, recursive=True, destroy=True, force=False):
"""Deletes respective core element of handed model of its state machine
If the model is one of state, data flow or transition, it is tried to delete that model together with its
data from the corresponding state machine.
:param model: The model of respective core element to delete
:param bool raise_exceptions: Whether to raise exceptions or only log errors in case of failures
:param bool recursive: Passed through to the core remove method
:param bool destroy: Access the destroy flag of the core remove methods
:param bool force: Passed through to the core remove method
:return: True if successful, False else
"""
# the root state must never be removed
if isinstance(model, AbstractStateModel) and model.state.is_root_state:
logger.warning("Deletion is not allowed. {0} is root state of state machine.".format(model.core_element))
return False
state_m = model.parent
if state_m is None:
msg = "Model has no parent from which it could be deleted from"
if raise_exceptions:
raise ValueError(msg)
logger.error(msg)
return False
# elements inside a library state are read-only
if is_selection_inside_of_library_state(selected_elements=[model]):
logger.warning("Deletion is not allowed. Element {0} is inside of a library.".format(model.core_element))
return False
assert isinstance(state_m, StateModel)
state = state_m.state
core_element = model.core_element
try:
if core_element in state:
state.remove(core_element, recursive=recursive, destroy=destroy, force=force)
return True
return False
except (AttributeError, ValueError) as e:
if raise_exceptions:
raise
logger.error("The model '{}' for core element '{}' could not be deleted: {}".format(model, core_element, e))
return False | Deletes respective core element of handed model of its state machine
If the model is one of state, data flow or transition, it is tried to delete that model together with its
data from the corresponding state machine.
:param model: The model of respective core element to delete
:param bool raise_exceptions: Whether to raise exceptions or only log errors in case of failures
:param bool destroy: Access the destroy flag of the core remove methods
:return: True if successful, False else | entailment |
def delete_core_elements_of_models(models, raise_exceptions=True, recursive=True, destroy=True, force=False):
"""Deletes all respective core elements for the given models
Calls the :func:`delete_core_element_of_model` for all given models.
:param models: A single model or a list of models of respective core element to be deleted
:param bool raise_exceptions: Whether to raise exceptions or log error messages in case of an error
:param bool recursive: Passed through to the core remove method
:param bool destroy: Access the destroy flag of the core remove methods
:param bool force: Passed through to the core remove method
:return: The number of models that were successfully deleted
"""
# If only one model is given, make a list out of it
if not hasattr(models, '__iter__'):
models = [models]
# booleans returned by delete_core_element_of_model sum up to the success count
return sum(delete_core_element_of_model(model, raise_exceptions, recursive=recursive, destroy=destroy, force=force)
for model in models) | Deletes all respective core elements for the given models
Calls the :func:`delete_core_element_of_model` for all given models.
:param models: A single model or a list of models of respective core element to be deleted
:param bool raise_exceptions: Whether to raise exceptions or log error messages in case of an error
:param bool destroy: Access the destroy flag of the core remove methods
:return: The number of models that were successfully deleted | entailment |
def is_selection_inside_of_library_state(state_machine_m=None, selected_elements=None):
""" Check if handed or selected elements are inside of library state
If no state machine model or selected_elements are handed the method is searching for the selected state machine and
its selected elements. If selected_elements list is handed handed state machine model is ignored.
:param rafcon.gui.models.state_machine.StateMachineModel state_machine_m: Optional state machine model
:param list selected_elements: Optional model list that is considered to be selected
:return: True if elements inside of library state
"""
if state_machine_m is None:
state_machine_m = rafcon.gui.singleton.state_machine_manager_model.get_selected_state_machine_model()
if state_machine_m is None and selected_elements is None:
return False
selection_in_lib = []
selected_elements = state_machine_m.selection.get_all() if selected_elements is None else selected_elements
for model in selected_elements:
# check if model is element of child state or the root state (or its scoped variables) of a LibraryState
state_m = model if isinstance(model.core_element, State) else model.parent
selection_in_lib.append(state_m.state.get_next_upper_library_root_state() is not None)
# check if model is part of the shell (io-port or outcome) of a LibraryState
if not isinstance(model.core_element, State) and isinstance(state_m, LibraryStateModel):
selection_in_lib.append(True)
# a single element inside a library is enough to report True
if any(selection_in_lib):
return True
return False | Check if handed or selected elements are inside of library state
If no state machine model or selected_elements are handed the method is searching for the selected state machine and
its selected elements. If selected_elements list is handed handed state machine model is ignored.
:param rafcon.gui.models.state_machine.StateMachineModel state_machine_m: Optional state machine model
:param list selected_elements: Optional model list that is considered to be selected
:return: True if elements inside of library state | entailment |
def add_new_state(state_machine_m, state_type):
"""Triggered when shortcut keys for adding a new state are pressed, or Menu Bar "Edit, Add State" is clicked.
Adds a new state only if the parent state (selected state) is a container state, and if the graphical editor or
the state machine tree are in focus.
:param rafcon.gui.models.state_machine.StateMachineModel state_machine_m: Target state machine model
:param state_type: Desired StateType of the new state; falls back to StateType.EXECUTION if unknown
"""
assert isinstance(state_machine_m, StateMachineModel)
# fall back to an execution state if an unknown state type was handed
if state_type not in list(StateType):
state_type = StateType.EXECUTION
if len(state_machine_m.selection.states) != 1:
logger.warning("Please select exactly one desired parent state, before adding a new state")
return
state_m = state_machine_m.selection.get_selected_state()
if is_selection_inside_of_library_state(selected_elements=[state_m]):
logger.warning("Add new state is not performed because selected target state is inside of a library state.")
return
if isinstance(state_m, StateModel):
return gui_helper_state.add_state(state_m, state_type)
else:
logger.warning("Add new state is not performed because target state indication has to be a {1} not {0}"
"".format(state_m.__class__.__name__, StateModel.__name__))
# TODO this code can not be reached -> recover again? -> e.g. feature select transition add's state to parent
if isinstance(state_m, (TransitionModel, DataFlowModel)) or \
isinstance(state_m, (DataPortModel, OutcomeModel)) and isinstance(state_m.parent, ContainerStateModel):
return gui_helper_state.add_state(state_m.parent, state_type) | Triggered when shortcut keys for adding a new state are pressed, or Menu Bar "Edit, Add State" is clicked.
Adds a new state only if the parent state (selected state) is a container state, and if the graphical editor or
the state machine tree are in focus. | entailment |
def insert_state_into_selected_state(state, as_template=False):
    """Insert the given state into the single currently selected state.

    :param state: the state which is inserted
    :param as_template: If a state is a library state it can be inserted as template
    :return: boolean: success of the insertion
    """
    state_machine_manager_m = rafcon.gui.singleton.state_machine_manager_model
    if not isinstance(state, State):
        logger.warning("A state is needed to be insert not {0}".format(state))
        return False
    if not state_machine_manager_m.selected_state_machine_id:
        logger.warning("Please select a container state within a state machine first")
        return False
    current_selection = state_machine_manager_m.state_machines[
        state_machine_manager_m.selected_state_machine_id].selection
    if len(current_selection.states) > 1:
        logger.warning("Please select exactly one state for the insertion")
        return False
    if len(current_selection.states) == 0:
        logger.warning("Please select a state for the insertion")
        return False
    target_state_m = current_selection.get_selected_state()
    if is_selection_inside_of_library_state(selected_elements=[target_state_m]):
        logger.warning("State is not insert because target state is inside of a library state.")
        return False
    gui_helper_state.insert_state_as(target_state_m, state, as_template)
    return True
def substitute_selected_state(state, as_template=False, keep_name=False):
    """Substitute the selected state with the handed state

    :param rafcon.core.states.state.State state: A state of any functional type that derives from State
    :param bool as_template: The flag determines if a handed the state of type LibraryState is insert as template
    :param bool keep_name: Whether the name of the replaced state is kept for the new state
    :return: True if the substitution was triggered, False otherwise
    :raises ValueError: If a state of type DeciderState is handed
    """
    assert isinstance(state, State)
    from rafcon.core.states.barrier_concurrency_state import DeciderState
    if isinstance(state, DeciderState):
        raise ValueError("State of type DeciderState can not be substituted.")
    smm_m = rafcon.gui.singleton.state_machine_manager_model
    if not smm_m.selected_state_machine_id:
        logger.error("Selected state machine can not be found, please select a state within a state machine first.")
        return False
    selection = smm_m.state_machines[smm_m.selected_state_machine_id].selection
    # Fix: verify that exactly one state is selected BEFORE accessing the selected state
    # (previously get_selected_state() was called first, which is unsafe for empty selections)
    if len(selection.states) != 1:
        logger.error("Please select exactly one state for the substitution")
        return False
    selected_state_m = selection.get_selected_state()
    if is_selection_inside_of_library_state(selected_elements=[selected_state_m]):
        logger.warning("Substitute is not performed because target state is inside of a library state.")
        # Fix: return False instead of a bare return for a consistent boolean return value
        return False
    gui_helper_state.substitute_state_as(selected_state_m, state, as_template, keep_name)
    return True
def construct_partial_network(self, cluster, scenario):
    """
    Compute the partial network that has been merged into a single cluster.

    The resulting network retains the external cluster buses that
    share some line with the cluster identified by `cluster`.
    These external buses will be prefixed by self.idx_prefix in order to
    prevent name clashes with buses in the disaggregation.

    :param cluster: Index of the cluster to disaggregate
    :param scenario: Scenario of the disaggregation run (not read by this method)
    :return: Tuple of (partial_network, external_buses) where
             `partial_network` is the result of the partial decomposition
             and `external_buses` represent clusters adjacent to `cluster` that may
             be influenced by calculations done on the partial network.
    """
    # Create an empty network
    partial_network = Network()
    # find all lines that have at least one bus inside the cluster
    busflags = (self.buses['cluster'] == cluster)
    def is_bus_in_cluster(conn):
        # True iff the bus with index `conn` belongs to the cluster
        return busflags[conn]
    # Copy configurations to new network
    partial_network.snapshots = self.original_network.snapshots
    partial_network.snapshot_weightings = (self.original_network
                                           .snapshot_weightings)
    partial_network.carriers = self.original_network.carriers
    # Collect all connectors that have some node inside the cluster
    external_buses = pd.DataFrame()
    line_types = ['lines', 'links', 'transformers']
    for line_type in line_types:
        # Copy all lines that reside entirely inside the cluster ...
        setattr(partial_network, line_type,
                filter_internal_connector(
                    getattr(self.original_network, line_type),
                    is_bus_in_cluster))
        # ... and their time series
        # TODO: These are all time series, not just the ones from lines
        #       residing entirely in side the cluster.
        #       Is this a problem?
        setattr(partial_network, line_type + '_t',
                getattr(self.original_network, line_type + '_t'))
        # Copy all lines whose `bus0` lies within the cluster
        left_external_connectors = filter_left_external_connector(
            getattr(self.original_network, line_type),
            is_bus_in_cluster)
        if not left_external_connectors.empty:
            # rewrite external bus ids into their prefixed cluster bus ids
            f = lambda x: self.idx_prefix + self.clustering.busmap.loc[x]
            # temporarily silence pandas' chained-assignment warning for the
            # in-place column update below, then restore the previous setting
            ca_option = pd.get_option('mode.chained_assignment')
            pd.set_option('mode.chained_assignment', None)
            left_external_connectors.loc[:, 'bus0'] = (
                left_external_connectors.loc[:, 'bus0'].apply(f))
            pd.set_option('mode.chained_assignment', ca_option)
            external_buses = pd.concat((external_buses,
                                        left_external_connectors.bus0))
        # Copy all lines whose `bus1` lies within the cluster
        right_external_connectors = filter_right_external_connector(
            getattr(self.original_network, line_type),
            is_bus_in_cluster)
        if not right_external_connectors.empty:
            f = lambda x: self.idx_prefix + self.clustering.busmap.loc[x]
            ca_option = pd.get_option('mode.chained_assignment')
            pd.set_option('mode.chained_assignment', None)
            right_external_connectors.loc[:, 'bus1'] = (
                right_external_connectors.loc[:, 'bus1'].apply(f))
            pd.set_option('mode.chained_assignment', ca_option)
            external_buses = pd.concat((external_buses,
                                        right_external_connectors.bus1))
    # Collect all buses that are contained in or somehow connected to the
    # cluster
    buses_in_lines = self.buses[busflags].index
    bus_types = ['loads', 'generators', 'stores', 'storage_units',
                 'shunt_impedances']
    # Copy all values that are part of the cluster
    partial_network.buses = self.original_network.buses[
        self.original_network.buses.index.isin(buses_in_lines)]
    # Collect all buses that are external, but connected to the cluster ...
    externals_to_insert = self.clustered_network.buses[
        self.clustered_network.buses.index.isin(
            map(lambda x: x[0][len(self.idx_prefix):],
                external_buses.values))]
    # ... prefix them to avoid name clashes with buses from the original
    # network ...
    self.reindex_with_prefix(externals_to_insert)
    # .. and insert them as well as their time series
    partial_network.buses = (partial_network.buses
                             .append(externals_to_insert))
    partial_network.buses_t = self.original_network.buses_t
    # TODO: Rename `bustype` to on_bus_type
    for bustype in bus_types:
        # Copy loads, generators, ... from original network to network copy
        setattr(partial_network, bustype,
                filter_buses(getattr(self.original_network, bustype),
                             buses_in_lines))
        # Collect on-bus components from external, connected clusters
        buses_to_insert = filter_buses(
            getattr(self.clustered_network, bustype),
            map(lambda x: x[0][len(self.idx_prefix):],
                external_buses.values))
        # Prefix their external bindings
        buses_to_insert.loc[:, 'bus'] = (
            self.idx_prefix +
            buses_to_insert.loc[:, 'bus'])
        setattr(partial_network, bustype,
                getattr(partial_network, bustype).append(buses_to_insert))
        # Also copy their time series
        setattr(partial_network,
                bustype + '_t',
                getattr(self.original_network, bustype + '_t'))
        # Note: The code above copies more than necessary, because it
        #       copies every time series for `bustype` from the original
        #       network and not only the subset belonging to the partial
        #       network. The commented code below tries to filter the time
        #       series accordingly, but there must be bug somewhere because
        #       using it, the time series in the clusters and sums of the
        #       time series after disaggregation don't match up.
        """
        series = getattr(self.original_network, bustype + '_t')
        partial_series = type(series)()
        for s in series:
            partial_series[s] = series[s].loc[
                :,
                getattr(partial_network, bustype)
                .index.intersection(series[s].columns)]
        setattr(partial_network, bustype + '_t', partial_series)
        """
    # Just a simple sanity check
    # TODO: Remove when sure that disaggregation will not go insane anymore
    for line_type in line_types:
        assert (getattr(partial_network, line_type).bus0.isin(
            partial_network.buses.index).all())
        assert (getattr(partial_network, line_type).bus1.isin(
            partial_network.buses.index).all())
    return partial_network, external_buses
def solve(self, scenario, solver):
    """
    Decompose each cluster into separate units and try to optimize them
    separately

    :param scenario: Scenario the disaggregation is run for
    :param solver: Solver that may be used to optimize partial networks
    """
    clusters = set(self.clustering.busmap.values)
    n = len(clusters)
    # per-cluster wall-clock timings of the three disaggregation phases
    self.stats = {'clusters': pd.DataFrame(
        index=sorted(clusters),
        columns=["decompose", "spread", "transfer"])}
    profile = cProfile.Profile()
    for i, cluster in enumerate(sorted(clusters)):
        print('---')
        print('Decompose cluster %s (%d/%d)' % (cluster, i+1, n))
        # phase 1: build the partial network for this cluster
        profile.enable()
        t = time.time()
        partial_network, externals = self.construct_partial_network(
            cluster,
            scenario)
        profile.disable()
        self.stats['clusters'].loc[cluster, 'decompose'] = time.time() - t
        print('Decomposed in ',
              self.stats['clusters'].loc[cluster, 'decompose'])
        # phase 2: optimize the partial network
        t = time.time()
        profile.enable()
        self.solve_partial_network(cluster, partial_network, scenario,
                                   solver)
        profile.disable()
        self.stats['clusters'].loc[cluster, 'spread'] = time.time() - t
        print('Result distributed in ',
              self.stats['clusters'].loc[cluster, 'spread'])
        # phase 3: transfer the results back into the full network
        profile.enable()
        t = time.time()
        self.transfer_results(partial_network, externals)
        profile.disable()
        self.stats['clusters'].loc[cluster, 'transfer'] = time.time() - t
        print('Results transferred in ',
              self.stats['clusters'].loc[cluster, 'transfer'])
        # NOTE(review): the profiler stays enabled after this point and `t` set here
        # times the entire check block below (its value from the last iteration is
        # used for self.stats['check']) -- confirm this is intended
        profile.enable()
        t = time.time()
        print('---')
    # sanity output: compare aggregate quantities between the clustered and the
    # disaggregated network -- the printed differences should be close to zero
    fs = (mc("sum"), mc("sum"))
    for bt, ts in (
            ('generators', {'p': fs, 'q': fs}),
            ('storage_units', {'p': fs, 'state_of_charge': fs, 'q': fs})):
        print("Attribute sums, {}, clustered - disaggregated:" .format(bt))
        cnb = getattr(self.clustered_network, bt)
        onb = getattr(self.original_network, bt)
        print("{:>{}}: {}".format('p_nom_opt', 4 + len('state_of_charge'),
              reduce(lambda x, f: f(x), fs[:-1], cnb['p_nom_opt'])
              -
              reduce(lambda x, f: f(x), fs[:-1], onb['p_nom_opt'])))
        print("Series sums, {}, clustered - disaggregated:" .format(bt))
        cnb = getattr(self.clustered_network, bt + '_t')
        onb = getattr(self.original_network, bt + '_t')
        for s in ts:
            print("{:>{}}: {}".format(s, 4 + len('state_of_charge'),
                  reduce(lambda x, f: f(x), ts[s], cnb[s])
                  -
                  reduce(lambda x, f: f(x), ts[s], onb[s])))
    profile.disable()
    self.stats['check'] = time.time() - t
    print('Checks computed in ', self.stats['check'])
def modify_origin(self, from_state, from_outcome):
    """Set both from_state and from_outcome at the same time to modify transition origin

    :param str from_state: State id of the origin state
    :param int from_outcome: Outcome id of the origin port
    :raises exceptions.ValueError: If parameters have wrong types or the new transition is not valid
    """
    # Both may be None (no origin); otherwise both must have the correct type
    if from_state is not None or from_outcome is not None:
        if not isinstance(from_state, string_types):
            raise ValueError("Invalid transition origin port: from_state must be a string")
        if not isinstance(from_outcome, int):
            raise ValueError("Invalid transition origin port: from_outcome must be of type int")
    previous_state = self.from_state
    previous_outcome = self.from_outcome
    self._from_state = from_state
    self._from_outcome = from_outcome
    valid, message = self._check_validity()
    if not valid:
        # roll back to the previous origin to keep the transition consistent
        self._from_state = previous_state
        self._from_outcome = previous_outcome
        raise ValueError("The transition origin could not be changed: {0}".format(message))
def modify_target(self, to_state, to_outcome=None):
    """Set both to_state and to_outcome at the same time to modify transition target

    :param str to_state: State id of the target state
    :param int to_outcome: Outcome id of the target port
    :raises exceptions.ValueError: If parameters have wrong types or the new transition is not valid
    """
    # NOTE(review): `to_outcome is not int` compares the value against the type object `int` by
    # identity and is therefore True for any ordinary value; presumably a None- or isinstance-check
    # was intended (compare modify_origin's `from_state is None and from_outcome is None`) --
    # confirm the intended skip condition before changing it
    if not (to_state is None and (to_outcome is not int and to_outcome is not None)):
        if not isinstance(to_state, string_types):
            raise ValueError("Invalid transition target port: to_state must be a string")
        if not isinstance(to_outcome, int) and to_outcome is not None:
            raise ValueError("Invalid transition target port: to_outcome must be of type int or None (if to_state "
                             "is of type str)")
    # remember old values to be able to roll back on validation failure
    old_to_state = self.to_state
    old_to_outcome = self.to_outcome
    self._to_state = to_state
    self._to_outcome = to_outcome
    valid, message = self._check_validity()
    if not valid:
        self._to_state = old_to_state
        self._to_outcome = old_to_outcome
        raise ValueError("The transition target could not be changed: {0}".format(message))
def register_view(self, view):
    """Called when the View was registered

    Makes the tree-view columns editable and wires up all widget signal handlers.
    """
    ExtendedController.register_view(self, view)  # no super to avoid sm based selection initialization
    # make all three text columns editable
    for text_field in ['name_text', 'value_text', 'type_text']:
        view[text_field].set_property('editable', True)
    self.tree_view.connect('key-press-event', self.tree_view_keypress_callback)
    self._apply_value_on_edited_and_focus_out(view['name_text'], self.apply_new_global_variable_name)
    self._apply_value_on_edited_and_focus_out(view['value_text'], self.apply_new_global_variable_value)
    self._apply_value_on_edited_and_focus_out(view['type_text'], self.apply_new_global_variable_type)
    # wire up the toolbar buttons
    for button_name, callback in [('new_global_variable_button', self.on_add),
                                  ('delete_global_variable_button', self.on_remove),
                                  ('lock_global_variable_button', self.on_lock),
                                  ('unlock_global_variable_button', self.on_unlock)]:
        view[button_name].connect('clicked', callback)
    self._tree_selection.set_mode(Gtk.SelectionMode.MULTIPLE)
def global_variable_is_editable(self, gv_name, intro_message='edit'):
    """Check whether a global variable can currently be modified (i.e. is not locked).

    :param str gv_name: Name of global variable to be checked
    :param str intro_message: Message which is used form a useful logger error message if needed
    :return: False if the variable is locked, True otherwise
    """
    if not self.model.global_variable_manager.is_locked(gv_name):
        return True
    logger.error("{1} of global variable '{0}' is not possible, as it is locked".format(gv_name, intro_message))
    return False
def on_add(self, widget, data=None):
    """Create a global variable with default value and select its row

    Triggered when the add button in the global variables tab is clicked.
    """
    new_name = "new_global_%s" % self.global_variable_counter
    self.global_variable_counter += 1
    try:
        self.model.global_variable_manager.set_variable(new_name, None)
    except (RuntimeError, AttributeError, TypeError) as e:
        logger.warning("Addition of new global variable '{0}' failed: {1}".format(new_name, e))
    # select the new row even if the creation failed, to keep the previous UI behavior
    self.select_entry(new_name)
    return True
def on_lock(self, widget, data=None):
    """Locks respective selected core element"""
    path_list = None
    if self.view is not None:
        model, path_list = self.tree_view.get_selection().get_selected_rows()
    models = [self.list_store[path][self.MODEL_STORAGE_ID] for path in path_list] if path_list else []
    if not models:
        self._logger.warning("Please select an element to be locked.")
        return
    if len(models) > 1:
        # only the first selected element is locked
        self._logger.warning("Please select only one element to be locked.")
    try:
        self.model.global_variable_manager.lock_variable(models[0])
    except AttributeError as e:
        self._logger.warning("The respective core element of {1}.list_store couldn't be locked. -> {0}"
                             "".format(e, self.__class__.__name__))
    return True
def remove_core_element(self, model):
    """Remove respective core element of handed global variable name

    :param str model: String that is the key/gv_name of core element which should be removed
    :return: None
    """
    gv_name = model
    # locked variables must not be deleted
    if not self.global_variable_is_editable(gv_name, "Deletion"):
        return
    try:
        self.model.global_variable_manager.delete_variable(gv_name)
    except AttributeError as e:
        logger.warning("The respective global variable '{1}' couldn't be removed. -> {0}"
                       "".format(e, model))
def apply_new_global_variable_name(self, path, new_gv_name):
    """Change global variable name/key according handed string

    Updates the global variable name only if different and already in list store.

    :param path: The path identifying the edited global variable tree view row, can be str, int or tuple.
    :param str new_gv_name: New global variable name
    """
    gv_name = self.list_store[path][self.NAME_STORAGE_ID]
    if gv_name == new_gv_name or not self.global_variable_is_editable(gv_name, 'Name change'):
        return
    gvm = self.model.global_variable_manager
    data_value = gvm.get_representation(gv_name)
    data_type = gvm.get_data_type(gv_name)
    try:
        # renaming means deleting the old entry and re-creating it with identical value and type
        gvm.delete_variable(gv_name)
        gvm.set_variable(new_gv_name, data_value, data_type=data_type)
        gv_name = new_gv_name
    except (AttributeError, RuntimeError, TypeError) as e:
        logger.warning("Can not apply new name '{0}'".format(e))
    self.update_global_variables_list_store()
    self.select_entry(gv_name)
    # informing the tab key feature handler function about the changed core element id
    if hasattr(self.tree_view_keypress_callback.__func__, "core_element_id"):
        self.tree_view_keypress_callback.__func__.core_element_id = gv_name
def apply_new_global_variable_value(self, path, new_value_as_string):
    """Change global variable value according handed string

    Updates the global variable value only if new value string is different to old representation.

    :param path: The path identifying the edited global variable tree view row, can be str, int or tuple.
    :param str new_value_as_string: New global variable value as string
    """
    # Fix: compare against the VALUE column (was DATA_TYPE_AS_STRING_STORAGE_ID, i.e. the
    # data-type column), so an unchanged value is detected correctly
    if self.list_store[path][self.VALUE_AS_STRING_STORAGE_ID] == new_value_as_string:
        return
    gv_name = self.list_store[path][self.NAME_STORAGE_ID]
    if not self.global_variable_is_editable(gv_name, 'Change of value'):
        return
    data_type = self.model.global_variable_manager.get_data_type(gv_name)
    old_value = self.model.global_variable_manager.get_representation(gv_name)
    # preserve type especially if type=NoneType
    if issubclass(data_type, (type(old_value), type(None))):
        old_type = data_type
        if issubclass(data_type, type(None)):
            # untyped variable -> use the type of the current value for parsing
            old_type = type(old_value)
        logger.debug("Trying to parse '{}' to type '{}' of old global variable value '{}'".format(
            new_value_as_string, old_type.__name__, old_value))
        try:
            new_value = type_helpers.convert_string_value_to_type_value(new_value_as_string, old_type)
        except (AttributeError, ValueError) as e:
            if issubclass(data_type, type(None)):
                # untyped variable whose value could not be parsed -> store the raw string
                new_value = new_value_as_string
                logger.warning("New value '{}' stored as string, previous value '{}' of global variable '{}' was "
                               "of type '{}'".format(new_value, old_value, gv_name, type(old_value).__name__))
            else:
                logger.warning("Restoring old value of global variable '{}': {}".format(gv_name, e))
                return
    else:
        logger.error("Global variable '{}' with inconsistent value data type '{}' and data type '{}'".format(
            gv_name, [type(old_value).__name__, type(None).__name__], data_type.__name__))
        return
    try:
        self.model.global_variable_manager.set_variable(gv_name, new_value, data_type=data_type)
    except (RuntimeError, AttributeError, TypeError) as e:
        logger.error("Error while setting global variable '{0}' to value '{1}' -> Exception: {2}".format(
            gv_name, new_value, e))
def apply_new_global_variable_type(self, path, new_data_type_as_string):
    """Change global variable data type according handed string

    Updates the global variable data type only if different.

    :param path: The path identifying the edited global variable tree view row, can be str, int or tuple.
    :param str new_data_type_as_string: New global variable data type as string
    """
    if self.list_store[path][self.DATA_TYPE_AS_STRING_STORAGE_ID] == new_data_type_as_string:
        return
    gv_name = self.list_store[path][self.NAME_STORAGE_ID]
    if not self.global_variable_is_editable(gv_name, 'Type change'):
        return
    old_value = self.model.global_variable_manager.get_representation(gv_name)
    # parse the data type string; abort on invalid type names
    try:
        new_data_type = type_helpers.convert_string_to_type(new_data_type_as_string)
    except (AttributeError, ValueError) as e:
        logger.error("Could not change data type to '{0}': {1}".format(new_data_type_as_string, e))
        return
    assert isinstance(new_data_type, type)
    # carry the old value over; convert it unless the new type is NoneType
    if issubclass(new_data_type, type(None)):
        new_value = old_value
    else:
        try:
            new_value = new_data_type(old_value)
        except (ValueError, TypeError) as e:
            # fall back to the type's default value if the old one cannot be converted
            new_value = new_data_type()
            logger.warning("Old value '{}' of global variable '{}' could not be parsed to new type '{}' and is "
                           "therefore resetted: {}".format(old_value, gv_name, new_data_type.__name__, e))
    # set value in global variable manager
    try:
        self.model.global_variable_manager.set_variable(gv_name, new_value, data_type=new_data_type)
    except (ValueError, RuntimeError, TypeError) as e:
        logger.error("Could not set new value unexpected failure '{0}' to value '{1}' -> Exception: {2}"
                     "".format(gv_name, new_value, e))
def assign_notification_from_gvm(self, model, prop_name, info):
    """Handles gtkmvc3 notification from global variable manager

    Calls update of whole list store in case new variable was added. Avoids to run updates without reasonable change.
    Holds tree store and updates row elements if is-locked or global variable value changes.
    """
    # ignore pure lock-bookkeeping calls and calls that failed
    # NOTE(review): `info['result'] is Exception` is an identity comparison against the Exception
    # class itself, not an isinstance check -- confirm what failed calls actually store in 'result'
    if info['method_name'] in ['set_locked_variable'] or info['result'] is Exception:
        return
    if info['method_name'] in ['lock_variable', 'unlock_variable']:
        # the key is passed either positionally (presumably args[0] is the manager instance,
        # args[1] the key -- verify against the notification source) or as the 'key' kwarg
        key = info.kwargs.get('key', info.args[1]) if len(info.args) > 1 else info.kwargs['key']
        if key in self.list_store_iterators:
            # update the lock flag of the affected row in place
            gv_row_path = self.list_store.get_path(self.list_store_iterators[key])
            self.list_store[gv_row_path][self.IS_LOCKED_AS_STRING_STORAGE_ID] = \
                str(self.model.global_variable_manager.is_locked(key))
    elif info['method_name'] in ['set_variable', 'delete_variable']:
        if info['method_name'] == 'set_variable':
            key = info.kwargs.get('key', info.args[1]) if len(info.args) > 1 else info.kwargs['key']
            if key in self.list_store_iterators:
                # value of an existing variable changed -> update its row in place and stop
                gv_row_path = self.list_store.get_path(self.list_store_iterators[key])
                self.list_store[gv_row_path][self.VALUE_AS_STRING_STORAGE_ID] = \
                    str(self.model.global_variable_manager.get_representation(key))
                self.list_store[gv_row_path][self.DATA_TYPE_AS_STRING_STORAGE_ID] = \
                    self.model.global_variable_manager.get_data_type(key).__name__
                return
        # a variable was created or deleted -> rebuild the whole list store
        self.update_global_variables_list_store()
    else:
        logger.warning('Notification that is not handled')
def update_global_variables_list_store(self):
    """Rebuild the global variable list store from scratch

    Triggered after creation or deletion of a variable has taken place.
    """
    self.list_store_iterators = {}
    self.list_store.clear()
    gvm = self.model.global_variable_manager
    for key in sorted(gvm.get_all_keys()):
        # columns: name, data type name, value representation, lock state
        row_iter = self.list_store.append([key,
                                           gvm.get_data_type(key).__name__,
                                           str(gvm.get_representation(key)),
                                           str(gvm.is_locked(key)),
                                           ])
        self.list_store_iterators[key] = row_iter
def convert_libraries_in_path(config_path, lib_path, target_path=None):
    """Recursively re-save all libraries found at the specified path.

    :param config_path: Path of the configuration used for the conversion
    :param lib_path: the path to look for libraries
    :param target_path: Optional target path the converted libraries are written to (mirrors the source layout)
    :return:
    """
    for entry in os.listdir(lib_path):
        entry_path = os.path.join(lib_path, entry)
        if not os.path.isdir(entry_path):
            continue
        if entry.startswith('.'):
            # hidden directories are skipped on purpose
            logger.debug("lib_root_path/lib_path .*-folder are ignored if within lib_path, "
                         "e.g. -> {0} -> full path is {1}".format(entry, entry_path))
            continue
        entry_target = os.path.join(target_path, entry) if target_path else None
        if os.path.exists(os.path.join(entry_path, "statemachine.yaml")) or \
                os.path.exists(os.path.join(entry_path, "statemachine.json")):
            # a state machine file marks a library root -> convert it
            if entry_target is None:
                convert(config_path, entry_path)
            else:
                convert(config_path, entry_path, entry_target)
        else:
            # no state machine file -> descend into the sub directory
            if entry_target is None:
                convert_libraries_in_path(config_path, entry_path)
            else:
                convert_libraries_in_path(config_path, entry_path, entry_target)
def get_view_for_id(self, view_class, element_id, parent_item=None):
    """Searches and returns the View for the given id and type

    :param view_class: The view type to search for
    :param element_id: The id of element of the searched view
    :param gaphas.item.Item parent_item: Restrict the search to this parent item
    :return: The view for the given id or None if not found
    """
    from rafcon.gui.mygaphas.items.state import StateView
    from rafcon.gui.mygaphas.items.connection import DataFlowView, TransitionView
    candidates = self.get_all_items() if parent_item is None else self.get_children(parent_item)

    def has_searched_id(item):
        # compare the item's model id against element_id, depending on the requested view type
        if view_class is StateView and isinstance(item, StateView):
            return item.model.state.state_id == element_id
        if view_class is TransitionView and isinstance(item, TransitionView):
            return item.model.transition.transition_id == element_id
        if view_class is DataFlowView and isinstance(item, DataFlowView):
            return item.model.data_flow.data_flow_id == element_id
        return False

    for item in candidates:
        if has_searched_id(item):
            return item
    return None
def wait_for_update(self, trigger_update=False):
    """Update canvas and handle all events in the gtk queue

    :param bool trigger_update: Whether to call update_now() or not
    """
    if trigger_update:
        self.update_now()
    from gi.repository import Gtk
    from gi.repository import GLib
    from threading import Event
    all_events_processed = Event()

    def mark_done(processed_event):
        processed_event.set()

    # Handle all events from gaphas, but not from gtkmvc3:
    # gaphas idle handlers run at a higher priority than gtkmvc3 ones, so an idle callback
    # scheduled between the two priorities fires once gaphas is done
    # NOTE(review): `/ 2` yields a float on Python 3 while GLib priorities are ints -- confirm
    priority = (GLib.PRIORITY_HIGH_IDLE + GLib.PRIORITY_DEFAULT_IDLE) / 2
    # idle_add is necessary here, as we do not want to block the user from interacting with the GUI
    # while gaphas is redrawing
    GLib.idle_add(mark_done, all_events_processed, priority=priority)
    while not all_events_processed.is_set():
        Gtk.main_iteration()
def _get_value(self):
"""
Return two delegating variables. Each variable should contain
a value attribute with the real value.
"""
x, y = self._point.x, self._point.y
self._px, self._py = self._item_point.canvas.get_matrix_i2i(self._item_point,
self._item_target).transform_point(x, y)
return self._px, self._py | Return two delegating variables. Each variable should contain
a value attribute with the real value. | entailment |
def convert_string_to_type(string_value):
    """Converts a string into a type or class

    :param string_value: the string to be converted, e.g. "int"
    :return: The type derived from string_value, e.g. int
    :raises ValueError: If no type or class can be derived from the string
    """
    # "None" refers to NoneType
    if string_value in ['None', type(None).__name__]:
        return type(None)
    # If the parameter is already a type or class, return it
    if isinstance(string_value, type) or isclass(string_value):
        return string_value
    # Get object associated with string
    # First check whether we are having a built in type (int, str, etc)
    if sys.version_info >= (3,):
        import builtins as builtins23
    else:
        import __builtin__ as builtins23
    if hasattr(builtins23, string_value):
        obj = getattr(builtins23, string_value)
        if type(obj) is type:
            return obj
    # If not, try to locate the object via its dotted module path
    try:
        obj = locate(string_value)
    except ErrorDuringImport as e:
        raise ValueError("Unknown type '{0}'".format(e))
    # Check whether object is a type
    # Fix: return the already located object instead of calling locate() a second time
    if type(obj) is type:
        return obj
    # Check whether object is a class
    if isclass(obj):
        return obj
    # Raise error if none is the case
    raise ValueError("Unknown type '{0}'".format(string_value))
def convert_string_value_to_type_value(string_value, data_type):
    """Helper function to convert a given string to a given data type

    :param str string_value: the string to convert
    :param type data_type: the target data type
    :return: the converted value
    :raises AttributeError: If the conversion fails
    """
    from ast import literal_eval
    try:
        if data_type in (str, type(None)):
            converted_value = str(string_value)
        elif data_type == int:
            converted_value = int(string_value)
        elif data_type == float:
            converted_value = float(string_value)
        elif data_type == bool:
            # literal_eval accepts e.g. "True", "0", "[]"; bool() maps them to a truth value
            converted_value = bool(literal_eval(string_value))
        elif data_type in (list, dict, tuple):
            converted_value = literal_eval(string_value)
            if type(converted_value) != data_type:
                raise ValueError("Invalid syntax: {0}".format(string_value))
        elif data_type == object:
            try:
                converted_value = literal_eval(string_value)
            except (ValueError, SyntaxError):
                # not a Python literal -> treat the input as a plain string
                converted_value = literal_eval('"' + string_value + '"')
        elif isinstance(data_type, type):  # Try native type conversion
            converted_value = data_type(string_value)
        elif isclass(data_type):  # Call class constructor
            converted_value = data_type(string_value)
        else:
            # Fix: second placeholder was '{0}', printing the string value twice instead of the type name
            raise ValueError("No conversion from string '{0}' to data type '{1}' defined".format(
                string_value, data_type.__name__))
    except (ValueError, SyntaxError, TypeError) as e:
        raise AttributeError("Can't convert '{0}' to type '{1}': {2}".format(string_value, data_type.__name__, e))
    return converted_value
def type_inherits_of_type(inheriting_type, base_type):
    """Checks whether inheriting_type inherits from base_type

    :param inheriting_type: the type whose ancestry is checked
    :param base_type: the candidate base type
    :return: True if base_type is a (transitive) base of inheriting_type
    """
    assert isinstance(inheriting_type, type) or isclass(inheriting_type)
    assert isinstance(base_type, type) or isclass(base_type)
    # Walk the single-inheritance chain iteratively instead of recursing
    current_type = inheriting_type
    while True:
        if current_type == base_type:
            return True
        bases = current_type.__bases__
        # Chains with multiple (or no) direct bases are not followed
        if len(bases) != 1:
            return False
        current_type = bases[0]
def clear_results_db(session):
    '''Used to clear the result tables in the OEDB. Caution!
    This deletes EVERY RESULT SET!'''
    # ORM classes for all result tables in the model_draft schema
    from egoio.db_tables.model_draft import EgoGridPfHvResultBus as BusResult,\
        EgoGridPfHvResultBusT as BusTResult,\
        EgoGridPfHvResultStorage as StorageResult,\
        EgoGridPfHvResultStorageT as StorageTResult,\
        EgoGridPfHvResultGenerator as GeneratorResult,\
        EgoGridPfHvResultGeneratorT as GeneratorTResult,\
        EgoGridPfHvResultLine as LineResult,\
        EgoGridPfHvResultLineT as LineTResult,\
        EgoGridPfHvResultLoad as LoadResult,\
        EgoGridPfHvResultLoadT as LoadTResult,\
        EgoGridPfHvResultTransformer as TransformerResult,\
        EgoGridPfHvResultTransformerT as TransformerTResult,\
        EgoGridPfHvResultMeta as ResultMeta
    # Ask for confirmation twice before irreversibly deleting all result sets
    print('Are you sure that you want to clear all results in the OEDB?')
    choice = ''
    while choice not in ['y', 'n']:
        choice = input('(y/n): ')
    if choice == 'y':
        print('Are you sure?')
        choice2 = ''
        while choice2 not in ['y', 'n']:
            choice2 = input('(y/n): ')
        if choice2 == 'y':
            print('Deleting all results...')
            session.query(BusResult).delete()
            session.query(BusTResult).delete()
            session.query(StorageResult).delete()
            session.query(StorageTResult).delete()
            session.query(GeneratorResult).delete()
            session.query(GeneratorTResult).delete()
            session.query(LoadResult).delete()
            session.query(LoadTResult).delete()
            session.query(LineResult).delete()
            session.query(LineTResult).delete()
            session.query(TransformerResult).delete()
            session.query(TransformerTResult).delete()
            session.query(ResultMeta).delete()
            # Commit only once, after all deletes succeeded
            session.commit()
        else:
            print('Deleting aborted!')
    else:
        print('Deleting aborted!')
def results_to_oedb(session, network, args, grid='hv', safe_results=False):
    """Return results obtained from PyPSA to oedb

    Parameters
    ----------
    session:
        SQLAlchemy session the results are written with
    network : PyPSA network container
        Holds topology of grid including results from powerflow analysis
    args: dict
        Settings from appl.py
    grid: str
        Choose voltage-level, currently only 'hv' implemented
    safe_results: boolean
        If it is set to 'True' the result set will be saved
        to the versioned grid schema eventually apart from
        being saved to the model_draft by a SQL-script.
        ONLY set to True if you know what you are doing.
    """
    # Update generator_ids when k_means clustering to get integer ids
    if args['network_clustering_kmeans'] != False:
        new_index = pd.DataFrame(index=network.generators.index)
        new_index['new'] = range(len(network.generators))
        for col in (network.generators_t):
            if not network.generators_t[col].empty:
                network.generators_t[col].columns = \
                    new_index.new[network.generators_t[col].columns]
        network.generators.index = range(len(network.generators))
    # moved this here to prevent error when not using the mv-schema
    import datetime
    if grid.lower() == 'mv':
        print('MV currently not implemented')
    elif grid.lower() == 'hv':
        # ORM classes of the HV result tables in the model_draft schema
        from egoio.db_tables.model_draft import\
            EgoGridPfHvResultBus as BusResult,\
            EgoGridPfHvResultBusT as BusTResult,\
            EgoGridPfHvResultStorage as StorageResult,\
            EgoGridPfHvResultStorageT as StorageTResult,\
            EgoGridPfHvResultGenerator as GeneratorResult,\
            EgoGridPfHvResultGeneratorT as GeneratorTResult,\
            EgoGridPfHvResultLine as LineResult,\
            EgoGridPfHvResultLineT as LineTResult,\
            EgoGridPfHvResultLoad as LoadResult,\
            EgoGridPfHvResultLoadT as LoadTResult,\
            EgoGridPfHvResultTransformer as TransformerResult,\
            EgoGridPfHvResultTransformerT as TransformerTResult,\
            EgoGridPfHvResultMeta as ResultMeta,\
            EgoGridPfHvSource as Source
    else:
        print('Please enter mv or hv!')
    print('Uploading results to db...')
    # get last result id and get new one
    last_res_id = session.query(func.max(ResultMeta.result_id)).scalar()
    if last_res_id == None:
        new_res_id = 1
    else:
        new_res_id = last_res_id + 1
    # result meta data: store all unknown args entries as misc settings
    res_meta = ResultMeta()
    meta_misc = []
    for arg, value in args.items():
        if arg not in dir(res_meta) and arg not in ['db', 'lpfile',
                                                    'results', 'export']:
            meta_misc.append([arg, str(value)])
    res_meta.result_id = new_res_id
    res_meta.scn_name = args['scn_name']
    res_meta.calc_date = datetime.datetime.now()
    res_meta.user_name = args['user_name']
    res_meta.method = args['method']
    res_meta.start_snapshot = args['start_snapshot']
    res_meta.end_snapshot = args['end_snapshot']
    res_meta.safe_results = safe_results
    res_meta.snapshots = network.snapshots.tolist()
    res_meta.solver = args['solver']
    res_meta.settings = meta_misc
    session.add(res_meta)
    session.commit()
    # get source_id: make sure every generator carrier exists in the
    # source table, creating missing entries on the fly
    sources = pd.read_sql(session.query(Source).statement, session.bind)
    for gen in network.generators.index:
        if network.generators.carrier[gen] not in sources.name.values:
            new_source = Source()
            new_source.source_id = session.query(
                func.max(Source.source_id)).scalar()+1
            new_source.name = network.generators.carrier[gen]
            session.add(new_source)
            session.commit()
            sources = pd.read_sql(
                session.query(Source).statement, session.bind)
        try:
            old_source_id = int(
                sources.source_id[
                    sources.name == network.generators.carrier[gen]])
            network.generators.set_value(gen, 'source', int(old_source_id))
        except:
            print(
                'Source ' + network.generators.carrier[gen] +
                ' is not in the source table!')
    # same procedure for storage unit carriers
    for stor in network.storage_units.index:
        if network.storage_units.carrier[stor] not in sources.name.values:
            new_source = Source()
            new_source.source_id = session.query(
                func.max(Source.source_id)).scalar()+1
            new_source.name = network.storage_units.carrier[stor]
            session.add(new_source)
            session.commit()
            sources = pd.read_sql(
                session.query(Source).statement, session.bind)
        try:
            old_source_id = int(
                sources.source_id[
                    sources.name == network.storage_units.carrier[stor]])
            network.storage_units.set_value(stor, 'source', int(old_source_id))
        except:
            print(
                'Source ' + network.storage_units.carrier[stor] +
                ' is not in the source table!')
    # map each ORM result class to the index / data of its network component
    whereismyindex = {BusResult: network.buses.index,
                      LoadResult: network.loads.index,
                      LineResult: network.lines.index,
                      TransformerResult: network.transformers.index,
                      StorageResult: network.storage_units.index,
                      GeneratorResult: network.generators.index,
                      BusTResult: network.buses.index,
                      LoadTResult: network.loads.index,
                      LineTResult: network.lines.index,
                      TransformerTResult: network.transformers.index,
                      StorageTResult: network.storage_units.index,
                      GeneratorTResult: network.generators.index}
    whereismydata = {BusResult: network.buses,
                     LoadResult: network.loads,
                     LineResult: network.lines,
                     TransformerResult: network.transformers,
                     StorageResult: network.storage_units,
                     GeneratorResult: network.generators,
                     BusTResult: network.buses_t,
                     LoadTResult: network.loads_t,
                     LineTResult: network.lines_t,
                     TransformerTResult: network.transformers_t,
                     StorageTResult: network.storage_units_t,
                     GeneratorTResult: network.generators_t}
    # column names differ between the db schema and current PyPSA
    new_to_old_name = {'p_min_pu_fixed': 'p_min_pu',
                       'p_max_pu_fixed': 'p_max_pu',
                       'dispatch': 'former_dispatch',
                       'current_type': 'carrier',
                       'soc_cyclic': 'cyclic_state_of_charge',
                       'soc_initial': 'state_of_charge_initial'}
    ormclasses = [BusResult, LoadResult, LineResult, TransformerResult,
                  GeneratorResult, StorageResult, BusTResult, LoadTResult,
                  LineTResult, TransformerTResult, GeneratorTResult,
                  StorageTResult]
    for ormclass in ormclasses:
        for index in whereismyindex[ormclass]:
            myinstance = ormclass()
            columns = ormclass.__table__.columns.keys()
            columns.remove('result_id')
            myinstance.result_id = new_res_id
            # find the component id column (e.g. bus_id) of this table
            for col in columns:
                if '_id' in col:
                    class_id_name = col
                else:
                    continue
            setattr(myinstance, class_id_name, index)
            columns.remove(class_id_name)
            # timeseries tables: class names end with 'T' (e.g. ...BusT)
            if str(ormclass)[:-2].endswith('T'):
                for col in columns:
                    if col == 'soc_set':
                        try:
                            setattr(myinstance, col, getattr(
                                whereismydata[ormclass],
                                'state_of_charge_set')[index].tolist())
                        except:
                            pass
                    else:
                        try:
                            setattr(myinstance, col, getattr(
                                whereismydata[ormclass], col)[index].tolist())
                        except:
                            pass
                session.add(myinstance)
            else:
                for col in columns:
                    if col in new_to_old_name:
                        if col == 'soc_cyclic':
                            try:
                                setattr(myinstance, col, bool(
                                    whereismydata[ormclass].loc[index,
                                                                new_to_old_name[col]]))
                            except:
                                pass
                        elif 'Storage' in str(ormclass) and col == 'dispatch':
                            try:
                                setattr(myinstance, col,
                                        whereismydata[ormclass].loc[index, col])
                            except:
                                pass
                        else:
                            try:
                                setattr(
                                    myinstance, col, whereismydata[ormclass].\
                                    loc[index, new_to_old_name[col]])
                            except:
                                pass
                    elif col in ['s_nom_extendable', 'p_nom_extendable']:
                        try:
                            setattr(myinstance, col, bool(
                                whereismydata[ormclass].loc[index, col]))
                        except:
                            pass
                    else:
                        try:
                            setattr(myinstance, col,
                                    whereismydata[ormclass].loc[index, col])
                        except:
                            pass
                session.add(myinstance)
    session.commit()
    print('Upload finished!')
    return
def run_sql_script(conn, scriptname='results_md2grid.sql'):
    """This function runs .sql scripts in the folder 'sql_scripts'

    :param conn: SQLAlchemy connection/engine used to execute the script
    :param str scriptname: file name of the script inside 'sql_scripts'
    """
    script_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), 'sql_scripts'))
    # Fix: use a context manager so the file handle is closed reliably
    # (the original left the file open)
    with open(os.path.join(script_dir, scriptname)) as script_file:
        script_str = script_file.read()
    conn.execution_options(autocommit=True).execute(script_str)
    return
def extension(network, session, version, scn_extension, start_snapshot,
              end_snapshot, **kwargs):
    """
    Function that adds an additional network to the existing network container.
    The new network can include every PyPSA-component (e.g. buses, lines, links).
    To connect it to the existing network, transformers are needed.

    All components and its timeseries of the additional scenario need to be
    inserted in the fitting 'model_draft.ego_grid_pf_hv_extension_' table.
    The scn_name in the tables have to be labled with 'extension_' + scn_name
    (e.g. 'extension_nep2035').

    Until now, the tables include three additional scenarios:
    'nep2035_confirmed': all new lines and needed transformers planed in the
        'Netzentwicklungsplan 2035' (NEP2035) that have been confirmed by the
        Bundesnetzagentur (BNetzA)
    'nep2035_b2': all new lines and needed transformers planned in the
        NEP 2035 in the scenario 2035 B2
    'BE_NO_NEP 2035': DC-lines and transformers to connect the upcomming
        electrical-neighbours Belgium and Norway; generation, loads and its
        timeseries in Belgium and Norway for scenario 'NEP 2035'

    Parameters
    -----
    network : The existing network container (e.g. scenario 'NEP 2035')
    session : session-data
    version : grid version; None selects the model_draft tables
    scn_extension : Name of the additional scenario (WITHOUT 'extension_')
    start_snapshot, end_snapshot: Simulation time

    Returns
    ------
    network : Network container including existing and additional network
    """
    # model_draft tables and versioned grid tables use different prefixes
    if version is None:
        ormcls_prefix = 'EgoGridPfHvExtension'
    else:
        ormcls_prefix = 'EgoPfHvExtension'
    # Adding overlay-network to existing network
    scenario = NetworkScenario(session,
                               version=version,
                               prefix=ormcls_prefix,
                               method=kwargs.get('method', 'lopf'),
                               start_snapshot=start_snapshot,
                               end_snapshot=end_snapshot,
                               scn_name='extension_' + scn_extension)
    network = scenario.build_network(network)
    # Allow lossless links to conduct bidirectional
    network.links.loc[network.links.efficiency == 1.0, 'p_min_pu'] = -1
    # Set coordinates for new buses from their geometry column
    extension_buses = network.buses[network.buses.scn_name ==
                                    'extension_' + scn_extension]
    for idx, row in extension_buses.iterrows():
        wkt_geom = to_shape(row['geom'])
        network.buses.loc[idx, 'x'] = wkt_geom.x
        network.buses.loc[idx, 'y'] = wkt_geom.y
    return network
def decommissioning(network, session, args, **kwargs):
    """
    Function that removes components in a decommissioning-scenario from
    the existing network container.
    Currently, only lines can be decommissioned.

    All components of the decommissioning scenario need to be inserted in
    the fitting 'model_draft.ego_grid_pf_hv_extension_' table.
    The scn_name in the tables have to be labled with 'decommissioning_'
    + scn_name (e.g. 'decommissioning_nep2035').

    Parameters
    -----
    network : The existing network container (e.g. scenario 'NEP 2035')
    session : session-data
    args : dict
        Settings including 'gridversion' and 'scn_decommissioning'

    Returns
    ------
    network : Network container including decommissioning
    """
    # model_draft vs. versioned grid schema
    if args['gridversion'] == None:
        ormclass = getattr(import_module('egoio.db_tables.model_draft'),
                           'EgoGridPfHvExtensionLine')
    else:
        ormclass = getattr(import_module('egoio.db_tables.grid'),
                           'EgoPfHvExtensionLine')
    query = session.query(ormclass).filter(
        ormclass.scn_name == 'decommissioning_' +
        args['scn_decommissioning'])
    df_decommisionning = pd.read_sql(query.statement,
                                     session.bind,
                                     index_col='line_id')
    df_decommisionning.index = df_decommisionning.index.astype(str)
    # Scale s_nom_min of extension lines that replace a decommissioned line,
    # depending on the voltage level of the decommissioned line
    for idx, row in network.lines.iterrows():
        if (row['s_nom_min'] !=0) & (
                row['scn_name'] =='extension_' + args['scn_decommissioning']):
            v_nom_dec = df_decommisionning['v_nom'][(
                df_decommisionning.project == row['project']) & (
                df_decommisionning.project_id == row['project_id'])]
            # NOTE(review): the assignment multiplies with the whole
            # s_nom_min series (pandas aligns on the index) and uses chained
            # indexing — confirm this updates only the intended row
            if (v_nom_dec == 110).any():
                network.lines.s_nom_min[network.lines.index == idx]\
                    = args['branch_capacity_factor']['HV'] *\
                    network.lines.s_nom_min
            else:
                network.lines.s_nom_min[network.lines.index == idx] =\
                    args['branch_capacity_factor']['eHV'] *\
                    network.lines.s_nom_min
    ### Drop decommissioning-lines from existing network
    network.lines = network.lines[~network.lines.index.isin(
        df_decommisionning.index)]
    return network
def distance(x0, x1, y0, y1):
    """
    Function that calculates the square of the distance between two points.

    Parameters
    -----
    x0: x - coordinate of point 0
    x1: x - coordinate of point 1
    y0: y - coordinate of point 0
    y1: y - coordinate of point 1

    Returns
    ------
    distance : float
        square of distance
    """
    # Pythagoras without the final square root; inputs are pandas Series,
    # the arithmetic is done on their underlying arrays
    delta_x = x1.values - x0.values
    delta_y = y1.values - y0.values
    return delta_x * delta_x + delta_y * delta_y
def calc_nearest_point(bus1, network):
    """
    Function that finds the geographical nearest point in a network from a given bus.

    Parameters
    -----
    bus1: float
        id of bus
    network: Pypsa network container
        network including the comparable buses

    Returns
    ------
    bus0 : float
        bus_id of nearest point
    """
    bus1_index = network.buses.index[network.buses.index == bus1]
    # Exclude bus1 itself and every bus already directly connected to it
    # via a line or a link
    forbidden_buses = np.append(
        bus1_index.values, network.lines.bus1[
            network.lines.bus0 == bus1].values)
    forbidden_buses = np.append(
        forbidden_buses, network.lines.bus0[network.lines.bus1 == bus1].values)
    forbidden_buses = np.append(
        forbidden_buses, network.links.bus0[network.links.bus1 == bus1].values)
    forbidden_buses = np.append(
        forbidden_buses, network.links.bus1[network.links.bus0 == bus1].values)
    x0 = network.buses.x[network.buses.index.isin(bus1_index)]
    y0 = network.buses.y[network.buses.index.isin(bus1_index)]
    comparable_buses = network.buses[~network.buses.index.isin(
        forbidden_buses)]
    x1 = comparable_buses.x
    y1 = comparable_buses.y
    # Squared euclidean distance; no sqrt needed to find the minimum
    distance = (x1.values - x0.values)*(x1.values - x0.values) + \
        (y1.values - y0.values)*(y1.values - y0.values)
    min_distance = distance.min()
    bus0 = comparable_buses[(((x1.values - x0.values)*(x1.values - x0.values
        ) + (y1.values - y0.values)*(y1.values - y0.values)) == min_distance)]
    # If several buses are equally close, keep the one with the largest id
    bus0 = bus0.index[bus0.index == bus0.index.max()]
    bus0 = ''.join(bus0.values)
    return bus0
def map_ormclass(self, name):
    """ Populate _mapped attribute with orm class

    Parameters
    ----------
    name : str
        Component part of orm class name. Concatenated with _prefix.
    """
    # Resolve the fully prefixed class name on the ORM package; a missing
    # relation only produces a warning, it does not raise
    qualified_name = self._prefix + name
    try:
        self._mapped[name] = getattr(self._pkg, qualified_name)
    except AttributeError:
        print('Warning: Relation %s does not exist.' % name)
def configure_timeindex(self):
    """ Construct a DateTimeIndex with the queried temporal resolution,
    start- and end_snapshot. """
    try:
        ormclass = self._mapped['TempResolution']
        if self.version:
            tr = self.session.query(ormclass).filter(
                ormclass.temp_id == self.temp_id).filter(
                ormclass.version == self.version).one()
        else:
            tr = self.session.query(ormclass).filter(
                ormclass.temp_id == self.temp_id).one()
    except (KeyError, NoResultFound):
        # NOTE(review): only a message is printed here; 'tr' stays unbound
        # and the next statement will raise a NameError — confirm intended
        print('temp_id %s does not exist.' % self.temp_id)
    # NOTE(review): DatetimeIndex(start=..., periods=..., freq=...) is
    # deprecated in newer pandas in favour of pd.date_range — verify the
    # pinned pandas version
    timeindex = pd.DatetimeIndex(start=tr.start_time,
                                 periods=tr.timesteps,
                                 freq=tr.resolution)
    # Snapshots are 1-based in the settings, hence the - 1
    self.timeindex = timeindex[self.start_snapshot - 1: self.end_snapshot]
    """ pandas.tseries.index.DateTimeIndex :
    Index of snapshots or timesteps. """
def fetch_by_relname(self, name):
    """ Construct DataFrame with component data from filtered table data.

    Parameters
    ----------
    name : str
        Component name.

    Returns
    -------
    pd.DataFrame
        Component data.
    """
    ormclass = self._mapped[name]
    query = self.session.query(ormclass)
    # The carrier table is scenario-independent; all others are filtered
    if name != carr_ormclass:
        query = query.filter(
            ormclass.scn_name == self.scn_name)
    if self.version:
        query = query.filter(ormclass.version == self.version)
    # TODO: Naming is not consistent. Change in database required.
    if name == 'Transformer':
        name = 'Trafo'
    df = pd.read_sql(query.statement,
                     self.session.bind,
                     index_col=name.lower() + '_id')
    if name == 'Link':
        # bus columns come back as strings/floats; PyPSA expects ints here
        df['bus0'] = df.bus0.astype(int)
        df['bus1'] = df.bus1.astype(int)
    if 'source' in df:
        # replace numeric source ids by their names
        df.source = df.source.map(self.id_to_source())
    return df
def series_fetch_by_relname(self, name, column):
    """ Construct DataFrame with component timeseries data from filtered
    table data.

    Parameters
    ----------
    name : str
        Component name.
    column : str
        Component field with timevarying data.

    Returns
    -------
    pd.DataFrame
        Component data.
    """
    ormclass = self._mapped[name]
    # TODO: This is implemented in a not very robust way.
    # Derive the id column from the leading CamelCase word,
    # e.g. 'BusT' -> 'bus_id'
    id_column = re.findall(r'[A-Z][^A-Z]*', name)[0] + '_' + 'id'
    id_column = id_column.lower()
    # Slice the stored array column to the requested snapshot window
    query = self.session.query(
        getattr(ormclass, id_column),
        getattr(ormclass, column)[self.start_snapshot: self.end_snapshot].
        label(column)).filter(and_(
            ormclass.scn_name == self.scn_name,
            ormclass.temp_id == self.temp_id))
    if self.version:
        query = query.filter(ormclass.version == self.version)
    df = pd.io.sql.read_sql(query.statement,
                            self.session.bind,
                            columns=[column],
                            index_col=id_column)
    df.index = df.index.astype(str)
    # change of format to fit pypsa: one column per component id,
    # one row per snapshot
    df = df[column].apply(pd.Series).transpose()
    try:
        assert not df.empty
        df.index = self.timeindex
    except AssertionError:
        print("No data for %s in column %s." % (name, column))
    return df
def build_network(self, network=None, *args, **kwargs):
    """ Core method to construct PyPSA Network object.

    :param network: optional existing pypsa.Network to extend; a new one
        is created (with this scenario's snapshots) when None
    :return: the populated pypsa.Network
    """
    # TODO: build_network takes care of divergences in database design and
    # future PyPSA changes from PyPSA's v0.6 on. This concept should be
    # replaced, when the oedb has a revision system in place, because
    # sometime this will break!!!
    if network != None:
        network = network
    else:
        network = pypsa.Network()
        network.set_snapshots(self.timeindex)
    timevarying_override = False
    # Column renames depend on the installed PyPSA version
    if pypsa.__version__ == '0.11.0':
        old_to_new_name = {'Generator':
                           {'p_min_pu_fixed': 'p_min_pu',
                            'p_max_pu_fixed': 'p_max_pu',
                            'source': 'carrier',
                            'dispatch': 'former_dispatch'},
                           'Bus':
                           {'current_type': 'carrier'},
                           'Transformer':
                           {'trafo_id': 'transformer_id'},
                           'Storage':
                           {'p_min_pu_fixed': 'p_min_pu',
                            'p_max_pu_fixed': 'p_max_pu',
                            'soc_cyclic': 'cyclic_state_of_charge',
                            'soc_initial': 'state_of_charge_initial',
                            'source': 'carrier'}}
        timevarying_override = True
    else:
        old_to_new_name = {'Storage':
                           {'soc_cyclic': 'cyclic_state_of_charge',
                            'soc_initial': 'state_of_charge_initial'}}
    for comp, comp_t_dict in self.config.items():
        # TODO: This is confusing, should be fixed in db
        pypsa_comp_name = 'StorageUnit' if comp == 'Storage' else comp
        df = self.fetch_by_relname(comp)
        if comp in old_to_new_name:
            tmp = old_to_new_name[comp]
            df.rename(columns=tmp, inplace=True)
        network.import_components_from_dataframe(df, pypsa_comp_name)
        if comp_t_dict:
            for comp_t, columns in comp_t_dict.items():
                for col in columns:
                    df_series = self.series_fetch_by_relname(comp_t, col)
                    # TODO: VMagPuSet is not implemented.
                    # Flexible generators keep their static dispatch values;
                    # drop their timeseries columns
                    if timevarying_override and comp == 'Generator' \
                            and not df_series.empty:
                        idx = df[df.former_dispatch == 'flexible'].index
                        idx = [i for i in idx if i in df_series.columns]
                        df_series.drop(idx, axis=1, inplace=True)
                    try:
                        pypsa.io.import_series_from_dataframe(
                            network,
                            df_series,
                            pypsa_comp_name,
                            col)
                    except (ValueError, AttributeError):
                        print("Series %s of component %s could not be "
                              "imported" % (col, pypsa_comp_name))
    # populate carrier attribute in PyPSA network
    network.import_components_from_dataframe(
        self.fetch_by_relname(carr_ormclass), 'Carrier')
    self.network = network
    return network
def run(self):
    """ This defines the sequence of actions that are taken when the preemptive concurrency state is executed

    :return: the final outcome of the execution
    """
    logger.debug("Starting execution of {0}{1}".format(self, " (backwards)" if self.backward_execution else ""))
    self.setup_run()
    try:
        concurrency_history_item = self.setup_forward_or_backward_execution()
        concurrency_queue = self.start_child_states(concurrency_history_item)
        #######################################################
        # wait for the first threads to finish
        #######################################################
        finished_thread_id = concurrency_queue.get()
        finisher_state = self.states[finished_thread_id]
        finisher_state.join()
        # preempt all child states
        if not self.backward_execution:
            for state_id, state in self.states.items():
                state.recursively_preempt_states()
        # join all states
        for history_index, state in enumerate(self.states.values()):
            self.join_state(state, history_index, concurrency_history_item)
            self.add_state_execution_output_to_scoped_data(state.output_data, state)
            self.update_scoped_variables_with_output_dictionary(state.output_data, state)
        # add the data of the first state now to overwrite data of the preempted states
        self.add_state_execution_output_to_scoped_data(finisher_state.output_data, finisher_state)
        self.update_scoped_variables_with_output_dictionary(finisher_state.output_data, finisher_state)
        #######################################################
        # handle backward execution case
        #######################################################
        if self.states[finished_thread_id].backward_execution:
            return self.finalize_backward_execution()
        else:
            self.backward_execution = False
        #######################################################
        # handle no transition
        #######################################################
        transition = self.get_transition_for_outcome(self.states[finished_thread_id],
                                                     self.states[finished_thread_id].final_outcome)
        if transition is None:
            # final outcome is set here
            transition = self.handle_no_transition(self.states[finished_thread_id])
        # if the transition is still None, then the state was preempted or aborted, in this case return
        if transition is None:
            self.output_data["error"] = RuntimeError("state aborted")
        else:
            # propagate the error of the finishing child, if any
            if 'error' in self.states[finished_thread_id].output_data:
                self.output_data["error"] = self.states[finished_thread_id].output_data['error']
            self.final_outcome = self.outcomes[transition.to_outcome]
        return self.finalize_concurrency_state(self.final_outcome)
    except Exception as e:
        logger.error("{0} had an internal error: {1}\n{2}".format(self, str(e), str(traceback.format_exc())))
        self.output_data["error"] = e
        self.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE
        return self.finalize(Outcome(-1, "aborted"))
def _check_transition_validity(self, check_transition):
    """ Transition of BarrierConcurrencyStates must least fulfill the condition of a ContainerState.
    Start transitions are forbidden in the ConcurrencyState

    :param check_transition: the transition to check for validity
    :return: tuple of (valid, message)
    """
    # First apply the generic container-state transition checks
    valid, message = super(PreemptiveConcurrencyState, self)._check_transition_validity(check_transition)
    # On top of that, only transitions to the parent state are allowed
    if valid and check_transition.to_state != self.state_id:
        return False, "Only transitions to the parent state are allowed"
    return valid, message
def prepare_destruction(self):
    """Prepares the model for destruction

    Un-registers itself as observer from the state machine and the root state
    """
    try:
        self.relieve_model(self.state_machine_model)
        # the buffered root state model must still be the current one
        assert self.__buffered_root_state_model is self.state_machine_model.root_state
        self.relieve_model(self.__buffered_root_state_model)
        self.state_machine_model = None
        self.__buffered_root_state_model = None
        self.modifications.prepare_destruction()
    except KeyError:  # Might happen if the observer was already unregistered
        pass
    # a still-active action indicates an inconsistency; try to clean it up
    if self.active_action:
        try:
            self.active_action.prepare_destruction()
        except Exception as e:
            logger.exception("The modification history has had left over an active-action and "
                             "could not destroy it {0}.".format(e))
        self.active_action = None
def recover_specific_version(self, pointer_on_version_to_recover):
    """ Recovers a specific version of the all_time_history element by doing several undos and redos.

    :param pointer_on_version_to_recover: the id of the list element which is to recover
    :return:
    """
    # search for traceable path -> list of action to undo and list of action to redo
    logger.info("Going to history status #{0}".format(pointer_on_version_to_recover))
    undo_redo_list = self.modifications.get_undo_redo_list_from_active_trail_history_item_to_version_id(pointer_on_version_to_recover)
    logger.debug("Multiple undo and redo to reach modification history element of version {0} "
                 "-> undo-redo-list is: {1}".format(pointer_on_version_to_recover, undo_redo_list))
    # hold the storage lock for the whole batch of undos/redos
    self.state_machine_model.storage_lock.acquire()
    for elem in undo_redo_list:
        if elem[1] == 'undo':
            # do undo
            self._undo(elem[0])
        else:
            # do redo
            self._redo(elem[0])
    self.modifications.reorganize_trail_history_for_version_id(pointer_on_version_to_recover)
    self.change_count += 1
    self.state_machine_model.storage_lock.release()
def assign_notification_states_after(self, model, prop_name, info):
    """
    This method is called, when any state, transition, data flow, etc. within the state machine modifications. This
    then typically requires a redraw of the graphical editor, to display these modifications immediately.

    Finishes the currently open history action once all matching "before"
    notifications have been balanced by "after" notifications, or interrupts
    the active action when the observed call crashed.

    :param model: The state machine model
    :param prop_name: The property that was changed
    :param info: Information about the change
    """
    # logger.verbose("states_after: " + str(NotificationOverview(info, False, self.__class__.__name__)))
    # ignore notifications while the history itself is busy (e.g. replaying undo/redo)
    # and ignore state changes that were merely triggered by the execution engine
    if self.busy or info.method_name == 'state_change' and \
            info.kwargs.prop_name == 'state' and \
            info.kwargs.method_name in BY_EXECUTION_TRIGGERED_OBSERVABLE_STATE_METHODS:
        return
    else:
        # logger.debug("History states_AFTER") # \n%s \n%s \n%s" % (model, prop_name, info))
        # avoid to vast computation time
        # second, cheaper guard for execution-triggered methods when the info dict is nested
        if 'kwargs' in info and 'method_name' in info['kwargs'] and \
                info['kwargs']['method_name'] in BY_EXECUTION_TRIGGERED_OBSERVABLE_STATE_METHODS:
            return

        overview = NotificationOverview(info, self.with_verbose, self.__class__.__name__)
        # handle interrupts of action caused by exceptions
        if overview['result'][-1] == "CRASH in FUNCTION" or isinstance(overview['result'][-1], Exception):
            # only interrupt when this "after" corresponds to the outermost pending "before"
            if self.count_before == 1:
                return self._interrupt_active_action(info)
            pass

        # modifications of parent are not observed
        if not overview['method_name'][0] == 'state_change' or overview['method_name'][-1] == 'parent':
            return

        # logger.debug("History states_AFTER {0}".format(overview))

        # decrease counter and finish action if count_before = 0
        if self.locked:
            self.after_count()
            if self.count_before == 0:
                self.finish_new_action(overview)
                if self.with_verbose:
                    logger.verbose("HISTORY COUNT WAS OF SUCCESS")
        else:
            logger.error("HISTORY after not count [states] -> For every before there should be a after.") | This method is called, when any state, transition, data flow, etc. within the state machine modifications. This
then typically requires a redraw of the graphical editor, to display these modifications immediately.
:param model: The state machine model
:param prop_name: The property that was changed
:param info: Information about the change | entailment |
def get_undo_redo_list_from_active_trail_history_item_to_version_id(self, version_id):
    """Perform fast search from currently active branch to specific version_id and collect all recovery steps.

    :param version_id: id of the version that should be reached
    :return: list of (version_id, 'undo'|'redo') tuples that, applied in order, reach the requested version
    """
    all_trail_action = [a.version_id for a in self.single_trail_history() if a is not None]
    all_active_action = self.get_all_active_actions()
    undo_redo_list = []
    # redo steps needed at the end to walk from the trail onto a side branch
    _undo_redo_list = []
    intermediate_version_id = version_id
    if self.with_verbose:
        logger.verbose("Version_id    : {0} in".format(intermediate_version_id))
        logger.verbose("Active actions: {0} in: {1}".format(all_active_action,
                                                            intermediate_version_id in all_active_action))
        logger.verbose("Trail actions : {0} in: {1}".format(all_trail_action,
                                                            intermediate_version_id in all_trail_action))
    if intermediate_version_id not in all_trail_action:
        # get undo to come from version_id to trail_action
        # target lies on a side branch: walk backwards via prev_id until the
        # trail is reached; each step is recorded (front-inserted) as a 'redo'
        while intermediate_version_id not in all_trail_action:
            _undo_redo_list.insert(0, (intermediate_version_id, 'redo'))
            intermediate_version_id = self.all_time_history[intermediate_version_id].prev_id
        intermediate_goal_version_id = intermediate_version_id
    else:
        intermediate_goal_version_id = version_id
    # start walking from the currently active trail position
    intermediate_version_id = self.trail_history[self.trail_pointer].version_id
    if self.with_verbose:
        logger.verbose("Version_id    : {0} {1}".format(intermediate_goal_version_id, intermediate_version_id))
        logger.verbose("Active actions: {0} in: {1}".format(all_active_action,
                                                            intermediate_version_id in all_active_action))
        logger.verbose("Trail actions : {0} in: {1}".format(all_trail_action,
                                                            intermediate_version_id in all_trail_action))
    # collect undo and redo on trail
    if intermediate_goal_version_id in all_active_action:
        # collect needed undo to reach intermediate version
        while not intermediate_version_id == intermediate_goal_version_id:
            undo_redo_list.append((intermediate_version_id, 'undo'))
            intermediate_version_id = self.all_time_history[intermediate_version_id].prev_id
    elif intermediate_goal_version_id in all_trail_action:
        # collect needed redo to reach intermediate version
        while not intermediate_version_id == intermediate_goal_version_id:
            intermediate_version_id = self.all_time_history[intermediate_version_id].next_id
            undo_redo_list.append((intermediate_version_id, 'redo'))
    # append the side-branch redo steps collected above, in order
    for elem in _undo_redo_list:
        undo_redo_list.append(elem)
    return undo_redo_list | Perform fast search from currently active branch to specific version_id and collect all recovery steps.
def set_pane_position(self, config_id):
    """Move the GTK pane identified by *config_id* to its stored position.

    Reads the position from the runtime config file and falls back to the
    pane's default position when no value was stored.

    :param config_id: The pane identifier saved in the runtime config file
    """
    fallback = constants.DEFAULT_PANE_POS[config_id]
    stored_position = global_runtime_config.get_config_value(config_id, fallback)
    self.view[constants.PANE_ID[config_id]].set_position(stored_position)
def model_changed(self, model, prop_name, info):
    """Highlight the toolbar button matching the current execution status.

    Also refreshes the execution status label shown for the active state
    machine.
    """
    # TODO: find nice solution
    # The view is already gone while the main window is being destructed
    # (e.g. after a Ctrl+C signal); nothing to update in that case.
    if not self.view:
        return
    execution_engine = rafcon.core.singleton.state_machine_execution_engine
    mode = execution_engine.status.execution_mode
    self.view['execution_status_label'].set_text(
        str(mode).replace("STATE_MACHINE_EXECUTION_STATUS.", ""))
    # map the execution mode to the single button that should appear active
    if mode is StateMachineExecutionStatus.STARTED:
        active_button = 'button_start_shortcut'
    elif mode is StateMachineExecutionStatus.PAUSED:
        active_button = 'button_pause_shortcut'
    elif execution_engine.finished_or_stopped():
        active_button = 'button_stop_shortcut'
    else:
        active_button = None  # any of the step modes
    if active_button is None:
        self.view['step_buttons'].show()
        self._set_single_button_active('button_step_mode_shortcut')
    else:
        self.view['step_buttons'].hide()
        self._set_single_button_active(active_button)
def focus_notebook_page_of_controller(self, controller):
    """Bring the notebook tab of the given child controller to the top.

    Master function for the focus pattern of the notebooks in the left
    side-bar of the main window:

    * the Execution-History tab is focused whenever requested (requests occur
      when the state machine is started and stopped)
    * the Modification-History tab is focused at most once while and once
      after an execution, if requested

    :param controller: The controller which requests to be focused.
    """
    # TODO think about substituting Controller- by View-objects; may be the better design
    if controller not in self.get_child_controllers():
        return
    # logger.info("focus controller {0}".format(controller))
    if self.view is None:
        return
    if isinstance(controller, ModificationHistoryTreeController) and not self.modification_history_was_focused:
        self.view.bring_tab_to_the_top('history')
        self.modification_history_was_focused = True
    if isinstance(controller, ExecutionHistoryTreeController):
        self.view.bring_tab_to_the_top('execution_history')
        self.modification_history_was_focused = False
def undock_sidebar(self, window_key, widget=None, event=None):
    """Undock/separate sidebar into independent window

    The sidebar is undocked and put into a separate new window. The sidebar is hidden in the main-window by
    triggering the method on_[widget_name]_hide_clicked(). Triggering this method shows the
    [widget_name]_return_button in the main-window, which does not serve any purpose when the bar is undocked.
    This button is therefore deliberately hidden. The undock button, which is also part of the sidebar is hidden,
    because the re-dock button is included in the top_tool_bar of the newly opened window. Not hiding it will
    result in two re-dock buttons visible in the new window. The new window size and position are loaded from
    runtime_config, if they exist.

    :param window_key: Upper-case key identifying the sidebar window (used to derive view/widget names)
    :param widget: The widget that emitted the signal (unused)
    :param event: The event that triggered the signal (unused)
    """
    undocked_window_name = window_key.lower() + '_window'
    widget_name = window_key.lower()
    undocked_window_view = getattr(self.view, undocked_window_name)
    undocked_window = undocked_window_view.get_top_widget()
    # NOTE(review): os.getenv returns the raw string, so any non-empty value
    # (even "False") is truthy here -- confirm this is the intended behavior
    if os.getenv("RAFCON_START_MINIMIZED", False):
        undocked_window.iconify()
    gui_helper_label.set_window_size_and_position(undocked_window, window_key)
    # move the sidebar widget from the main window into the new window
    self.view[widget_name].reparent(undocked_window_view['central_eventbox'])
    self.view['undock_{}_button'.format(widget_name)].hide()
    getattr(self, 'on_{}_hide_clicked'.format(widget_name))(None)
    self.view['{}_return_button'.format(widget_name)].hide()
    main_window = self.view.get_top_widget()
    # keep the undocked window's state in sync with the main window's state
    state_handler = main_window.connect('window-state-event', self.undock_window_callback, undocked_window)
    self.handler_ids[undocked_window_name] = {"state": state_handler}
    undocked_window.set_transient_for(main_window)
    main_window.grab_focus()
    # remember the undocked state so it can be restored on the next start
    global_runtime_config.set_config_value(window_key + '_WINDOW_UNDOCKED', True) | Undock/separate sidebar into independent window
The sidebar is undocked and put into a separate new window. The sidebar is hidden in the main-window by
triggering the method on_[widget_name]_hide_clicked(). Triggering this method shows the
[widget_name]_return_button in the main-window, which does not serve any purpose when the bar is undocked.
This button is therefore deliberately
hidden. The undock button, which is also part of the sidebar is hidden, because the re-dock button is
included in the top_tool_bar of the newly opened window. Not hiding it will result in two re-dock buttons
visible in the new window. The new window size and position are loaded from runtime_config, if they exist. | entailment |
def redock_sidebar(self, window_key, sidebar_name, controller_name, widget, event=None):
    """Redock/embed sidebar into main window

    The size & position of the open window are saved to the runtime_config file, the sidebar is redocked back
    to the main-window, and the left-bar window is hidden. The undock button of the bar is made visible again.

    :param window_key: Upper-case key identifying the sidebar window (used to derive view/widget names)
    :param sidebar_name: Name of the main-window container the sidebar is packed back into
    :param controller_name: Name of the child controller whose window is hidden
    :param widget: The widget that emitted the signal (unused)
    :param event: The event that triggered the signal (unused)
    :return: True, to stop further handling of the signal
    """
    config_parameter_undocked = window_key + '_WINDOW_UNDOCKED'
    config_id_for_pane_position = window_key + '_DOCKED_POS'
    undocked_window_name = window_key.lower() + '_window'
    widget_name = window_key.lower()
    undocked_window_view = getattr(self.view, undocked_window_name)
    # stop mirroring the main window's state events to the undocked window
    self.view['main_window'].disconnect(self.handler_ids[undocked_window_name]['state'])
    getattr(self, 'on_{}_return_clicked'.format(widget_name))(None)
    # move the sidebar widget back from the undocked window into the main window
    undocked_window_view['central_eventbox'].remove(self.view[widget_name])
    self.view[sidebar_name].pack_start(self.view[widget_name], True, True, 0)
    self.get_controller(controller_name).hide_window()
    self.view['undock_{}_button'.format(widget_name)].show()
    # restore the position of the pane
    self.set_pane_position(config_id_for_pane_position)
    global_runtime_config.set_config_value(config_parameter_undocked, False)
    return True | Redock/embed sidebar into main window
The size & position of the open window are saved to the runtime_config file, the sidebar is redocked back
to the main-window, and the left-bar window is hidden. The undock button of the bar is made visible again. | entailment |
def on_notebook_tab_switch(self, notebook, page, page_num, title_label, window, notebook_identifier):
    """Handle a tab change in one of the left-bar notebooks.

    Updates the notebook's title label and updates the title of the left-bar
    window in case it is un-docked.

    :param notebook: The GTK notebook where a tab-change occurred
    :param page: The newly selected notebook page
    :param page_num: The page number of the currently-selected tab
    :param title_label: The label holding the notebook's title
    :param window: The left-bar window, for which the title should be changed
    :param notebook_identifier: A string identifying whether the notebook is the upper or the lower one
    """
    new_title = gui_helper_label.set_notebook_title(notebook, page_num, title_label)
    window.reset_title(new_title, notebook_identifier)
    self.on_switch_page_check_collapse_button(notebook, page_num)
def _on_key_press(self, widget, event):
    """Record the pressed key and toggle the sidebars on <Ctrl><Tab>.

    :param Gtk.Widget widget: The main window
    :param Gdk.Event event: The key press event
    """
    self.currently_pressed_keys.add(event.keyval)
    ctrl_held = bool(event.state & Gdk.ModifierType.CONTROL_MASK)
    if ctrl_held and event.keyval in (Gdk.KEY_Tab, Gdk.KEY_ISO_Left_Tab):
        self.toggle_sidebars()
def prepare_destruction(self):
    """Saves current configuration of windows and panes to the runtime config file, before RAFCON is closed."""
    plugins.run_hook("pre_destruction")
    logger.debug("Saving runtime config to {0}".format(global_runtime_config.config_file_path))
    # store pane last positions
    for key, widget_name in constants.PANE_ID.items():
        # strip the '_POS' suffix so the stored key matches the pane's config id
        global_runtime_config.store_widget_properties(self.view[widget_name], key.replace('_POS', ''))
    # store hidden or undocked widget flags correctly -> make them independent for restoring
    for window_key in constants.UNDOCKABLE_WINDOW_KEYS:
        hidden = False
        # a bar counts as hidden only while docked; undocked bars are handled by the *_WINDOW_UNDOCKED flag
        if not global_runtime_config.get_config_value(window_key + "_WINDOW_UNDOCKED"):
            hidden = getattr(self, window_key.lower() + '_hidden')
        global_runtime_config.set_config_value(window_key + '_HIDDEN', hidden)
    global_runtime_config.save_configuration()
    # state-editor will relieve it's model => it won't observe the state machine manager any more
    self.get_controller('states_editor_ctrl').prepare_destruction()  # avoid new state editor TODO tbd (deleted)
    rafcon.core.singleton.state_machine_manager.delete_all_state_machines()
    rafcon.core.singleton.library_manager.prepare_destruction()
    # gtkmvc installs a global glade custom handler that holds a reference to the last created View class,
    # preventing it from being destructed. By installing a dummy callback handler, after all views have been
    # created, the old handler is being removed and with it the reference, allowing all Views to be destructed.
    # Gtk TODO: check if necessary and search for replacement
    # try:
    #     from gtk import glade
    #     def dummy(*args, **kwargs):
    #         pass
    #     glade.set_custom_handler(dummy)
    # except ImportError:
    #     pass
    # Recursively destroys the main window
    self.destroy()
    from rafcon.gui.clipboard import global_clipboard
    global_clipboard.destroy()
    # drop the singleton reference so the controller can be garbage collected
    gui_singletons.main_window_controller = None | Saves current configuration of windows and panes to the runtime config file, before RAFCON is closed.
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.