sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def log_to_raw_structure(execution_history_items):
    """Build raw predecessor/successor/concurrency relations from history items.

    :param dict execution_history_items: history items, in the simplest case
        directly the opened shelve log file
    :return: start_item, the StateMachineStartItem of the log file
        previous, a dict mapping history_item_id --> history_item_id of previous history item
        next_, a dict mapping history_item_id --> history_item_id of the next history item (except if
        next item is a concurrent execution branch)
        concurrent, a dict mapping history_item_id --> []list of concurrent next history_item_ids
        (if present)
        grouped, a dict mapping run_id --> []list of history items with this run_id
    :rtype: tuple
    """
    previous = {}
    next_ = {}
    concurrent = {}
    grouped_by_run_id = {}
    start_item = None
    for k, v in execution_history_items.items():
        if v['item_type'] == 'StateMachineStartItem':
            start_item = v
        else:
            # connect the item to its predecessor
            prev_item_id = native_str(v['prev_history_item_id'])
            if prev_item_id in execution_history_items:
                ## should always be the case except if shelve is broken/missing data
                previous[k] = prev_item_id
                if execution_history_items[prev_item_id]['item_type'] == 'ConcurrencyItem' and \
                        execution_history_items[k]['item_type'] != 'ReturnItem':
                    # this is not a return item, thus this 'previous' relationship of this
                    # item must be a call item of one of the concurrent branches of
                    # the concurrency state
                    concurrent.setdefault(prev_item_id, []).append(k)
                else:
                    # this is a logical 'next' relationship
                    next_[prev_item_id] = k
            else:
                logger.warning('HistoryItem is referring to a non-existing previous history item, HistoryItem was %s' % str(v))
        # group all items (including the start item) by their run_id
        grouped_by_run_id.setdefault(v['run_id'], []).append(v)
    return start_item, previous, next_, concurrent, grouped_by_run_id
def _collapse_start_item(item):
    """Flatten a StateMachineStartItem into a plain execution-item dict.

    Base properties are expected to exist (a warning is logged for missing
    ones); extended properties added in later rafcon versions fall back to
    default values instead.

    :param dict item: the StateMachineStartItem history item
    :return: dict with the collapsed start-item properties
    :rtype: dict
    """
    execution_item = {}
    ## add base properties, warn if not existing
    for key in ['description', 'path_by_name', 'state_name', 'run_id', 'state_type',
                'path', 'timestamp', 'root_state_storage_id', 'state_machine_version',
                'used_rafcon_version', 'creation_time', 'last_update', 'os_environment']:
        try:
            execution_item[key] = item[key]
        except KeyError:
            logger.warning("Key {} not in history start item".format(str(key)))
    ## add extended properties (added in later rafcon versions),
    ## will add default value if not existing instead
    for key, default in [('semantic_data', {}),
                         ('is_library', None),
                         ('library_state_name', None),
                         ('library_name', None),
                         ('library_path', None)]:
        execution_item[key] = item.get(key, default)
    return execution_item


def _find_call_or_return_item(gitems, item_type):
    """Return the first item of `item_type` ('CallItem'/'ReturnItem') with
    call_type EXECUTE, falling back to call_type CONTAINER (should only be
    needed for the root state). Returns None if no such item exists.
    """
    for call_type in ('EXECUTE', 'CONTAINER'):
        for item in gitems:
            if item['item_type'] == item_type and item['call_type'] == call_type:
                return item
    return None


def log_to_collapsed_structure(execution_history_items, throw_on_pickle_error=True,
                               include_erroneous_data_ports=False, full_next=False):
    """
    Collapsed structure means that all history items belonging to the same state execution are
    merged together into one object (e.g. CallItem and ReturnItem of an ExecutionState). This
    is based on the log structure in which all Items which belong together have the same run_id.
    The collapsed items hold input as well as output data (direct and scoped), and the outcome of
    the state execution.
    :param dict execution_history_items: history items, in the simplest case
        directly the opened shelve log file
    :param bool throw_on_pickle_error: flag if an error is thrown if an object cannot be un-pickled
    :param bool include_erroneous_data_ports: flag if to include erroneous data ports
    :param bool full_next: flag to indicate if the next relationship has also to be created at the end
        of container states
    :return: start_item, the StateMachineStartItem of the log file
        next_, a dict mapping run_id --> run_id of the next executed state on the same
        hierarchy level
        concurrent, a dict mapping run_id --> []list of run_ids of the concurrent next
        executed states (if present)
        hierarchy, a dict mapping run_id --> run_id of the next executed state on the
        deeper hierarchy level (the start state within that HierarchyState)
        items, a dict mapping run_id --> collapsed representation of the execution of
        the state with that run_id
    :rtype: tuple
    """
    start_item, previous, next_, concurrent, grouped = log_to_raw_structure(execution_history_items)
    start_item = None
    collapsed_next = {}
    collapsed_concurrent = {}
    collapsed_hierarchy = {}
    collapsed_items = {}
    # single state executions are not supported
    if len(next_) == 0 or len(next_) == 1:
        for rid, gitems in grouped.items():
            if gitems[0]['item_type'] == 'StateMachineStartItem':
                start_item = _collapse_start_item(gitems[0])
        return start_item, collapsed_next, collapsed_concurrent, collapsed_hierarchy, collapsed_items

    def unpickle_data(data_dict):
        """Un-pickle all values of `data_dict`, honoring the error-handling flags
        `throw_on_pickle_error` and `include_erroneous_data_ports` of the outer call.
        """
        r = dict()
        # support backward compatibility
        if isinstance(data_dict, string_types):  # formerly data dict was a json string
            r = json.loads(data_dict)
        else:
            for k, v in data_dict.items():
                if not k.startswith('!'):  # ! indicates storage error
                    try:
                        r[k] = pickle.loads(v)
                    except Exception as e:
                        if throw_on_pickle_error:
                            raise
                        elif include_erroneous_data_ports:
                            r['!' + k] = (str(e), v)
                        else:
                            pass  # ignore
                elif include_erroneous_data_ports:
                    r[k] = v
        return r

    # build collapsed items
    for rid, gitems in grouped.items():
        if gitems[0]['item_type'] == 'StateMachineStartItem':
            execution_item = _collapse_start_item(gitems[0])
            start_item = execution_item
            collapsed_next[rid] = execution_history_items[next_[gitems[0]['history_item_id']]]['run_id']
            collapsed_items[rid] = execution_item
        elif gitems[0]['state_type'] == 'ExecutionState' or \
                gitems[0]['state_type'] == 'HierarchyState' or \
                gitems[0]['state_type'] == 'LibraryState' or \
                'Concurrency' in gitems[0]['state_type']:
            # select call and return items for this state
            call_item = _find_call_or_return_item(gitems, 'CallItem')
            if call_item is None:
                logger.warning('Could not find a CallItem in run_id group %s\n'
                               'There will probably be log information missing on this execution branch!' % str(rid))
                ## create dummy call item with the properties referenced later in this code
                call_item = dict(description=None,
                                 history_item_id=None,
                                 path_by_name=None,
                                 state_name=None,
                                 run_id=None,
                                 state_type=None,
                                 path=None,
                                 timestamp=None,
                                 input_output_data={},
                                 scoped_data={})
            return_item = _find_call_or_return_item(gitems, 'ReturnItem')
            if return_item is None:
                logger.warning('Could not find a ReturnItem in run_id group %s\n'
                               'There will probably be log information missing on this execution branch!' % str(rid))
                ## create dummy return item with the properties referenced later in this code
                return_item = dict(history_item_id=None,
                                   outcome_name=None,
                                   outcome_id=None,
                                   timestamp=None,
                                   input_output_data={},
                                   scoped_data={})
            # next item (on same hierarchy level) is always after return item
            if return_item['history_item_id'] in next_:
                next_item = execution_history_items[next_[return_item['history_item_id']]]
                # no next relationship at the end of containers (the 'next' item is the
                # container's own CONTAINER ReturnItem), unless full_next is requested
                is_container_end = (next_item['state_type'] == 'HierarchyState' and
                                    next_item['item_type'] == 'ReturnItem' and
                                    next_item['call_type'] == 'CONTAINER')
                if not is_container_end or full_next:
                    collapsed_next[rid] = next_item['run_id']
            # treat hierarchy level
            if call_item['history_item_id'] in previous:
                prev_item = execution_history_items[previous[call_item['history_item_id']]]
                if prev_item['state_type'] == 'HierarchyState' and prev_item['item_type'] == 'CallItem':
                    collapsed_hierarchy[prev_item['run_id']] = rid
                # treat concurrency level
                if prev_item['item_type'] == 'ConcurrencyItem':
                    collapsed_concurrent.setdefault(prev_item['run_id'], []).append(rid)
            # assemble grouped item
            execution_item = {}
            ## add base properties from the call item, will throw if not existing
            for key in ['description', 'path_by_name', 'state_name', 'run_id', 'state_type', 'path']:
                execution_item[key] = call_item[key]
            ## add extended properties (added in later rafcon versions) from the
            ## return item, with default value if not existing
            for key, default in [('semantic_data', {}),
                                 ('is_library', None),
                                 ('library_state_name', None),
                                 ('library_name', None),
                                 ('library_path', None)]:
                execution_item[key] = return_item.get(key, default)
            for key in ['outcome_name', 'outcome_id']:
                execution_item[key] = return_item[key]
            for key in ['timestamp']:
                execution_item[key + '_call'] = call_item[key]
                execution_item[key + '_return'] = return_item[key]
            execution_item['data_ins'] = unpickle_data(call_item['input_output_data'])
            execution_item['data_outs'] = unpickle_data(return_item['input_output_data'])
            execution_item['scoped_data_ins'] = unpickle_data(call_item['scoped_data'])
            execution_item['scoped_data_outs'] = unpickle_data(return_item['scoped_data'])
            execution_item['semantic_data'] = unpickle_data(execution_item['semantic_data'])
            collapsed_items[rid] = execution_item
    return start_item, collapsed_next, collapsed_concurrent, collapsed_hierarchy, collapsed_items
def log_to_DataFrame(execution_history_items, data_in_columns=(), data_out_columns=(), scoped_in_columns=(),
                     scoped_out_columns=(), semantic_data_columns=(), throw_on_pickle_error=True):
    """
    Returns all collapsed items in a table-like structure (pandas.DataFrame) with one row per executed
    state and a set of properties resp. columns (e.g. state_name, outcome, run_id) for this state.
    The data flow (data_in/out, scoped_data_in/out, semantic_data) is omitted from this table
    representation by default, as the different states have different data in-/out-port, scoped_data-
    ports and semantic_data defined. However, you can ask specific data-/scoped_data-ports and semantic
    data to be exported as table column, given they are primitive-valued, by including the port / key
    names in the *_columns-parameters. These table-columns will obviously only be well-defined for
    states having this kind of port-name-/semantic-key and otherwise will contain a None-like value,
    indicating missing data.
    The available data per execution item (row in the table) can be printed using pandas.DataFrame.columns.

    :param dict execution_history_items: history items, in the simplest case the opened shelve log file
    :param data_in_columns: input data port names to export as extra columns
    :param data_out_columns: output data port names to export as extra columns
    :param scoped_in_columns: scoped data port names (at call time) to export as extra columns
    :param scoped_out_columns: scoped data port names (at return time) to export as extra columns
    :param semantic_data_columns: semantic data keys to export as extra columns
    :param bool throw_on_pickle_error: flag if an error is thrown if an object cannot be un-pickled
    :return: one row per state execution, indexed and sorted by call timestamp
    :rtype: pandas.DataFrame
    """
    try:
        import pandas as pd
    except ImportError:
        raise ImportError("The Python package 'pandas' is required for log_to_DataFrame.")
    start, next_, concurrency, hierarchy, gitems = log_to_collapsed_structure(
        execution_history_items, throw_on_pickle_error=throw_on_pickle_error)
    # the start item is meta information about the run, not a state execution -> no table row
    gitems.pop(start['run_id'])
    if not gitems:
        return pd.DataFrame()
    # remove columns which are not generic over all states (basically the
    # data flow stuff)
    df_keys = list(list(gitems.values())[0].keys())
    for non_generic_key in ('data_ins', 'data_outs', 'scoped_data_ins',
                            'scoped_data_outs', 'semantic_data'):
        df_keys.remove(non_generic_key)
    df_keys.sort()
    # pairs of (collapsed-item key, user-selected port/key names for that key)
    selection = [('data_ins', data_in_columns),
                 ('data_outs', data_out_columns),
                 ('scoped_data_ins', scoped_in_columns),
                 ('scoped_data_outs', scoped_out_columns),
                 ('semantic_data', semantic_data_columns)]
    df_items = []
    for rid, item in gitems.items():
        row_data = [item[k] for k in df_keys]
        for key, selected_columns in selection:
            for column_key in selected_columns:
                row_data.append(item[key].get(column_key, None))
        df_items.append(row_data)
    for key, selected_columns in selection:
        df_keys.extend([key + '__' + s for s in selected_columns])
    df = pd.DataFrame(df_items, columns=df_keys)
    # convert epoch to datetime
    df.timestamp_call = pd.to_datetime(df.timestamp_call, unit='s')
    df.timestamp_return = pd.to_datetime(df.timestamp_return, unit='s')
    # use call timestamp as index
    df_timed = df.set_index(df.timestamp_call)
    df_timed.sort_index(inplace=True)
    return df_timed
def log_to_ganttplot(execution_history_items):
    """Plot state executions as a Gantt chart (example use of the DataFrame representation).

    Each executed state gets one horizontal bar from its call timestamp to its
    return timestamp, colored by state type.

    :param dict execution_history_items: history items, in the simplest case the opened shelve log file
    """
    import matplotlib.pyplot as plt
    import matplotlib.dates as dates
    import numpy as np
    d = log_to_DataFrame(execution_history_items)
    # de-duplicate states (keeping first-execution order) and map each state to a row index
    _, idx = np.unique(d.path_by_name, return_index=True)
    ordered_unique_states = np.array(d.path_by_name)[np.sort(idx)]
    name2idx = {k: i for i, k in enumerate(ordered_unique_states)}
    calldate = dates.date2num(d.timestamp_call.dt.to_pydatetime())
    returndate = dates.date2num(d.timestamp_return.dt.to_pydatetime())
    state2color = {'HierarchyState': 'k',
                   'ExecutionState': 'g',
                   'BarrierConcurrencyState': 'y',
                   'PreemptiveConcurrencyState': 'y'}
    fig, ax = plt.subplots(1, 1)
    # NOTE(review): barh's 'bottom' kwarg was renamed to 'y' in newer matplotlib
    # releases -- confirm the matplotlib version this project targets
    ax.barh(bottom=[name2idx[k] for k in d.path_by_name], width=returndate - calldate,
            left=calldate, align='center', color=[state2color[s] for s in d.state_type], lw=0.0)
    plt.yticks(list(range(len(ordered_unique_states))), ordered_unique_states)
def call_gui_callback(callback, *args, **kwargs):
    """Wrapper method for GLib.idle_add

    This method is intended as replacement for idle_add. It wraps the method with a callback option.
    The advantage is that this way, the call is blocking. The method returns when the callback method
    has been called and executed. Exceptions raised inside the callback are re-raised in the calling
    thread.

    :param callback: The callback method, e.g. on_open_activate
    :param args: The parameters to be passed to the callback method
    :param kwargs: Supports the key "priority" to control the GLib idle priority
        (defaults to GLib.PRIORITY_LOW)
    :return: the return value of the callback
    """
    from future.utils import raise_
    from threading import Condition
    import sys
    from rafcon.utils import log
    from gi.repository import GLib
    condition = Condition()
    # use a local mutable cell instead of module-level globals, so parallel
    # invocations from different threads cannot clobber each other's state
    outcome = {'result': None, 'exception_info': None}

    @log.log_exceptions()
    def fun():
        """Call callback and notify condition variable
        """
        try:
            outcome['result'] = callback(*args)
        except:
            # Exceptions within this asynchronously called function won't reach pytest. This is why
            # we have to store the information about the exception to re-raise it at the end of the
            # synchronous call.
            outcome['exception_info'] = sys.exc_info()
        finally:  # Finally is also executed in the case of exceptions
            condition.acquire()
            condition.notify()
            condition.release()

    priority = kwargs.get("priority", GLib.PRIORITY_LOW)
    condition.acquire()
    GLib.idle_add(fun, priority=priority)
    # Wait for the condition to be notified
    # TODO: implement timeout that raises an exception
    condition.wait()
    condition.release()
    if outcome['exception_info']:
        e_type, e_value, e_traceback = outcome['exception_info']
        raise_(e_type, e_value, e_traceback)
    return outcome['result']
def create_tab_header_label(tab_name, icons):
    """Create the tab header labels for notebook tabs.

    If USE_ICONS_AS_TAB_LABELS is set to True in the gui_config, icons are used as headers.
    Otherwise, the titles of the tabs are rotated by 90 degrees.

    :param tab_name: The label text of the tab, written in small letters and separated by underscores,
        e.g. states_tree
    :param icons: A dict mapping each tab_name to its corresponding icon
    :return: The GTK EventBox holding the tab label
    """
    tooltip_event_box = Gtk.EventBox()
    tooltip_event_box.set_tooltip_text(tab_name)
    tab_label = Gtk.Label()
    if global_gui_config.get_config_value('USE_ICONS_AS_TAB_LABELS', True):
        tab_label.set_markup('<span font_desc="%s %s">&#x%s;</span>' %
                             (constants.ICON_FONT,
                              constants.FONT_SIZE_BIG,
                              icons[tab_name]))
    else:
        tab_label.set_text(get_widget_title(tab_name))
        tab_label.set_angle(90)
    tab_label.show()
    tooltip_event_box.add(tab_label)
    # the event box is only needed for the tooltip, not as a visible widget
    tooltip_event_box.set_visible_window(False)
    tooltip_event_box.show()
    return tooltip_event_box
def create_button_label(icon, font_size=constants.FONT_SIZE_NORMAL):
    """Create a button label with a chosen icon.

    :param icon: The icon (hex code point of the icon-font glyph)
    :param font_size: The size of the icon
    :return: The created Gtk.Label
    """
    label = Gtk.Label()
    set_label_markup(label, '&#x' + icon + ';', constants.ICON_FONT, font_size)
    label.show()
    return label
def get_widget_title(tab_label_text):
    """Transform a Notebook tab label to a title.

    Underscores are replaced with white spaces and each word is upper-cased.
    Fixes the former implementation, which discarded the result of `str.strip()`
    and therefore returned a trailing space.

    :param tab_label_text: The string of the tab label to be transformed
    :return: The transformed title as a string
    """
    return ' '.join(word.upper() for word in tab_label_text.split('_'))
def get_notebook_tab_title(notebook, page_num):
    """Helper function that gets a notebook's tab title given its page number.

    :param notebook: The GTK notebook
    :param page_num: The page number of the tab, for which the title is required
    :return: The title of the tab
    """
    child = notebook.get_nth_page(page_num)
    tab_label_eventbox = notebook.get_tab_label(child)
    # the tooltip of the tab label event box holds the raw tab name (see create_tab_header_label)
    return get_widget_title(tab_label_eventbox.get_tooltip_text())
def set_notebook_title(notebook, page_num, title_label):
    """Set the title of a GTK notebook to one of its tab's titles.

    :param notebook: The GTK notebook
    :param page_num: The page number of a specific tab
    :param title_label: The GTK label holding the notebook's title
    :return: The new title of the notebook
    """
    text = get_notebook_tab_title(notebook, page_num)
    set_label_markup(title_label, text, constants.INTERFACE_FONT, constants.FONT_SIZE_BIG,
                     constants.LETTER_SPACING_1PT)
    return text
def create_menu_box_with_icon_and_label(label_text):
    """Create a MenuItem box, which is a replacement for the former ImageMenuItem.

    The box contains a label for the icon and one for the text.

    :param label_text: The text which is displayed for the text label
    :return: tuple of (box, icon_label, text_label)
    """
    box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 10)
    box.set_border_width(0)
    icon_label = Gtk.Label()
    text_label = Gtk.AccelLabel.new(label_text)
    # left-align the text next to the icon
    text_label.set_xalign(0)
    box.pack_start(icon_label, False, False, 0)
    box.pack_start(text_label, True, True, 0)
    return box, icon_label, text_label
def set_window_size_and_position(window, window_key):
    """Adjust a GTK Window's size, position and maximized state from the runtime_config.

    The maximize method is triggered last to restore also the last stored size and position of the
    window. If the runtime_config does not exist, or the corresponding values are missing in the
    file, default values for the window size are used, and the mouse position is used to adjust
    the window's position.

    :param window: The GTK Window to be adjusted
    :param window_key: The window's key stored in the runtime config file
    """
    size = global_runtime_config.get_config_value(window_key + '_WINDOW_SIZE')
    position = global_runtime_config.get_config_value(window_key + '_WINDOW_POS')
    maximized = global_runtime_config.get_config_value(window_key + '_WINDOW_MAXIMIZED')

    # un-maximize here on purpose otherwise resize and reposition fails
    if not maximized:
        window.unmaximize()

    if not size:
        size = constants.WINDOW_SIZE[window_key + '_WINDOW']
    window.resize(*size)
    if position:
        # clamp negative coordinates to the visible screen area
        position = (max(0, position[0]), max(0, position[1]))
        screen_width = Gdk.Screen.width()
        screen_height = Gdk.Screen.height()
        if position[0] < screen_width and position[1] < screen_height:
            window.move(*position)
    else:
        window.set_position(Gtk.WindowPosition.MOUSE)
    if maximized:
        window.maximize()
    window.show()
def react_to_event(view, widget, event):
    """Checks whether the widget is supposed to react to passed event.

    The function is intended for callback methods registering to shortcut actions. As several
    widgets can register to the same shortcut, only the one having the focus should react to it.

    :param gtkmvc3.View view: The view in which the widget is registered
    :param Gtk.Widget widget: The widget that subscribed to the shortcut action, should be the top
        widget of the view
    :param event: The event that caused the callback
    :return: Whether the widget is supposed to react to the event or not
    :rtype: bool
    """
    # See
    # http://pyGtk.org/pygtk2reference/class-gtkwidget.html#method-gtkwidget--is-focus and
    # http://pyGtk.org/pygtk2reference/class-gtkwidget.html#method-gtkwidget--has-focus
    # for detailed information about the difference between is_focus() and has_focus()
    if not view:  # view needs to be initialized
        return False
    # widget parameter must be set and a Gtk.Widget
    if not isinstance(widget, Gtk.Widget):
        return False
    # Either the widget itself or one of its children must be the focus widget within their toplevel
    child_is_focus = False if not isinstance(widget, Gtk.Container) else bool(widget.get_focus_child())
    if not child_is_focus and not widget.is_focus():
        return False

    def has_focus(widget):
        """Checks whether `widget` or one of its children ``has_focus()`` is ``True``

        :param Gtk.Widget widget: The widget to be checked
        :return: If any (child) widget has the global input focus
        """
        if widget.has_focus():
            return True
        if not isinstance(widget, Gtk.Container):
            return False
        return any(has_focus(child) for child in widget.get_children())

    # Either, for any of widget or its children, has_focus must be True, in this case the widget
    # has the global focus.
    if has_focus(widget):
        return True
    # Or the callback was not triggered by a shortcut, but e.g. a mouse click or a call from a test.
    # If the callback was triggered by a shortcut action, the event has at least a length of two and
    # the second element is a Gdk.ModifierType.
    return len(event) < 2 or not isinstance(event[1], Gdk.ModifierType)
def is_event_of_key_string(event, key_string):
    """Condition check if key string represents the key value of the handed event and whether the
    event is of the right type.

    The function checks for constructed event tuples that are generated by the
    rafcon.gui.shortcut_manager.ShortcutManager.

    :param tuple event: Event tuple generated by the ShortcutManager
    :param str key_string: Key string parsed to a key value and for condition check
    :return: True if the event matches the key string
    :rtype: bool
    """
    return len(event) >= 2 and not isinstance(event[1], Gdk.ModifierType) \
        and event[0] == Gtk.accelerator_parse(key_string)[0]
def register_view(self, view):
    """Called when the View was registered.

    Connects the maximize button and synchronizes its initial state.
    """
    super(TopToolBarController, self).register_view(view)
    view['maximize_button'].connect('clicked', self.on_maximize_button_clicked)
    self.update_maximize_button()
def register_view(self, view):
    """Called when the View was registered.

    Connects the redock button of the undocked window.
    """
    super(TopToolBarUndockedWindowController, self).register_view(view)
    view['redock_button'].connect('clicked', self.on_redock_button_clicked)
def register_view(self, view):
    """Called when the View was registered.

    Can be used e.g. to connect signals. Here, the destroy signal is connected to close the
    application, and the shortcut manager is set up for the window.
    """
    super(SingleWidgetWindowController, self).register_view(view)
    self.shortcut_manager = ShortcutManager(self.view['main_window'])
    self.register_actions(self.shortcut_manager)
    view['main_window'].connect('destroy', Gtk.main_quit)
Can be used e.g. to connect signals. Here, the destroy signal is connected to close the application | entailment |
def avoid_parallel_execution(func):
    """A decorator to avoid the parallel execution of a function.

    If the function is currently being executed, a second (re-entrant or concurrent) call is
    skipped and returns None.

    :param func: The function to decorate
    :return: The wrapped function
    """
    from functools import wraps

    @wraps(func)  # preserve the decorated function's name/docstring
    def func_wrapper(*args, **kwargs):
        # an attribute on `func` acts as a busy flag; note this is not thread-safe
        # (check and set are not atomic), it only guards best-effort
        if not getattr(func, "currently_executing", False):
            func.currently_executing = True
            try:
                return func(*args, **kwargs)
            finally:
                func.currently_executing = False
        else:
            logger.verbose("Avoid parallel execution of function {}".format(func))
    return func_wrapper
def run(self):
""" This defines the sequence of actions that are taken when the library state is executed
It basically just calls the run method of the container state
:return:
"""
self.state_execution_status = StateExecutionStatus.ACTIVE
logger.debug("Entering library state '{0}' with name '{1}'".format(self.library_name, self.name))
# self.state_copy.parent = self.parent
self.state_copy._run_id = self._run_id
self.state_copy.input_data = self.input_data
self.state_copy.output_data = self.output_data
self.state_copy.execution_history = self.execution_history
self.state_copy.backward_execution = self.backward_execution
self.state_copy.run()
logger.debug("Exiting library state '{0}' with name '{1}'".format(self.library_name, self.name))
self.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE
self.finalize(self.state_copy.final_outcome) | This defines the sequence of actions that are taken when the library state is executed
It basically just calls the run method of the container state
:return: | entailment |
def remove_outcome(self, outcome_id, force=False, destroy=True):
"""Overwrites the remove_outcome method of the State class. Prevents user from removing a
outcome from the library state.
For further documentation, look at the State class.
:raises exceptions.NotImplementedError: in any case
"""
if force:
return State.remove_outcome(self, outcome_id, force, destroy)
else:
raise NotImplementedError("Remove outcome is not implemented for library state {}".format(self)) | Overwrites the remove_outcome method of the State class. Prevents user from removing a
outcome from the library state.
For further documentation, look at the State class.
:raises exceptions.NotImplementedError: in any case | entailment |
def remove_output_data_port(self, data_port_id, force=False, destroy=True):
"""Overwrites the remove_output_data_port method of the State class. Prevents user from removing a
output data port from the library state.
For further documentation, look at the State class.
:param bool force: True if the removal should be forced
:raises exceptions.NotImplementedError: in the removal is not forced
"""
if force:
return State.remove_output_data_port(self, data_port_id, force, destroy)
else:
raise NotImplementedError("Remove output data port is not implemented for library state {}".format(self)) | Overwrites the remove_output_data_port method of the State class. Prevents user from removing a
output data port from the library state.
For further documentation, look at the State class.
:param bool force: True if the removal should be forced
:raises exceptions.NotImplementedError: in the removal is not forced | entailment |
def library_hierarchy_depth(self):
""" Calculates the library hierarchy depth
Counting starts at the current library state. So if the there is no upper library state the depth is one.
:return: library hierarchy depth
:rtype: int
"""
current_library_hierarchy_depth = 1
library_root_state = self.get_next_upper_library_root_state()
while library_root_state is not None:
current_library_hierarchy_depth += 1
library_root_state = library_root_state.parent.get_next_upper_library_root_state()
return current_library_hierarchy_depth | Calculates the library hierarchy depth
Counting starts at the current library state. So if the there is no upper library state the depth is one.
:return: library hierarchy depth
:rtype: int | entailment |
def load(self, config_file=None, path=None):
"""Loads the configuration from a specific file
:param config_file: the name of the config file
:param path: the path to the config file
"""
if config_file is None:
if path is None:
path, config_file = split(resource_filename(__name__, CONFIG_FILE))
else:
config_file = CONFIG_FILE
super(Config, self).load(config_file, path) | Loads the configuration from a specific file
:param config_file: the name of the config file
:param path: the path to the config file | entailment |
def register_view(self, view):
"""Called when the View was registered
Can be used e.g. to connect signals. Here, the destroy signal is connected to close the application
"""
super(StateEditorController, self).register_view(view)
view.prepare_the_labels() # the preparation of the labels is done here to take into account plugin hook changes
view['add_input_port_button'].connect('clicked', self.inputs_ctrl.on_add)
view['add_output_port_button'].connect('clicked', self.outputs_ctrl.on_add)
if isinstance(self.model, ContainerStateModel):
view['add_scoped_variable_button'].connect('clicked', self.scopes_ctrl.on_add)
view['remove_input_port_button'].connect('clicked', self.inputs_ctrl.on_remove)
view['remove_output_port_button'].connect('clicked', self.outputs_ctrl.on_remove)
if isinstance(self.model, ContainerStateModel):
view['remove_scoped_variable_button'].connect('clicked', self.scopes_ctrl.on_remove)
if isinstance(self.model, LibraryStateModel) or self.model.state.get_next_upper_library_root_state():
view['add_input_port_button'].set_sensitive(False)
view['remove_input_port_button'].set_sensitive(False)
view['add_output_port_button'].set_sensitive(False)
view['remove_output_port_button'].set_sensitive(False)
view['add_scoped_variable_button'].set_sensitive(False)
view['remove_scoped_variable_button'].set_sensitive(False)
view.inputs_view.show()
view.outputs_view.show()
view.scopes_view.show()
view.outcomes_view.show()
view.transitions_view.show()
view.data_flows_view.show()
# show scoped variables if show content is enabled -> if disabled the tab stays and indicates a container state
if isinstance(self.model, LibraryStateModel) and not self.model.show_content():
view.scopes_view.hide()
view.linkage_overview.scope_view.hide()
# Container states do not have a source editor and library states does not show there source code
# Thus, for those states we do not have to add the source controller and can hide the source code tab
# logger.info("init state: {0}".format(model))
lib_with_and_ES_as_root = isinstance(self.model, LibraryStateModel) and \
not isinstance(self.model.state_copy, ContainerStateModel)
if not isinstance(self.model, ContainerStateModel) and not isinstance(self.model, LibraryStateModel) or \
lib_with_and_ES_as_root:
view.source_view.show()
if isinstance(self.model, LibraryStateModel) and not self.model.show_content():
view.remove_source_tab()
view.remove_scoped_variables_tab()
else:
view.scopes_view.show()
if isinstance(self.model, LibraryStateModel) and \
(not self.model.show_content() or not isinstance(self.model.state_copy, ContainerStateModel)):
view.remove_scoped_variables_tab()
view.remove_source_tab()
if global_gui_config.get_config_value("SEMANTIC_DATA_MODE", False):
view.bring_tab_to_the_top('Semantic Data')
else:
if isinstance(self.model.state, LibraryState):
view.bring_tab_to_the_top('Description')
else:
view.bring_tab_to_the_top('Linkage Overview')
if isinstance(self.model, ContainerStateModel):
self.scopes_ctrl.reload_scoped_variables_list_store()
plugins.run_hook("post_state_editor_register_view", self) | Called when the View was registered
Can be used e.g. to connect signals. Here, the destroy signal is connected to close the application | entailment |
def state_type_changed(self, model, prop_name, info):
"""Reopen state editor when state type is changed
When the type of the observed state changes, a new model is created. The look of this controller's view
depends on the kind of model. Therefore, we have to destroy this editor and open a new one with the new model.
"""
msg = info['arg']
# print(self.__class__.__name__, "state_type_changed check", info)
if msg.action in ['change_state_type', 'change_root_state_type'] and msg.after:
# print(self.__class__.__name__, "state_type_changed")
import rafcon.gui.singleton as gui_singletons
msg = info['arg']
new_state_m = msg.affected_models[-1]
states_editor_ctrl = gui_singletons.main_window_controller.get_controller('states_editor_ctrl')
states_editor_ctrl.recreate_state_editor(self.model, new_state_m) | Reopen state editor when state type is changed
When the type of the observed state changes, a new model is created. The look of this controller's view
depends on the kind of model. Therefore, we have to destroy this editor and open a new one with the new model. | entailment |
def state_destruction(self, model, prop_name, info):
""" Close state editor when state is being destructed """
import rafcon.gui.singleton as gui_singletons
states_editor_ctrl = gui_singletons.main_window_controller.get_controller('states_editor_ctrl')
state_identifier = states_editor_ctrl.get_state_identifier(self.model)
states_editor_ctrl.close_page(state_identifier, delete=True) | Close state editor when state is being destructed | entailment |
def prepare_destruction(self):
"""Prepares the model for destruction
Unregister itself as observer from the state machine and the root state
"""
if self.state_machine is None:
logger.verbose("Multiple calls of prepare destruction for {0}".format(self))
self.destruction_signal.emit()
if self.history is not None:
self.history.prepare_destruction()
if self.auto_backup is not None:
self.auto_backup.prepare_destruction()
try:
self.unregister_observer(self)
self.root_state.unregister_observer(self)
except KeyError: # Might happen if the observer was already unregistered
pass
with self.state_machine.modification_lock():
self.root_state.prepare_destruction()
self.root_state = None
self.state_machine = None
super(StateMachineModel, self).prepare_destruction() | Prepares the model for destruction
Unregister itself as observer from the state machine and the root state | entailment |
def meta_changed(self, model, prop_name, info):
"""When the meta was changed, we have to set the dirty flag, as the changes are unsaved"""
self.state_machine.marked_dirty = True
msg = info.arg
if model is not self and msg.change.startswith('sm_notification_'): # Signal was caused by the root state
# Emit state_meta_signal to inform observing controllers about changes made to the meta data within the
# state machine
# -> removes mark of "sm_notification_"-prepend to mark root-state msg forwarded to state machine label
msg = msg._replace(change=msg.change.replace('sm_notification_', '', 1))
self.state_meta_signal.emit(msg) | When the meta was changed, we have to set the dirty flag, as the changes are unsaved | entailment |
def action_signal_triggered(self, model, prop_name, info):
"""When the action was performed, we have to set the dirty flag, as the changes are unsaved"""
# print("ACTION_signal_triggered state machine: ", model, prop_name, info)
self.state_machine.marked_dirty = True
msg = info.arg
if model is not self and msg.action.startswith('sm_notification_'): # Signal was caused by the root state
# Emit state_action_signal to inform observing controllers about changes made to the state within the
# state machine
# print("DONE1 S", self.state_machine.state_machine_id, msg, model)
# -> removes mark of "sm_notification_"-prepend to mark root-state msg forwarded to state machine label
msg = msg._replace(action=msg.action.replace('sm_notification_', '', 1))
self.state_action_signal.emit(msg)
# print("FINISH DONE1 S", self.state_machine.state_machine_id, msg)
else:
# print("DONE2 S", self.state_machine.state_machine_id, msg)
pass | When the action was performed, we have to set the dirty flag, as the changes are unsaved | entailment |
def get_state_model_by_path(self, path):
"""Returns the `StateModel` for the given `path`
Searches a `StateModel` in the state machine, who's path is given by `path`.
:param str path: Path of the searched state
:return: The state with that path
:rtype: StateModel
:raises: ValueError, if path is invalid/not existing with this state machine
"""
path_elements = path.split('/')
path_elements.pop(0)
current_state_model = self.root_state
for state_id in path_elements:
if isinstance(current_state_model, ContainerStateModel):
if state_id in current_state_model.states:
current_state_model = current_state_model.states[state_id]
else:
raise ValueError("Invalid path: State with id '{}' not found in state with id {}".format(
state_id, current_state_model.state.state_id))
elif isinstance(current_state_model, LibraryStateModel):
if state_id == current_state_model.state_copy.state.state_id:
current_state_model = current_state_model.state_copy
else:
raise ValueError("Invalid path: state id '{}' does not coincide with state id '{}' of state_copy "
"of library state with id '{}'".format(
state_id, current_state_model.state_copy.state.state_id,
current_state_model.state.state_id))
else:
raise ValueError("Invalid path: State with id '{}' has no children".format(
current_state_model.state.state_id))
return current_state_model | Returns the `StateModel` for the given `path`
Searches a `StateModel` in the state machine, who's path is given by `path`.
:param str path: Path of the searched state
:return: The state with that path
:rtype: StateModel
:raises: ValueError, if path is invalid/not existing with this state machine | entailment |
def load_meta_data(self, path=None, recursively=True):
"""Load meta data of state machine model from the file system
The meta data of the state machine model is loaded from the file system and stored in the meta property of the
model. Existing meta data is removed. Also the meta data of root state and children is loaded.
:param str path: Optional path to the meta data file. If not given, the path will be derived from the state
machine's path on the filesystem
"""
meta_data_path = path if path is not None else self.state_machine.file_system_path
if meta_data_path:
path_meta_data = os.path.join(meta_data_path, storage.FILE_NAME_META_DATA)
try:
tmp_meta = storage.load_data_file(path_meta_data)
except ValueError:
tmp_meta = {}
else:
tmp_meta = {}
# JSON returns a dict, which must be converted to a Vividict
tmp_meta = Vividict(tmp_meta)
if recursively:
root_state_path = None if not path else os.path.join(path, self.root_state.state.state_id)
self.root_state.load_meta_data(root_state_path)
if tmp_meta:
# assign the meta data to the state
self.meta = tmp_meta
self.meta_signal.emit(MetaSignalMsg("load_meta_data", "all", True)) | Load meta data of state machine model from the file system
The meta data of the state machine model is loaded from the file system and stored in the meta property of the
model. Existing meta data is removed. Also the meta data of root state and children is loaded.
:param str path: Optional path to the meta data file. If not given, the path will be derived from the state
machine's path on the filesystem | entailment |
def store_meta_data(self, copy_path=None):
"""Save meta data of the state machine model to the file system
This method generates a dictionary of the meta data of the state machine and stores it on the filesystem.
:param str copy_path: Optional, if the path is specified, it will be used instead of the file system path
"""
if copy_path:
meta_file_json = os.path.join(copy_path, storage.FILE_NAME_META_DATA)
else:
meta_file_json = os.path.join(self.state_machine.file_system_path, storage.FILE_NAME_META_DATA)
storage_utils.write_dict_to_json(self.meta, meta_file_json)
self.root_state.store_meta_data(copy_path) | Save meta data of the state machine model to the file system
This method generates a dictionary of the meta data of the state machine and stores it on the filesystem.
:param str copy_path: Optional, if the path is specified, it will be used instead of the file system path | entailment |
def convert_trees(self, ptb_trees, representation='basic', universal=True,
include_punct=True, include_erased=False, **kwargs):
"""Convert a list of Penn Treebank formatted strings (ptb_trees)
into Stanford Dependencies. The dependencies are represented
as a list of sentences (CoNLL.Corpus), where each sentence
(CoNLL.Sentence) is itself a list of CoNLL.Token objects.
Currently supported representations are 'basic', 'collapsed',
'CCprocessed', and 'collapsedTree' which behave the same as they
in the CoreNLP command line tools. (note that in the online
CoreNLP demo, 'collapsed' is called 'enhanced')
Additional arguments: universal (if True, use universal
dependencies if they're available), include_punct (if False,
punctuation tokens will not be included), and include_erased
(if False and your representation might erase tokens, those
tokens will be omitted from the output).
See documentation on your backend to see if it supports
further options."""
kwargs.update(representation=representation, universal=universal,
include_punct=include_punct,
include_erased=include_erased)
return Corpus(self.convert_tree(ptb_tree, **kwargs)
for ptb_tree in ptb_trees) | Convert a list of Penn Treebank formatted strings (ptb_trees)
into Stanford Dependencies. The dependencies are represented
as a list of sentences (CoNLL.Corpus), where each sentence
(CoNLL.Sentence) is itself a list of CoNLL.Token objects.
Currently supported representations are 'basic', 'collapsed',
'CCprocessed', and 'collapsedTree' which behave the same as they
in the CoreNLP command line tools. (note that in the online
CoreNLP demo, 'collapsed' is called 'enhanced')
Additional arguments: universal (if True, use universal
dependencies if they're available), include_punct (if False,
punctuation tokens will not be included), and include_erased
(if False and your representation might erase tokens, those
tokens will be omitted from the output).
See documentation on your backend to see if it supports
further options. | entailment |
def setup_and_get_default_path(self, jar_base_filename):
"""Determine the user-specific install path for the Stanford
Dependencies jar if the jar_url is not specified and ensure that
it is writable (that is, make sure the directory exists). Returns
the full path for where the jar file should be installed."""
import os
import errno
install_dir = os.path.expanduser(INSTALL_DIR)
try:
os.makedirs(install_dir)
except OSError as ose:
if ose.errno != errno.EEXIST:
raise ose
jar_filename = os.path.join(install_dir, jar_base_filename)
return jar_filename | Determine the user-specific install path for the Stanford
Dependencies jar if the jar_url is not specified and ensure that
it is writable (that is, make sure the directory exists). Returns
the full path for where the jar file should be installed. | entailment |
def download_if_missing(self, version=None, verbose=True):
"""Download the jar for version into the jar_filename specified
in the constructor. Will not overwrite jar_filename if it already
exists. version defaults to DEFAULT_CORENLP_VERSION (ideally the
latest but we can't guarantee that since PyStanfordDependencies
is distributed separately)."""
if os.path.exists(self.jar_filename):
return
jar_url = self.get_jar_url(version)
if verbose:
print("Downloading %r -> %r" % (jar_url, self.jar_filename))
opener = ErrorAwareURLOpener()
opener.retrieve(jar_url, filename=self.jar_filename) | Download the jar for version into the jar_filename specified
in the constructor. Will not overwrite jar_filename if it already
exists. version defaults to DEFAULT_CORENLP_VERSION (ideally the
latest but we can't guarantee that since PyStanfordDependencies
is distributed separately). | entailment |
def _raise_on_bad_representation(representation):
"""Ensure that representation is a known Stanford Dependency
representation (raises a ValueError if the representation is
invalid)."""
if representation not in REPRESENTATIONS:
repr_desc = ', '.join(map(repr, REPRESENTATIONS))
raise ValueError("Unknown representation: %r (should be one "
"of %s)" % (representation, repr_desc)) | Ensure that representation is a known Stanford Dependency
representation (raises a ValueError if the representation is
invalid). | entailment |
def _raise_on_bad_jar_filename(jar_filename):
"""Ensure that jar_filename is a valid path to a jar file."""
if jar_filename is None:
return
if not isinstance(jar_filename, string_type):
raise TypeError("jar_filename is not a string: %r" % jar_filename)
if not os.path.exists(jar_filename):
raise ValueError("jar_filename does not exist: %r" % jar_filename) | Ensure that jar_filename is a valid path to a jar file. | entailment |
def get_jar_url(version=None):
"""Get the URL to a Stanford CoreNLP jar file with a specific
version. These jars come from Maven since the Maven version is
smaller than the full CoreNLP distributions. Defaults to
DEFAULT_CORENLP_VERSION."""
if version is None:
version = DEFAULT_CORENLP_VERSION
try:
string_type = basestring
except NameError:
string_type = str
if not isinstance(version, string_type):
raise TypeError("Version must be a string or None (got %r)." %
version)
jar_filename = 'stanford-corenlp-%s.jar' % version
return 'http://search.maven.org/remotecontent?filepath=' + \
'edu/stanford/nlp/stanford-corenlp/%s/%s' % (version,
jar_filename) | Get the URL to a Stanford CoreNLP jar file with a specific
version. These jars come from Maven since the Maven version is
smaller than the full CoreNLP distributions. Defaults to
DEFAULT_CORENLP_VERSION. | entailment |
def get_instance(jar_filename=None, version=None,
download_if_missing=True, backend='jpype',
**extra_args):
"""This is the typical mechanism of constructing a
StanfordDependencies instance. The backend parameter determines
which backend to load (currently can be 'subprocess' or 'jpype').
To determine which jar file is used, you must specify
jar_filename, download_if_missing=True, and/or version.
- If jar_filename is specified, that jar is used and the other two
flags are ignored.
- Otherwise, if download_if_missing, we will download a jar file
from the Maven repository. This jar file will be the latest
known version of CoreNLP unless the version flag is specified
(e.g., version='3.4.1') in which case we'll attempt to download
and use that version. Once downloaded, it will be stored in
your home directory and not downloaded again.
- If jar_filename and download_if_missing are not specified,
version must be set to a version previously downloaded in the
above step.
All remaining keyword arguments are passes on to the
StanfordDependencies backend constructor.
If the above options are confusing, don't panic! You can leave
them all blank -- get_instance() is designed to provide the best
and latest available conversion settings by default."""
StanfordDependencies._raise_on_bad_jar_filename(jar_filename)
extra_args.update(jar_filename=jar_filename,
download_if_missing=download_if_missing,
version=version)
if backend == 'jpype':
try:
from .JPypeBackend import JPypeBackend
return JPypeBackend(**extra_args)
except ImportError:
warnings.warn('Error importing JPypeBackend, '
'falling back to SubprocessBackend.')
backend = 'subprocess'
except RuntimeError as r:
warnings.warn('RuntimeError with JPypeBackend (%s), '
'falling back to SubprocessBackend.' % r[0])
backend = 'subprocess'
except TypeError as t:
warnings.warn('TypeError with JPypeBackend (%s), '
'falling back to SubprocessBackend.' % t[0])
backend = 'subprocess'
if backend == 'subprocess':
from .SubprocessBackend import SubprocessBackend
return SubprocessBackend(**extra_args)
raise ValueError("Unknown backend: %r (known backends: "
"'subprocess' and 'jpype')" % backend) | This is the typical mechanism of constructing a
StanfordDependencies instance. The backend parameter determines
which backend to load (currently can be 'subprocess' or 'jpype').
To determine which jar file is used, you must specify
jar_filename, download_if_missing=True, and/or version.
- If jar_filename is specified, that jar is used and the other two
flags are ignored.
- Otherwise, if download_if_missing, we will download a jar file
from the Maven repository. This jar file will be the latest
known version of CoreNLP unless the version flag is specified
(e.g., version='3.4.1') in which case we'll attempt to download
and use that version. Once downloaded, it will be stored in
your home directory and not downloaded again.
- If jar_filename and download_if_missing are not specified,
version must be set to a version previously downloaded in the
above step.
All remaining keyword arguments are passes on to the
StanfordDependencies backend constructor.
If the above options are confusing, don't panic! You can leave
them all blank -- get_instance() is designed to provide the best
and latest available conversion settings by default. | entailment |
def convert_tree(self, ptb_tree, representation='basic',
include_punct=True, include_erased=False,
add_lemmas=False, universal=True):
"""Arguments are as in StanfordDependencies.convert_trees but with
the addition of add_lemmas. If add_lemmas=True, we will run the
Stanford CoreNLP lemmatizer and fill in the lemma field."""
self._raise_on_bad_input(ptb_tree)
self._raise_on_bad_representation(representation)
tree = self.treeReader(ptb_tree)
if tree is None:
raise ValueError("Invalid Penn Treebank tree: %r" % ptb_tree)
deps = self._get_deps(tree, include_punct, representation,
universal=universal)
tagged_yield = self._listify(tree.taggedYield())
indices_to_words = dict(enumerate(tagged_yield, 1))
sentence = Sentence()
covered_indices = set()
def add_token(index, form, head, deprel, extra):
tag = indices_to_words[index].tag()
if add_lemmas:
lemma = self.stem(form, tag)
else:
lemma = None
token = Token(index=index, form=form, lemma=lemma, cpos=tag,
pos=tag, feats=None, head=head, deprel=deprel,
phead=None, pdeprel=None, extra=extra)
sentence.append(token)
# add token for each dependency
for dep in deps:
index = dep.dep().index()
head = dep.gov().index()
deprel = dep.reln().toString()
form = indices_to_words[index].value()
dep_is_copy = dep.dep().copyCount()
gov_is_copy = dep.gov().copyCount()
if dep_is_copy or gov_is_copy:
extra = {}
if dep_is_copy:
extra['dep_is_copy'] = dep_is_copy
if gov_is_copy:
extra['gov_is_copy'] = gov_is_copy
else:
extra = None
add_token(index, form, head, deprel, extra)
covered_indices.add(index)
if include_erased:
# see if there are any tokens that were erased
# and add them as well
all_indices = set(indices_to_words.keys())
for index in all_indices - covered_indices:
form = indices_to_words[index].value()
if not include_punct and not self.puncFilter(form):
continue
add_token(index, form, head=0, deprel='erased', extra=None)
# erased generally disrupt the ordering of the sentence
sentence.sort()
if representation == 'basic':
sentence.renumber()
return sentence | Arguments are as in StanfordDependencies.convert_trees but with
the addition of add_lemmas. If add_lemmas=True, we will run the
Stanford CoreNLP lemmatizer and fill in the lemma field. | entailment |
def stem(self, form, tag):
"""Returns the stem of word with specific form and part-of-speech
tag according to the Stanford lemmatizer. Lemmas are cached."""
key = (form, tag)
if key not in self.lemma_cache:
lemma = self.stemmer(*key).word()
self.lemma_cache[key] = lemma
return self.lemma_cache[key] | Returns the stem of word with specific form and part-of-speech
tag according to the Stanford lemmatizer. Lemmas are cached. | entailment |
def _get_deps(self, tree, include_punct, representation, universal):
"""Get a list of dependencies from a Stanford Tree for a specific
Stanford Dependencies representation."""
if universal:
converter = self.universal_converter
if self.universal_converter == self.converter:
import warnings
warnings.warn("This jar doesn't support universal "
"dependencies, falling back to Stanford "
"Dependencies. To suppress this message, "
"call with universal=False")
else:
converter = self.converter
if include_punct:
egs = converter(tree, self.acceptFilter)
else:
egs = converter(tree)
if representation == 'basic':
deps = egs.typedDependencies()
elif representation == 'collapsed':
deps = egs.typedDependenciesCollapsed(True)
elif representation == 'CCprocessed':
deps = egs.typedDependenciesCCprocessed(True)
else:
# _raise_on_bad_representation should ensure that this
# assertion doesn't fail
assert representation == 'collapsedTree'
deps = egs.typedDependenciesCollapsedTree()
return self._listify(deps) | Get a list of dependencies from a Stanford Tree for a specific
Stanford Dependencies representation. | entailment |
def _listify(collection):
"""This is a workaround where Collections are no longer iterable
when using JPype."""
new_list = []
for index in range(len(collection)):
new_list.append(collection[index])
return new_list | This is a workaround where Collections are no longer iterable
when using JPype. | entailment |
def as_conll(self):
"""Represent this Token as a line as a string in CoNLL-X format."""
def get(field):
value = getattr(self, field)
if value is None:
value = '_'
elif field == 'feats':
value = '|'.join(value)
return str(value)
return '\t'.join([get(field) for field in FIELD_NAMES]) | Represent this Token as a line as a string in CoNLL-X format. | entailment |
def from_conll(this_class, text):
"""Construct a Token from a line in CoNLL-X format."""
fields = text.split('\t')
fields[0] = int(fields[0]) # index
fields[6] = int(fields[6]) # head index
if fields[5] != '_': # feats
fields[5] = tuple(fields[5].split('|'))
fields = [value if value != '_' else None for value in fields]
fields.append(None) # for extra
return this_class(**dict(zip(FIELD_NAMES_PLUS, fields))) | Construct a Token from a line in CoNLL-X format. | entailment |
def renumber(self):
"""Destructively renumber the indices based on the actual tokens
(e.g., if there are gaps between token indices, this will remove
them). Old Token objects will still exist, so you'll need to
update your references."""
mapping = {0: 0} # old index -> real index
needs_renumbering = False
for real_index, token in enumerate(self, 1):
mapping[token.index] = real_index
if token.index != real_index:
needs_renumbering = True
if needs_renumbering:
# update all indices
self[:] = [token._replace(index=mapping[token.index],
head=mapping[token.head])
for token in self] | Destructively renumber the indices based on the actual tokens
(e.g., if there are gaps between token indices, this will remove
them). Old Token objects will still exist, so you'll need to
update your references. | entailment |
def as_asciitree(self, str_func=None):
"""Represent this Sentence as an ASCII tree string. Requires
the asciitree package. A default token stringifier is provided
but for custom formatting, specify a str_func which should take
a single Token and return a string."""
import asciitree
from collections import defaultdict
children = defaultdict(list)
# since erased nodes may be missing, multiple tokens may have same
# index (CCprocessed), etc.
token_to_index = {}
roots = []
for token in self:
children[token.head].append(token)
token_to_index[token] = token.index
if token.head == 0:
roots.append(token)
assert roots, "Couldn't find root Token(s)"
if len(roots) > 1:
# multiple roots so we make a fake one to be their parent
root = Token(0, 'ROOT', 'ROOT-LEMMA', 'ROOT-CPOS', 'ROOT-POS',
None, None, 'ROOT-DEPREL', None, None, None)
token_to_index[root] = 0
children[0] = roots
else:
root = roots[0]
def child_func(token):
index = token_to_index[token]
return children[index]
if not str_func:
def str_func(token):
return ' %s [%s]' % (token.form, token.deprel)
return asciitree.draw_tree(root, child_func, str_func) | Represent this Sentence as an ASCII tree string. Requires
the asciitree package. A default token stringifier is provided
but for custom formatting, specify a str_func which should take
a single Token and return a string. | entailment |
def as_dotgraph(self, digraph_kwargs=None, id_prefix=None,
node_formatter=None, edge_formatter=None):
"""Returns this sentence as a graphviz.Digraph. Requires the
graphviz Python package and graphviz itself. There are several
ways to customize. Graph level keyword arguments can be passed
as a dictionary to digraph_kwargs. If you're viewing multiple
Sentences in the same graph, you'll need to set a unique prefix
string in id_prefix. Lastly, you can change the formatting of
nodes and edges with node_formatter and edge_formatter. Both
take a single Token as an argument (for edge_formatter, the
Token represents the child token) and return a dictionary of
keyword arguments which are passed to the node and edge creation
functions in graphviz. The node_formatter will also be called
with None as its token when adding the root."""
digraph_kwargs = digraph_kwargs or {}
id_prefix = id_prefix or ''
node_formatter = node_formatter or (lambda token: {})
edge_formatter = edge_formatter or (lambda token: {})
import graphviz
graph = graphviz.Digraph(**digraph_kwargs)
# add root node
graph.node(id_prefix + '0', 'root', **node_formatter(None))
# add remaining nodes and edges
already_added = set()
for token in self:
token_id = id_prefix + str(token.index)
parent_id = id_prefix + str(token.head)
if token_id not in already_added:
graph.node(token_id, token.form, **node_formatter(token))
graph.edge(parent_id, token_id, label=token.deprel,
**edge_formatter(token))
already_added.add(token_id)
return graph | Returns this sentence as a graphviz.Digraph. Requires the
graphviz Python package and graphviz itself. There are several
ways to customize. Graph level keyword arguments can be passed
as a dictionary to digraph_kwargs. If you're viewing multiple
Sentences in the same graph, you'll need to set a unique prefix
string in id_prefix. Lastly, you can change the formatting of
nodes and edges with node_formatter and edge_formatter. Both
take a single Token as an argument (for edge_formatter, the
Token represents the child token) and return a dictionary of
keyword arguments which are passed to the node and edge creation
functions in graphviz. The node_formatter will also be called
with None as its token when adding the root. | entailment |
def from_conll(this_class, stream):
"""Construct a Sentence. stream is an iterable over strings where
each string is a line in CoNLL-X format. If there are multiple
sentences in this stream, we only return the first one."""
stream = iter(stream)
sentence = this_class()
for line in stream:
line = line.strip()
if line:
sentence.append(Token.from_conll(line))
elif sentence:
return sentence
return sentence | Construct a Sentence. stream is an iterable over strings where
each string is a line in CoNLL-X format. If there are multiple
sentences in this stream, we only return the first one. | entailment |
def from_stanford_dependencies(this_class, stream, tree,
include_erased=False, include_punct=True):
"""Construct a Sentence. stream is an iterable over strings
where each string is a line representing a Stanford Dependency
as in the output of the command line Stanford Dependency tool:
deprel(gov-index, dep-depindex)
The corresponding Penn Treebank formatted tree must be provided
as well."""
stream = iter(stream)
sentence = this_class()
covered_indices = set()
tags_and_words = ptb_tags_and_words_re.findall(tree)
# perform some basic cleanups
tags_and_words = [(tag, word.replace(r'\/', '/'))
for (tag, word) in tags_and_words if tag != '-NONE-']
for line in stream:
if not line.strip():
if sentence:
# empty line means the sentence is over
break
else:
continue
line = line.replace(r'\/', '/')
matches = deps_re.findall(line)
assert len(matches) == 1
(deprel, gov_form, head, gov_is_copy, form, index,
dep_is_copy) = matches[0]
index = int(index)
tag, word = tags_and_words[index - 1]
assert form == word
covered_indices.add(index)
if not include_punct and deprel == 'punct':
continue
if gov_is_copy or dep_is_copy:
extra = {}
if gov_is_copy:
extra['gov_is_copy'] = len(gov_is_copy)
if dep_is_copy:
extra['dep_is_copy'] = len(dep_is_copy)
else:
extra = None
token = Token(index, form, None, tag, tag, None, int(head),
deprel, None, None, extra)
sentence.append(token)
if include_erased:
# look through words in the tree to see if any of them
# were erased
for index, (tag, word) in enumerate(tags_and_words, 1):
if index in covered_indices:
continue
token = Token(index, word, None, tag, tag, None, 0,
'erased', None, None, None)
sentence.append(token)
sentence.sort()
return sentence | Construct a Sentence. stream is an iterable over strings
where each string is a line representing a Stanford Dependency
as in the output of the command line Stanford Dependency tool:
deprel(gov-index, dep-depindex)
The corresponding Penn Treebank formatted tree must be provided
as well. | entailment |
def from_conll(this_class, stream):
"""Construct a Corpus. stream is an iterable over strings where
each string is a line in CoNLL-X format."""
stream = iter(stream)
corpus = this_class()
while 1:
# read until we get an empty sentence
sentence = Sentence.from_conll(stream)
if sentence:
corpus.append(sentence)
else:
break
return corpus | Construct a Corpus. stream is an iterable over strings where
each string is a line in CoNLL-X format. | entailment |
def from_stanford_dependencies(this_class, stream, trees,
include_erased=False, include_punct=True):
"""Construct a Corpus. stream is an iterable over strings where
each string is a line representing a Stanford Dependency as in
the output of the command line Stanford Dependency tool:
deprel(gov-index, dep-depindex)
Sentences are separated by blank lines. A corresponding list of
Penn Treebank formatted trees must be provided as well."""
stream = iter(stream)
corpus = this_class()
for tree in trees:
sentence = Sentence.from_stanford_dependencies(stream,
tree,
include_erased,
include_punct)
corpus.append(sentence)
return corpus | Construct a Corpus. stream is an iterable over strings where
each string is a line representing a Stanford Dependency as in
the output of the command line Stanford Dependency tool:
deprel(gov-index, dep-depindex)
Sentences are separated by blank lines. A corresponding list of
Penn Treebank formatted trees must be provided as well. | entailment |
def convert_trees(self, ptb_trees, representation='basic',
include_punct=True, include_erased=False, universal=True,
debug=False):
"""Convert a list of Penn Treebank formatted trees (ptb_trees)
into Stanford Dependencies. The dependencies are represented
as a list of sentences, where each sentence is itself a list of
Token objects.
Currently supported representations are 'basic', 'collapsed',
'CCprocessed', and 'collapsedTree' which behave the same as they
in the CoreNLP command line tools. (note that in the online
CoreNLP demo, 'collapsed' is called 'enhanced')
Setting debug=True will cause debugging information (including
the java command run to be printed."""
self._raise_on_bad_representation(representation)
input_file = tempfile.NamedTemporaryFile(delete=False)
try:
for ptb_tree in ptb_trees:
self._raise_on_bad_input(ptb_tree)
tree_with_line_break = ptb_tree + "\n"
input_file.write(tree_with_line_break.encode("utf-8"))
input_file.flush()
input_file.close()
command = [self.java_command,
'-ea',
'-cp', self.jar_filename,
JAVA_CLASS_NAME,
'-' + representation,
'-treeFile', input_file.name]
# if we're including erased, we want to include punctuation
# since otherwise we won't know what SD considers punctuation
if include_punct or include_erased:
command.append('-keepPunct')
if not universal:
command.append('-originalDependencies')
if debug:
print('Command:', ' '.join(command))
sd_process = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
return_code = sd_process.wait()
stderr = sd_process.stderr.read()
stdout = sd_process.stdout.read()
if debug:
print("stdout: {%s}" % stdout)
print("stderr: {%s}" % stderr)
print('Exit code:', return_code)
self._raise_on_bad_exit_or_output(return_code, stderr)
finally:
os.remove(input_file.name)
try:
sentences = Corpus.from_stanford_dependencies(stdout.splitlines(),
ptb_trees,
include_erased,
include_punct)
for sentence, ptb_tree in zip(sentences, ptb_trees):
if len(sentence) == 0:
raise ValueError("Invalid PTB tree: %r" % ptb_tree)
except:
print("Error during conversion")
if not debug:
print("stdout: {%s}" % stdout)
print("stderr: {%s}" % stderr)
raise
assert len(sentences) == len(ptb_trees), \
"Only got %d sentences from Stanford Dependencies when " \
"given %d trees." % (len(sentences), len(ptb_trees))
return sentences | Convert a list of Penn Treebank formatted trees (ptb_trees)
into Stanford Dependencies. The dependencies are represented
as a list of sentences, where each sentence is itself a list of
Token objects.
Currently supported representations are 'basic', 'collapsed',
'CCprocessed', and 'collapsedTree' which behave the same as they
in the CoreNLP command line tools. (note that in the online
CoreNLP demo, 'collapsed' is called 'enhanced')
Setting debug=True will cause debugging information (including
the java command run to be printed. | entailment |
def endpoint_from_name(endpoint_name):
"""The object used for interacting with the named relations, or None.
"""
if endpoint_name is None:
return None
factory = relation_factory(endpoint_name)
if factory:
return factory.from_name(endpoint_name) | The object used for interacting with the named relations, or None. | entailment |
def endpoint_from_flag(flag):
"""The object used for interacting with relations tied to a flag, or None.
"""
relation_name = None
value = _get_flag_value(flag)
if isinstance(value, dict) and 'relation' in value:
# old-style RelationBase
relation_name = value['relation']
elif flag.startswith('endpoint.'):
# new-style Endpoint
relation_name = flag.split('.')[1]
elif '.' in flag:
# might be an unprefixed new-style Endpoint
relation_name = flag.split('.')[0]
if relation_name not in hookenv.relation_types():
return None
if relation_name:
factory = relation_factory(relation_name)
if factory:
return factory.from_flag(flag)
return None | The object used for interacting with relations tied to a flag, or None. | entailment |
def relation_factory(relation_name):
"""Get the RelationFactory for the given relation name.
Looks for a RelationFactory in the first file matching:
``$CHARM_DIR/hooks/relations/{interface}/{provides,requires,peer}.py``
"""
role, interface = hookenv.relation_to_role_and_interface(relation_name)
if not (role and interface):
hookenv.log('Unable to determine role and interface for relation '
'{}'.format(relation_name), hookenv.ERROR)
return None
return _find_relation_factory(_relation_module(role, interface)) | Get the RelationFactory for the given relation name.
Looks for a RelationFactory in the first file matching:
``$CHARM_DIR/hooks/relations/{interface}/{provides,requires,peer}.py`` | entailment |
def _relation_module(role, interface):
"""
Return module for relation based on its role and interface, or None.
Prefers new location (reactive/relations) over old (hooks/relations).
"""
_append_path(hookenv.charm_dir())
_append_path(os.path.join(hookenv.charm_dir(), 'hooks'))
base_module = 'relations.{}.{}'.format(interface, role)
for module in ('reactive.{}'.format(base_module), base_module):
if module in sys.modules:
break
try:
importlib.import_module(module)
break
except ImportError:
continue
else:
hookenv.log('Unable to find implementation for relation: '
'{} of {}'.format(role, interface), hookenv.ERROR)
return None
return sys.modules[module] | Return module for relation based on its role and interface, or None.
Prefers new location (reactive/relations) over old (hooks/relations). | entailment |
def _find_relation_factory(module):
"""
Attempt to find a RelationFactory subclass in the module.
Note: RelationFactory and RelationBase are ignored so they may
be imported to be used as base classes without fear.
"""
if not module:
return None
# All the RelationFactory subclasses
candidates = [o for o in (getattr(module, attr) for attr in dir(module))
if (o is not RelationFactory and
o is not RelationBase and
isclass(o) and
issubclass(o, RelationFactory))]
# Filter out any factories that are superclasses of another factory
# (none of the other factories subclass it). This usually makes
# the explict check for RelationBase and RelationFactory unnecessary.
candidates = [c1 for c1 in candidates
if not any(issubclass(c2, c1) for c2 in candidates
if c1 is not c2)]
if not candidates:
hookenv.log('No RelationFactory found in {}'.format(module.__name__),
hookenv.WARNING)
return None
if len(candidates) > 1:
raise RuntimeError('Too many RelationFactory found in {}'
''.format(module.__name__))
return candidates[0] | Attempt to find a RelationFactory subclass in the module.
Note: RelationFactory and RelationBase are ignored so they may
be imported to be used as base classes without fear. | entailment |
def _migrate_conversations(): # noqa
"""
Due to issue #28 (https://github.com/juju-solutions/charms.reactive/issues/28),
conversations needed to be updated to be namespaced per relation ID for SERVICE
and UNIT scope. To ensure backwards compatibility, this updates all convs in
the old format to the new.
TODO: Remove in 2.0.0
"""
for key, data in unitdata.kv().getrange('reactive.conversations.').items():
if 'local-data' in key:
continue
if 'namespace' in data:
continue
relation_name = data.pop('relation_name')
if data['scope'] == scopes.GLOBAL:
data['namespace'] = relation_name
unitdata.kv().set(key, data)
else:
# split the conv based on the relation ID
new_keys = []
for rel_id in hookenv.relation_ids(relation_name):
new_key = Conversation._key(rel_id, data['scope'])
new_units = set(hookenv.related_units(rel_id)) & set(data['units'])
if new_units:
unitdata.kv().set(new_key, {
'namespace': rel_id,
'scope': data['scope'],
'units': sorted(new_units),
})
new_keys.append(new_key)
unitdata.kv().unset(key)
# update the states pointing to the old conv key to point to the
# (potentially multiple) new key(s)
for flag in get_flags():
value = _get_flag_value(flag)
if not value:
continue
if key not in value['conversations']:
continue
value['conversations'].remove(key)
value['conversations'].extend(new_keys)
set_flag(flag, value) | Due to issue #28 (https://github.com/juju-solutions/charms.reactive/issues/28),
conversations needed to be updated to be namespaced per relation ID for SERVICE
and UNIT scope. To ensure backwards compatibility, this updates all convs in
the old format to the new.
TODO: Remove in 2.0.0 | entailment |
def relation_call(method, relation_name=None, flag=None, state=None, *args):
"""Invoke a method on the class implementing a relation via the CLI"""
if relation_name:
relation = relation_from_name(relation_name)
if relation is None:
raise ValueError('Relation not found: %s' % relation_name)
elif flag or state:
relation = relation_from_flag(flag or state)
if relation is None:
raise ValueError('Relation not found: %s' % (flag or state))
else:
raise ValueError('Must specify either relation_name or flag')
result = getattr(relation, method)(*args)
if isinstance(relation, RelationBase) and method == 'conversations':
# special case for conversations to make them work from CLI
result = [c.scope for c in result]
return result | Invoke a method on the class implementing a relation via the CLI | entailment |
def from_flag(cls, flag):
"""
Find relation implementation in the current charm, based on the
name of an active flag.
You should not use this method directly.
Use :func:`endpoint_from_flag` instead.
"""
value = _get_flag_value(flag)
if value is None:
return None
relation_name = value['relation']
conversations = Conversation.load(value['conversations'])
return cls.from_name(relation_name, conversations) | Find relation implementation in the current charm, based on the
name of an active flag.
You should not use this method directly.
Use :func:`endpoint_from_flag` instead. | entailment |
def from_name(cls, relation_name, conversations=None):
"""
Find relation implementation in the current charm, based on the
name of the relation.
:return: A Relation instance, or None
"""
if relation_name is None:
return None
relation_class = cls._cache.get(relation_name)
if relation_class:
return relation_class(relation_name, conversations)
role, interface = hookenv.relation_to_role_and_interface(relation_name)
if role and interface:
relation_class = cls._find_impl(role, interface)
if relation_class:
cls._cache[relation_name] = relation_class
return relation_class(relation_name, conversations)
return None | Find relation implementation in the current charm, based on the
name of the relation.
:return: A Relation instance, or None | entailment |
def _find_impl(cls, role, interface):
"""
Find relation implementation based on its role and interface.
"""
module = _relation_module(role, interface)
if not module:
return None
return cls._find_subclass(module) | Find relation implementation based on its role and interface. | entailment |
def _find_subclass(cls, module):
"""
Attempt to find subclass of :class:`RelationBase` in the given module.
Note: This means strictly subclasses and not :class:`RelationBase` itself.
This is to prevent picking up :class:`RelationBase` being imported to be
used as the base class.
"""
for attr in dir(module):
candidate = getattr(module, attr)
if (isclass(candidate) and issubclass(candidate, cls) and
candidate is not RelationBase):
return candidate
return None | Attempt to find subclass of :class:`RelationBase` in the given module.
Note: This means strictly subclasses and not :class:`RelationBase` itself.
This is to prevent picking up :class:`RelationBase` being imported to be
used as the base class. | entailment |
def conversation(self, scope=None):
"""
Get a single conversation, by scope, that this relation is currently handling.
If the scope is not given, the correct scope is inferred by the current
hook execution context. If there is no current hook execution context, it
is assume that there is only a single global conversation scope for this
relation. If this relation's scope is not global and there is no current
hook execution context, then an error is raised.
"""
if scope is None:
if self.scope is scopes.UNIT:
scope = hookenv.remote_unit()
elif self.scope is scopes.SERVICE:
scope = hookenv.remote_service_name()
else:
scope = self.scope
if scope is None:
raise ValueError('Unable to determine default scope: no current hook or global scope')
for conversation in self._conversations:
if conversation.scope == scope:
return conversation
else:
raise ValueError("Conversation with scope '%s' not found" % scope) | Get a single conversation, by scope, that this relation is currently handling.
If the scope is not given, the correct scope is inferred by the current
hook execution context. If there is no current hook execution context, it
is assume that there is only a single global conversation scope for this
relation. If this relation's scope is not global and there is no current
hook execution context, then an error is raised. | entailment |
def toggle_state(self, state, active=TOGGLE, scope=None):
"""
Toggle the state for the :class:`Conversation` with the given scope.
In Python, this is equivalent to::
relation.conversation(scope).toggle_state(state, active)
See :meth:`conversation` and :meth:`Conversation.toggle_state`.
"""
self.conversation(scope).toggle_state(state, active) | Toggle the state for the :class:`Conversation` with the given scope.
In Python, this is equivalent to::
relation.conversation(scope).toggle_state(state, active)
See :meth:`conversation` and :meth:`Conversation.toggle_state`. | entailment |
def set_remote(self, key=None, value=None, data=None, scope=None, **kwdata):
"""
Set data for the remote end(s) of the :class:`Conversation` with the given scope.
In Python, this is equivalent to::
relation.conversation(scope).set_remote(key, value, data, scope, **kwdata)
See :meth:`conversation` and :meth:`Conversation.set_remote`.
"""
self.conversation(scope).set_remote(key, value, data, **kwdata) | Set data for the remote end(s) of the :class:`Conversation` with the given scope.
In Python, this is equivalent to::
relation.conversation(scope).set_remote(key, value, data, scope, **kwdata)
See :meth:`conversation` and :meth:`Conversation.set_remote`. | entailment |
def get_remote(self, key, default=None, scope=None):
"""
Get data from the remote end(s) of the :class:`Conversation` with the given scope.
In Python, this is equivalent to::
relation.conversation(scope).get_remote(key, default)
See :meth:`conversation` and :meth:`Conversation.get_remote`.
"""
return self.conversation(scope).get_remote(key, default) | Get data from the remote end(s) of the :class:`Conversation` with the given scope.
In Python, this is equivalent to::
relation.conversation(scope).get_remote(key, default)
See :meth:`conversation` and :meth:`Conversation.get_remote`. | entailment |
def set_local(self, key=None, value=None, data=None, scope=None, **kwdata):
"""
Locally store some data, namespaced by the current or given :class:`Conversation` scope.
In Python, this is equivalent to::
relation.conversation(scope).set_local(data, scope, **kwdata)
See :meth:`conversation` and :meth:`Conversation.set_local`.
"""
self.conversation(scope).set_local(key, value, data, **kwdata) | Locally store some data, namespaced by the current or given :class:`Conversation` scope.
In Python, this is equivalent to::
relation.conversation(scope).set_local(data, scope, **kwdata)
See :meth:`conversation` and :meth:`Conversation.set_local`. | entailment |
def get_local(self, key, default=None, scope=None):
"""
Retrieve some data previously set via :meth:`set_local`.
In Python, this is equivalent to::
relation.conversation(scope).get_local(key, default)
See :meth:`conversation` and :meth:`Conversation.get_local`.
"""
return self.conversation(scope).get_local(key, default) | Retrieve some data previously set via :meth:`set_local`.
In Python, this is equivalent to::
relation.conversation(scope).get_local(key, default)
See :meth:`conversation` and :meth:`Conversation.get_local`. | entailment |
def relation_ids(self):
"""
The set of IDs of the specific relation instances that this conversation
is communicating with.
"""
if self.scope == scopes.GLOBAL:
# the namespace is the relation name and this conv speaks for all
# connected instances of that relation
return hookenv.relation_ids(self.namespace)
else:
# the namespace is the relation ID
return [self.namespace] | The set of IDs of the specific relation instances that this conversation
is communicating with. | entailment |
def join(cls, scope):
"""
Get or create a conversation for the given scope and active hook context.
The current remote unit for the active hook context will be added to
the conversation.
Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
:meth:`~charmhelpers.core.unitdata.Storage.flush` be called.
"""
relation_name = hookenv.relation_type()
relation_id = hookenv.relation_id()
unit = hookenv.remote_unit()
service = hookenv.remote_service_name()
if scope is scopes.UNIT:
scope = unit
namespace = relation_id
elif scope is scopes.SERVICE:
scope = service
namespace = relation_id
else:
namespace = relation_name
key = cls._key(namespace, scope)
data = unitdata.kv().get(key, {'namespace': namespace, 'scope': scope, 'units': []})
conversation = cls.deserialize(data)
conversation.units.add(unit)
unitdata.kv().set(key, cls.serialize(conversation))
return conversation | Get or create a conversation for the given scope and active hook context.
The current remote unit for the active hook context will be added to
the conversation.
Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
:meth:`~charmhelpers.core.unitdata.Storage.flush` be called. | entailment |
def depart(self):
"""
Remove the current remote unit, for the active hook context, from
this conversation. This should be called from a `-departed` hook.
"""
unit = hookenv.remote_unit()
self.units.remove(unit)
if self.units:
unitdata.kv().set(self.key, self.serialize(self))
else:
unitdata.kv().unset(self.key) | Remove the current remote unit, for the active hook context, from
this conversation. This should be called from a `-departed` hook. | entailment |
def serialize(cls, conversation):
"""
Serialize a conversation instance for storage.
"""
return {
'namespace': conversation.namespace,
'units': sorted(conversation.units),
'scope': conversation.scope,
} | Serialize a conversation instance for storage. | entailment |
def load(cls, keys):
"""
Load a set of conversations by their keys.
"""
conversations = []
for key in keys:
conversation = unitdata.kv().get(key)
if conversation:
conversations.append(cls.deserialize(conversation))
return conversations | Load a set of conversations by their keys. | entailment |
def set_state(self, state):
"""
Activate and put this conversation into the given state.
The relation name will be interpolated in the state name, and it is
recommended that it be included to avoid conflicts with states from
other relations. For example::
conversation.set_state('{relation_name}.state')
If called from a converation handling the relation "foo", this will
activate the "foo.state" state, and will add this conversation to
that state.
Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
:meth:`~charmhelpers.core.unitdata.Storage.flush` be called.
"""
state = state.format(relation_name=self.relation_name)
value = _get_flag_value(state, {
'relation': self.relation_name,
'conversations': [],
})
if self.key not in value['conversations']:
value['conversations'].append(self.key)
set_flag(state, value) | Activate and put this conversation into the given state.
The relation name will be interpolated in the state name, and it is
recommended that it be included to avoid conflicts with states from
other relations. For example::
conversation.set_state('{relation_name}.state')
If called from a converation handling the relation "foo", this will
activate the "foo.state" state, and will add this conversation to
that state.
Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
:meth:`~charmhelpers.core.unitdata.Storage.flush` be called. | entailment |
def remove_state(self, state):
"""
Remove this conversation from the given state, and potentially
deactivate the state if no more conversations are in it.
The relation name will be interpolated in the state name, and it is
recommended that it be included to avoid conflicts with states from
other relations. For example::
conversation.remove_state('{relation_name}.state')
If called from a converation handling the relation "foo", this will
remove the conversation from the "foo.state" state, and, if no more
conversations are in this the state, will deactivate it.
"""
state = state.format(relation_name=self.relation_name)
value = _get_flag_value(state)
if not value:
return
if self.key in value['conversations']:
value['conversations'].remove(self.key)
if value['conversations']:
set_flag(state, value)
else:
clear_flag(state) | Remove this conversation from the given state, and potentially
deactivate the state if no more conversations are in it.
The relation name will be interpolated in the state name, and it is
recommended that it be included to avoid conflicts with states from
other relations. For example::
conversation.remove_state('{relation_name}.state')
If called from a converation handling the relation "foo", this will
remove the conversation from the "foo.state" state, and, if no more
conversations are in this the state, will deactivate it. | entailment |
def is_state(self, state):
"""
Test if this conversation is in the given state.
"""
state = state.format(relation_name=self.relation_name)
value = _get_flag_value(state)
if not value:
return False
return self.key in value['conversations'] | Test if this conversation is in the given state. | entailment |
def toggle_state(self, state, active=TOGGLE):
"""
Toggle the given state for this conversation.
The state will be set ``active`` is ``True``, otherwise the state will be removed.
If ``active`` is not given, it will default to the inverse of the current state
(i.e., ``False`` if the state is currently set, ``True`` if it is not; essentially
toggling the state).
For example::
conv.toggle_state('{relation_name}.foo', value=='foo')
This will set the state if ``value`` is equal to ``foo``.
"""
if active is TOGGLE:
active = not self.is_state(state)
if active:
self.set_state(state)
else:
self.remove_state(state) | Toggle the given state for this conversation.
The state will be set ``active`` is ``True``, otherwise the state will be removed.
If ``active`` is not given, it will default to the inverse of the current state
(i.e., ``False`` if the state is currently set, ``True`` if it is not; essentially
toggling the state).
For example::
conv.toggle_state('{relation_name}.foo', value=='foo')
This will set the state if ``value`` is equal to ``foo``. | entailment |
def set_remote(self, key=None, value=None, data=None, **kwdata):
"""
Set data for the remote end(s) of this conversation.
Data can be passed in either as a single dict, or as key-word args.
Note that, in Juju, setting relation data is inherently service scoped.
That is, if the conversation only includes a single unit, the data will
still be set for that unit's entire service.
However, if this conversation's scope encompasses multiple services,
the data will be set for all of those services.
:param str key: The name of a field to set.
:param value: A value to set. This value must be json serializable.
:param dict data: A mapping of keys to values.
:param **kwdata: A mapping of keys to values, as keyword arguments.
"""
if data is None:
data = {}
if key is not None:
data[key] = value
data.update(kwdata)
if not data:
return
for relation_id in self.relation_ids:
hookenv.relation_set(relation_id, data) | Set data for the remote end(s) of this conversation.
Data can be passed in either as a single dict, or as key-word args.
Note that, in Juju, setting relation data is inherently service scoped.
That is, if the conversation only includes a single unit, the data will
still be set for that unit's entire service.
However, if this conversation's scope encompasses multiple services,
the data will be set for all of those services.
:param str key: The name of a field to set.
:param value: A value to set. This value must be json serializable.
:param dict data: A mapping of keys to values.
:param **kwdata: A mapping of keys to values, as keyword arguments. | entailment |
def get_remote(self, key, default=None):
"""
Get a value from the remote end(s) of this conversation.
Note that if a conversation's scope encompasses multiple units, then
those units are expected to agree on their data, whether that is through
relying on a single leader to set the data or by all units eventually
converging to identical data. Thus, this method returns the first
value that it finds set by any of its units.
"""
cur_rid = hookenv.relation_id()
departing = hookenv.hook_name().endswith('-relation-departed')
for relation_id in self.relation_ids:
units = hookenv.related_units(relation_id)
if departing and cur_rid == relation_id:
# Work around the fact that Juju 2.0 doesn't include the
# departing unit in relation-list during the -departed hook,
# by adding it back in ourselves.
units.append(hookenv.remote_unit())
for unit in units:
if unit not in self.units:
continue
value = hookenv.relation_get(key, unit, relation_id)
if value:
return value
return default | Get a value from the remote end(s) of this conversation.
Note that if a conversation's scope encompasses multiple units, then
those units are expected to agree on their data, whether that is through
relying on a single leader to set the data or by all units eventually
converging to identical data. Thus, this method returns the first
value that it finds set by any of its units. | entailment |
def set_local(self, key=None, value=None, data=None, **kwdata):
"""
Locally store some data associated with this conversation.
Data can be passed in either as a single dict, or as key-word args.
For example, if you need to store the previous value of a remote field
to determine if it has changed, you can use the following::
prev = conversation.get_local('field')
curr = conversation.get_remote('field')
if prev != curr:
handle_change(prev, curr)
conversation.set_local('field', curr)
Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
:meth:`~charmhelpers.core.unitdata.Storage.flush` be called.
:param str key: The name of a field to set.
:param value: A value to set. This value must be json serializable.
:param dict data: A mapping of keys to values.
:param **kwdata: A mapping of keys to values, as keyword arguments.
"""
if data is None:
data = {}
if key is not None:
data[key] = value
data.update(kwdata)
if not data:
return
unitdata.kv().update(data, prefix='%s.%s.' % (self.key, 'local-data')) | Locally store some data associated with this conversation.
Data can be passed in either as a single dict, or as key-word args.
For example, if you need to store the previous value of a remote field
to determine if it has changed, you can use the following::
prev = conversation.get_local('field')
curr = conversation.get_remote('field')
if prev != curr:
handle_change(prev, curr)
conversation.set_local('field', curr)
Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
:meth:`~charmhelpers.core.unitdata.Storage.flush` be called.
:param str key: The name of a field to set.
:param value: A value to set. This value must be json serializable.
:param dict data: A mapping of keys to values.
:param **kwdata: A mapping of keys to values, as keyword arguments. | entailment |
def get_local(self, key, default=None):
"""
Retrieve some data previously set via :meth:`set_local` for this conversation.
"""
key = '%s.%s.%s' % (self.key, 'local-data', key)
return unitdata.kv().get(key, default) | Retrieve some data previously set via :meth:`set_local` for this conversation. | entailment |
def set_flag(flag, value=None):
"""set_flag(flag)
Set the given flag as active.
:param str flag: Name of flag to set.
.. note:: **Changes to flags are reset when a handler crashes.** Changes to
flags happen immediately, but they are only persisted at the end of a
complete and successful run of the reactive framework. All unpersisted
changes are discarded when a hook crashes.
"""
old_flags = get_flags()
unitdata.kv().update({flag: value}, prefix='reactive.states.')
if flag not in old_flags:
tracer().set_flag(flag)
FlagWatch.change(flag)
trigger = _get_trigger(flag, None)
for flag_name in trigger['set_flag']:
set_flag(flag_name)
for flag_name in trigger['clear_flag']:
clear_flag(flag_name) | set_flag(flag)
Set the given flag as active.
:param str flag: Name of flag to set.
.. note:: **Changes to flags are reset when a handler crashes.** Changes to
flags happen immediately, but they are only persisted at the end of a
complete and successful run of the reactive framework. All unpersisted
changes are discarded when a hook crashes. | entailment |
def clear_flag(flag):
"""
Clear / deactivate a flag.
:param str flag: Name of flag to set.
.. note:: **Changes to flags are reset when a handler crashes.** Changes to
flags happen immediately, but they are only persisted at the end of a
complete and successful run of the reactive framework. All unpersisted
changes are discarded when a hook crashes.
"""
old_flags = get_flags()
unitdata.kv().unset('reactive.states.%s' % flag)
unitdata.kv().set('reactive.dispatch.removed_state', True)
if flag in old_flags:
tracer().clear_flag(flag)
FlagWatch.change(flag)
trigger = _get_trigger(None, flag)
for flag_name in trigger['set_flag']:
set_flag(flag_name)
for flag_name in trigger['clear_flag']:
clear_flag(flag_name) | Clear / deactivate a flag.
:param str flag: Name of flag to set.
.. note:: **Changes to flags are reset when a handler crashes.** Changes to
flags happen immediately, but they are only persisted at the end of a
complete and successful run of the reactive framework. All unpersisted
changes are discarded when a hook crashes. | entailment |
def register_trigger(when=None, when_not=None, set_flag=None, clear_flag=None):
"""
Register a trigger to set or clear a flag when a given flag is set.
Note: Flag triggers are handled at the same time that the given flag is set.
:param str when: Flag to trigger on when it is set.
:param str when_not: Flag to trigger on when it is cleared.
:param str set_flag: If given, this flag will be set when `when` is set.
:param str clear_flag: If given, this flag will be cleared when `when` is set.
Note: Exactly one of either `when` or `when_not`, and at least one of
`set_flag` or `clear_flag` must be provided.
"""
if not any((when, when_not)):
raise ValueError('Must provide one of when or when_not')
if all((when, when_not)):
raise ValueError('Only one of when or when_not can be provided')
if not any((set_flag, clear_flag)):
raise ValueError('Must provide at least one of set_flag or clear_flag')
trigger = _get_trigger(when, when_not)
if set_flag and set_flag not in trigger['set_flag']:
trigger['set_flag'].append(set_flag)
if clear_flag and clear_flag not in trigger['clear_flag']:
trigger['clear_flag'].append(clear_flag)
_save_trigger(when, when_not, trigger) | Register a trigger to set or clear a flag when a given flag is set.
Note: Flag triggers are handled at the same time that the given flag is set.
:param str when: Flag to trigger on when it is set.
:param str when_not: Flag to trigger on when it is cleared.
:param str set_flag: If given, this flag will be set when `when` is set.
:param str clear_flag: If given, this flag will be cleared when `when` is set.
Note: Exactly one of either `when` or `when_not`, and at least one of
`set_flag` or `clear_flag` must be provided. | entailment |
def get_flags():
"""
Return a list of all flags which are set.
"""
flags = unitdata.kv().getrange('reactive.states.', strip=True) or {}
return sorted(flags.keys()) | Return a list of all flags which are set. | entailment |
def dispatch(restricted=False):
"""
Dispatch registered handlers.
When dispatching in restricted mode, only matching hook handlers are executed.
Handlers are dispatched according to the following rules:
* Handlers are repeatedly tested and invoked in iterations, until the system
settles into quiescence (that is, until no new handlers match to be invoked).
* In the first iteration, :func:`@hook <charms.reactive.decorators.hook>`
and :func:`@action <charms.reactive.decorators.action>` handlers will
be invoked, if they match.
* In subsequent iterations, other handlers are invoked, if they match.
* Added flags will not trigger new handlers until the next iteration,
to ensure that chained flags are invoked in a predictable order.
* Removed flags will cause the current set of matched handlers to be
re-tested, to ensure that no handler is invoked after its matching
flag has been removed.
* Other than the guarantees mentioned above, the order in which matching
handlers are invoked is undefined.
* Flags are preserved between hook and action invocations, and all matching
handlers are re-invoked for every hook and action. There are
:doc:`decorators <charms.reactive.decorators>` and
:doc:`helpers <charms.reactive.helpers>`
to prevent unnecessary reinvocations, such as
:func:`~charms.reactive.decorators.only_once`.
"""
FlagWatch.reset()
def _test(to_test):
return list(filter(lambda h: h.test(), to_test))
def _invoke(to_invoke):
while to_invoke:
unitdata.kv().set('reactive.dispatch.removed_state', False)
for handler in list(to_invoke):
to_invoke.remove(handler)
hookenv.log('Invoking reactive handler: %s' % handler.id(), level=hookenv.INFO)
handler.invoke()
if unitdata.kv().get('reactive.dispatch.removed_state'):
# re-test remaining handlers
to_invoke = _test(to_invoke)
break
FlagWatch.commit()
tracer().start_dispatch()
# When in restricted context, only run hooks for that context.
if restricted:
unitdata.kv().set('reactive.dispatch.phase', 'restricted')
hook_handlers = _test(Handler.get_handlers())
tracer().start_dispatch_phase('restricted', hook_handlers)
_invoke(hook_handlers)
return
unitdata.kv().set('reactive.dispatch.phase', 'hooks')
hook_handlers = _test(Handler.get_handlers())
tracer().start_dispatch_phase('hooks', hook_handlers)
_invoke(hook_handlers)
unitdata.kv().set('reactive.dispatch.phase', 'other')
for i in range(100):
FlagWatch.iteration(i)
other_handlers = _test(Handler.get_handlers())
if i == 0:
tracer().start_dispatch_phase('other', other_handlers)
tracer().start_dispatch_iteration(i, other_handlers)
if not other_handlers:
break
_invoke(other_handlers)
FlagWatch.reset() | Dispatch registered handlers.
When dispatching in restricted mode, only matching hook handlers are executed.
Handlers are dispatched according to the following rules:
* Handlers are repeatedly tested and invoked in iterations, until the system
settles into quiescence (that is, until no new handlers match to be invoked).
* In the first iteration, :func:`@hook <charms.reactive.decorators.hook>`
and :func:`@action <charms.reactive.decorators.action>` handlers will
be invoked, if they match.
* In subsequent iterations, other handlers are invoked, if they match.
* Added flags will not trigger new handlers until the next iteration,
to ensure that chained flags are invoked in a predictable order.
* Removed flags will cause the current set of matched handlers to be
re-tested, to ensure that no handler is invoked after its matching
flag has been removed.
* Other than the guarantees mentioned above, the order in which matching
handlers are invoked is undefined.
* Flags are preserved between hook and action invocations, and all matching
handlers are re-invoked for every hook and action. There are
:doc:`decorators <charms.reactive.decorators>` and
:doc:`helpers <charms.reactive.helpers>`
to prevent unnecessary reinvocations, such as
:func:`~charms.reactive.decorators.only_once`. | entailment |
def discover():
"""
Discover handlers based on convention.
Handlers will be loaded from the following directories and their subdirectories:
* ``$CHARM_DIR/reactive/``
* ``$CHARM_DIR/hooks/reactive/``
* ``$CHARM_DIR/hooks/relations/``
They can be Python files, in which case they will be imported and decorated
functions registered. Or they can be executables, in which case they must
adhere to the :class:`ExternalHandler` protocol.
"""
# Add $CHARM_DIR and $CHARM_DIR/hooks to sys.path so
# 'import reactive.leadership', 'import relations.pgsql' works
# as expected, as well as relative imports like 'import ..leadership'
# or 'from . import leadership'. Without this, it becomes difficult
# for layers to access APIs provided by other layers. This addition
# needs to remain in effect, in case discovered modules are doing
# late imports.
_append_path(hookenv.charm_dir())
_append_path(os.path.join(hookenv.charm_dir(), 'hooks'))
for search_dir in ('reactive', 'hooks/reactive', 'hooks/relations'):
search_path = os.path.join(hookenv.charm_dir(), search_dir)
for dirpath, dirnames, filenames in os.walk(search_path):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
_register_handlers_from_file(search_path, filepath) | Discover handlers based on convention.
Handlers will be loaded from the following directories and their subdirectories:
* ``$CHARM_DIR/reactive/``
* ``$CHARM_DIR/hooks/reactive/``
* ``$CHARM_DIR/hooks/relations/``
They can be Python files, in which case they will be imported and decorated
functions registered. Or they can be executables, in which case they must
adhere to the :class:`ExternalHandler` protocol. | entailment |
def get(cls, action, suffix=None):
"""
Get or register a handler for the given action.
:param func action: Callback that is called when invoking the Handler
:param func suffix: Optional suffix for the handler's ID
"""
action_id = _action_id(action, suffix)
if action_id not in cls._HANDLERS:
if LOG_OPTS['register']:
hookenv.log('Registering reactive handler for %s' % _short_action_id(action, suffix),
level=hookenv.DEBUG)
cls._HANDLERS[action_id] = cls(action, suffix)
return cls._HANDLERS[action_id] | Get or register a handler for the given action.
:param func action: Callback that is called when invoking the Handler
:param func suffix: Optional suffix for the handler's ID | entailment |
def add_predicate(self, predicate):
"""
Add a new predicate callback to this handler.
"""
_predicate = predicate
if isinstance(predicate, partial):
_predicate = 'partial(%s, %s, %s)' % (predicate.func, predicate.args, predicate.keywords)
if LOG_OPTS['register']:
hookenv.log(' Adding predicate for %s: %s' % (self.id(), _predicate), level=hookenv.DEBUG)
self._predicates.append(predicate) | Add a new predicate callback to this handler. | entailment |
def _get_args(self):
"""
Lazily evaluate the args.
"""
if not hasattr(self, '_args_evaled'):
# cache the args in case handler is re-invoked due to flags change
self._args_evaled = list(chain.from_iterable(self._args))
return self._args_evaled | Lazily evaluate the args. | entailment |
def invoke(self):
"""
Invoke this handler.
"""
args = self._get_args()
self._action(*args)
for callback in self._post_callbacks:
callback() | Invoke this handler. | entailment |
def register_flags(self, flags):
"""
Register flags as being relevant to this handler.
Relevant flags will be used to determine if the handler should
be re-invoked due to changes in the set of active flags. If this
handler has already been invoked during this :func:`dispatch` run
and none of its relevant flags have been set or removed since then,
then the handler will be skipped.
This is also used for linting and composition purposes, to determine
if a layer has unhandled flags.
"""
self._CONSUMED_FLAGS.update(flags)
self._flags.update(flags) | Register flags as being relevant to this handler.
Relevant flags will be used to determine if the handler should
be re-invoked due to changes in the set of active flags. If this
handler has already been invoked during this :func:`dispatch` run
and none of its relevant flags have been set or removed since then,
then the handler will be skipped.
This is also used for linting and composition purposes, to determine
if a layer has unhandled flags. | entailment |
def invoke(self):
"""
Call the external handler to be invoked.
"""
# flush to ensure external process can see flags as they currently
# are, and write flags (flush releases lock)
unitdata.kv().flush()
subprocess.check_call([self._filepath, '--invoke', self._test_output], env=os.environ) | Call the external handler to be invoked. | entailment |
def hook(*hook_patterns):
"""
Register the decorated function to run when the current hook matches any of
the ``hook_patterns``.
This decorator is generally deprecated and should only be used when
absolutely necessary.
The hook patterns can use the ``{interface:...}`` and ``{A,B,...}`` syntax
supported by :func:`~charms.reactive.bus.any_hook`.
Note that hook decorators **cannot** be combined with :func:`when` or
:func:`when_not` decorators.
"""
def _register(action):
def arg_gen():
# use a generator to defer calling of hookenv.relation_type, for tests
rel = endpoint_from_name(hookenv.relation_type())
if rel:
yield rel
handler = Handler.get(action)
handler.add_predicate(partial(_hook, hook_patterns))
handler.add_args(arg_gen())
return action
return _register | Register the decorated function to run when the current hook matches any of
the ``hook_patterns``.
This decorator is generally deprecated and should only be used when
absolutely necessary.
The hook patterns can use the ``{interface:...}`` and ``{A,B,...}`` syntax
supported by :func:`~charms.reactive.bus.any_hook`.
Note that hook decorators **cannot** be combined with :func:`when` or
:func:`when_not` decorators. | entailment |
def when_file_changed(*filenames, **kwargs):
"""
Register the decorated function to run when one or more files have changed.
:param list filenames: The names of one or more files to check for changes
(a callable returning the name is also accepted).
:param str hash_type: The type of hash to use for determining if a file has
changed. Defaults to 'md5'. Must be given as a kwarg.
"""
def _register(action):
handler = Handler.get(action)
handler.add_predicate(partial(any_file_changed, filenames, **kwargs))
return action
return _register | Register the decorated function to run when one or more files have changed.
:param list filenames: The names of one or more files to check for changes
(a callable returning the name is also accepted).
:param str hash_type: The type of hash to use for determining if a file has
changed. Defaults to 'md5'. Must be given as a kwarg. | entailment |
def not_unless(*desired_flags):
"""
Assert that the decorated function can only be called if the desired_flags
are active.
Note that, unlike :func:`when`, this does **not** trigger the decorated
function if the flags match. It **only** raises an exception if the
function is called when the flags do not match.
This is primarily for informational purposes and as a guard clause.
"""
def _decorator(func):
action_id = _action_id(func)
short_action_id = _short_action_id(func)
@wraps(func)
def _wrapped(*args, **kwargs):
active_flags = get_flags()
missing_flags = [flag for flag in desired_flags if flag not in active_flags]
if missing_flags:
hookenv.log('%s called before flag%s: %s' % (
short_action_id,
's' if len(missing_flags) > 1 else '',
', '.join(missing_flags)), hookenv.WARNING)
return func(*args, **kwargs)
_wrapped._action_id = action_id
_wrapped._short_action_id = short_action_id
return _wrapped
return _decorator | Assert that the decorated function can only be called if the desired_flags
are active.
Note that, unlike :func:`when`, this does **not** trigger the decorated
function if the flags match. It **only** raises an exception if the
function is called when the flags do not match.
This is primarily for informational purposes and as a guard clause. | entailment |
def only_once(action=None):
"""
.. deprecated:: 0.5.0
Use :func:`when_not` in combination with :func:`set_state` instead. This
handler is deprecated because it might actually be
`called multiple times <https://github.com/juju-solutions/charms.reactive/issues/22>`_.
Register the decorated function to be run once, and only once.
This decorator will never cause arguments to be passed to the handler.
"""
if action is None:
# allow to be used as @only_once or @only_once()
return only_once
action_id = _action_id(action)
handler = Handler.get(action)
handler.add_predicate(lambda: not was_invoked(action_id))
handler.add_post_callback(partial(mark_invoked, action_id))
return action | .. deprecated:: 0.5.0
Use :func:`when_not` in combination with :func:`set_state` instead. This
handler is deprecated because it might actually be
`called multiple times <https://github.com/juju-solutions/charms.reactive/issues/22>`_.
Register the decorated function to be run once, and only once.
This decorator will never cause arguments to be passed to the handler. | entailment |
def collect_metrics():
"""
Register the decorated function to run for the collect_metrics hook.
"""
def _register(action):
handler = Handler.get(action)
handler.add_predicate(partial(_restricted_hook, 'collect-metrics'))
return action
return _register | Register the decorated function to run for the collect_metrics hook. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.