id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
24,200
slundberg/shap
shap/benchmark/plots.py
_human_score_map
def _human_score_map(human_consensus, methods_attrs): """ Converts human agreement differences to numerical scores for coloring. """ v = 1 - min(np.sum(np.abs(methods_attrs - human_consensus)) / (np.abs(human_consensus).sum() + 1), 1.0) return v
python
def _human_score_map(human_consensus, methods_attrs): """ Converts human agreement differences to numerical scores for coloring. """ v = 1 - min(np.sum(np.abs(methods_attrs - human_consensus)) / (np.abs(human_consensus).sum() + 1), 1.0) return v
[ "def", "_human_score_map", "(", "human_consensus", ",", "methods_attrs", ")", ":", "v", "=", "1", "-", "min", "(", "np", ".", "sum", "(", "np", ".", "abs", "(", "methods_attrs", "-", "human_consensus", ")", ")", "/", "(", "np", ".", "abs", "(", "human_consensus", ")", ".", "sum", "(", ")", "+", "1", ")", ",", "1.0", ")", "return", "v" ]
Converts human agreement differences to numerical scores for coloring.
[ "Converts", "human", "agreement", "differences", "to", "numerical", "scores", "for", "coloring", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/plots.py#L370-L375
24,201
slundberg/shap
shap/plots/force_matplotlib.py
draw_bars
def draw_bars(out_value, features, feature_type, width_separators, width_bar): """Draw the bars and separators.""" rectangle_list = [] separator_list = [] pre_val = out_value for index, features in zip(range(len(features)), features): if feature_type == 'positive': left_bound = float(features[0]) right_bound = pre_val pre_val = left_bound separator_indent = np.abs(width_separators) separator_pos = left_bound colors = ['#FF0D57', '#FFC3D5'] else: left_bound = pre_val right_bound = float(features[0]) pre_val = right_bound separator_indent = - np.abs(width_separators) separator_pos = right_bound colors = ['#1E88E5', '#D1E6FA'] # Create rectangle if index == 0: if feature_type == 'positive': points_rectangle = [[left_bound, 0], [right_bound, 0], [right_bound, width_bar], [left_bound, width_bar], [left_bound + separator_indent, (width_bar / 2)] ] else: points_rectangle = [[right_bound, 0], [left_bound, 0], [left_bound, width_bar], [right_bound, width_bar], [right_bound + separator_indent, (width_bar / 2)] ] else: points_rectangle = [[left_bound, 0], [right_bound, 0], [right_bound + separator_indent * 0.90, (width_bar / 2)], [right_bound, width_bar], [left_bound, width_bar], [left_bound + separator_indent * 0.90, (width_bar / 2)]] line = plt.Polygon(points_rectangle, closed=True, fill=True, facecolor=colors[0], linewidth=0) rectangle_list += [line] # Create seperator points_separator = [[separator_pos, 0], [separator_pos + separator_indent, (width_bar / 2)], [separator_pos, width_bar]] line = plt.Polygon(points_separator, closed=None, fill=None, edgecolor=colors[1], lw=3) separator_list += [line] return rectangle_list, separator_list
python
def draw_bars(out_value, features, feature_type, width_separators, width_bar):
    """Draw the bars and separators.

    Walks the features outward from ``out_value``, producing one filled
    polygon (the bar) and one open polygon (the separator chevron) per
    feature. Positive features point one way, negative the other.
    """
    rectangle_list = []
    separator_list = []

    pre_val = out_value
    for index, feature in enumerate(features):
        # Geometry and palette depend on which side of the output we draw.
        if feature_type == 'positive':
            left_bound = float(feature[0])
            right_bound = pre_val
            pre_val = left_bound

            separator_indent = np.abs(width_separators)
            separator_pos = left_bound
            colors = ['#FF0D57', '#FFC3D5']
        else:
            left_bound = pre_val
            right_bound = float(feature[0])
            pre_val = right_bound

            separator_indent = -np.abs(width_separators)
            separator_pos = right_bound
            colors = ['#1E88E5', '#D1E6FA']

        # Build the bar outline: the first bar is arrow-tipped on one side
        # only, every later bar is notched on both sides so bars interlock.
        if index == 0:
            if feature_type == 'positive':
                points_rectangle = [
                    [left_bound, 0],
                    [right_bound, 0],
                    [right_bound, width_bar],
                    [left_bound, width_bar],
                    [left_bound + separator_indent, (width_bar / 2)],
                ]
            else:
                points_rectangle = [
                    [right_bound, 0],
                    [left_bound, 0],
                    [left_bound, width_bar],
                    [right_bound, width_bar],
                    [right_bound + separator_indent, (width_bar / 2)],
                ]
        else:
            points_rectangle = [
                [left_bound, 0],
                [right_bound, 0],
                [right_bound + separator_indent * 0.90, (width_bar / 2)],
                [right_bound, width_bar],
                [left_bound, width_bar],
                [left_bound + separator_indent * 0.90, (width_bar / 2)],
            ]

        rectangle_list.append(
            plt.Polygon(points_rectangle, closed=True, fill=True,
                        facecolor=colors[0], linewidth=0))

        # Separator chevron drawn in the lighter color over the bar joint.
        points_separator = [
            [separator_pos, 0],
            [separator_pos + separator_indent, (width_bar / 2)],
            [separator_pos, width_bar],
        ]
        separator_list.append(
            plt.Polygon(points_separator, closed=None, fill=None,
                        edgecolor=colors[1], lw=3))

    return rectangle_list, separator_list
[ "def", "draw_bars", "(", "out_value", ",", "features", ",", "feature_type", ",", "width_separators", ",", "width_bar", ")", ":", "rectangle_list", "=", "[", "]", "separator_list", "=", "[", "]", "pre_val", "=", "out_value", "for", "index", ",", "features", "in", "zip", "(", "range", "(", "len", "(", "features", ")", ")", ",", "features", ")", ":", "if", "feature_type", "==", "'positive'", ":", "left_bound", "=", "float", "(", "features", "[", "0", "]", ")", "right_bound", "=", "pre_val", "pre_val", "=", "left_bound", "separator_indent", "=", "np", ".", "abs", "(", "width_separators", ")", "separator_pos", "=", "left_bound", "colors", "=", "[", "'#FF0D57'", ",", "'#FFC3D5'", "]", "else", ":", "left_bound", "=", "pre_val", "right_bound", "=", "float", "(", "features", "[", "0", "]", ")", "pre_val", "=", "right_bound", "separator_indent", "=", "-", "np", ".", "abs", "(", "width_separators", ")", "separator_pos", "=", "right_bound", "colors", "=", "[", "'#1E88E5'", ",", "'#D1E6FA'", "]", "# Create rectangle", "if", "index", "==", "0", ":", "if", "feature_type", "==", "'positive'", ":", "points_rectangle", "=", "[", "[", "left_bound", ",", "0", "]", ",", "[", "right_bound", ",", "0", "]", ",", "[", "right_bound", ",", "width_bar", "]", ",", "[", "left_bound", ",", "width_bar", "]", ",", "[", "left_bound", "+", "separator_indent", ",", "(", "width_bar", "/", "2", ")", "]", "]", "else", ":", "points_rectangle", "=", "[", "[", "right_bound", ",", "0", "]", ",", "[", "left_bound", ",", "0", "]", ",", "[", "left_bound", ",", "width_bar", "]", ",", "[", "right_bound", ",", "width_bar", "]", ",", "[", "right_bound", "+", "separator_indent", ",", "(", "width_bar", "/", "2", ")", "]", "]", "else", ":", "points_rectangle", "=", "[", "[", "left_bound", ",", "0", "]", ",", "[", "right_bound", ",", "0", "]", ",", "[", "right_bound", "+", "separator_indent", "*", "0.90", ",", "(", "width_bar", "/", "2", ")", "]", ",", "[", "right_bound", ",", "width_bar", "]", ",", "[", 
"left_bound", ",", "width_bar", "]", ",", "[", "left_bound", "+", "separator_indent", "*", "0.90", ",", "(", "width_bar", "/", "2", ")", "]", "]", "line", "=", "plt", ".", "Polygon", "(", "points_rectangle", ",", "closed", "=", "True", ",", "fill", "=", "True", ",", "facecolor", "=", "colors", "[", "0", "]", ",", "linewidth", "=", "0", ")", "rectangle_list", "+=", "[", "line", "]", "# Create seperator", "points_separator", "=", "[", "[", "separator_pos", ",", "0", "]", ",", "[", "separator_pos", "+", "separator_indent", ",", "(", "width_bar", "/", "2", ")", "]", ",", "[", "separator_pos", ",", "width_bar", "]", "]", "line", "=", "plt", ".", "Polygon", "(", "points_separator", ",", "closed", "=", "None", ",", "fill", "=", "None", ",", "edgecolor", "=", "colors", "[", "1", "]", ",", "lw", "=", "3", ")", "separator_list", "+=", "[", "line", "]", "return", "rectangle_list", ",", "separator_list" ]
Draw the bars and separators.
[ "Draw", "the", "bars", "and", "separators", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/plots/force_matplotlib.py#L15-L77
24,202
slundberg/shap
shap/plots/force_matplotlib.py
format_data
def format_data(data): """Format data.""" # Format negative features neg_features = np.array([[data['features'][x]['effect'], data['features'][x]['value'], data['featureNames'][x]] for x in data['features'].keys() if data['features'][x]['effect'] < 0]) neg_features = np.array(sorted(neg_features, key=lambda x: float(x[0]), reverse=False)) # Format postive features pos_features = np.array([[data['features'][x]['effect'], data['features'][x]['value'], data['featureNames'][x]] for x in data['features'].keys() if data['features'][x]['effect'] >= 0]) pos_features = np.array(sorted(pos_features, key=lambda x: float(x[0]), reverse=True)) # Define link function if data['link'] == 'identity': convert_func = lambda x: x elif data['link'] == 'logit': convert_func = lambda x: 1 / (1 + np.exp(-x)) else: assert False, "ERROR: Unrecognized link function: " + str(data['link']) # Convert negative feature values to plot values neg_val = data['outValue'] for i in neg_features: val = float(i[0]) neg_val = neg_val + np.abs(val) i[0] = convert_func(neg_val) if len(neg_features) > 0: total_neg = np.max(neg_features[:, 0].astype(float)) - \ np.min(neg_features[:, 0].astype(float)) else: total_neg = 0 # Convert positive feature values to plot values pos_val = data['outValue'] for i in pos_features: val = float(i[0]) pos_val = pos_val - np.abs(val) i[0] = convert_func(pos_val) if len(pos_features) > 0: total_pos = np.max(pos_features[:, 0].astype(float)) - \ np.min(pos_features[:, 0].astype(float)) else: total_pos = 0 # Convert output value and base value data['outValue'] = convert_func(data['outValue']) data['baseValue'] = convert_func(data['baseValue']) return neg_features, total_neg, pos_features, total_pos
python
def format_data(data):
    """Format data.

    Splits the features by sign of effect, sorts them, converts each
    cumulative effect into a plot-space coordinate through the link
    function, and returns the per-sign totals. Note: ``data['outValue']``
    and ``data['baseValue']`` are converted in place.
    """
    feats = data['features']
    names = data['featureNames']

    # Each row is [effect, value, name]; np.array on mixed types yields a
    # string array, so effects are re-parsed with float() below.
    neg_features = np.array([[feats[x]['effect'], feats[x]['value'], names[x]]
                             for x in feats.keys() if feats[x]['effect'] < 0])
    neg_features = np.array(sorted(neg_features, key=lambda row: float(row[0]), reverse=False))

    pos_features = np.array([[feats[x]['effect'], feats[x]['value'], names[x]]
                             for x in feats.keys() if feats[x]['effect'] >= 0])
    pos_features = np.array(sorted(pos_features, key=lambda row: float(row[0]), reverse=True))

    # Pick the link function used to map raw model space to plot space.
    if data['link'] == 'identity':
        def convert_func(x):
            return x
    elif data['link'] == 'logit':
        def convert_func(x):
            return 1 / (1 + np.exp(-x))
    else:
        assert False, "ERROR: Unrecognized link function: " + str(data['link'])

    # Accumulate negative effects outward from the output value.
    running = data['outValue']
    for row in neg_features:
        running = running + np.abs(float(row[0]))
        row[0] = convert_func(running)
    if len(neg_features) > 0:
        neg_col = neg_features[:, 0].astype(float)
        total_neg = np.max(neg_col) - np.min(neg_col)
    else:
        total_neg = 0

    # Accumulate positive effects outward from the output value.
    running = data['outValue']
    for row in pos_features:
        running = running - np.abs(float(row[0]))
        row[0] = convert_func(running)
    if len(pos_features) > 0:
        pos_col = pos_features[:, 0].astype(float)
        total_pos = np.max(pos_col) - np.min(pos_col)
    else:
        total_pos = 0

    # Convert output value and base value in place.
    data['outValue'] = convert_func(data['outValue'])
    data['baseValue'] = convert_func(data['baseValue'])

    return neg_features, total_neg, pos_features, total_pos
[ "def", "format_data", "(", "data", ")", ":", "# Format negative features", "neg_features", "=", "np", ".", "array", "(", "[", "[", "data", "[", "'features'", "]", "[", "x", "]", "[", "'effect'", "]", ",", "data", "[", "'features'", "]", "[", "x", "]", "[", "'value'", "]", ",", "data", "[", "'featureNames'", "]", "[", "x", "]", "]", "for", "x", "in", "data", "[", "'features'", "]", ".", "keys", "(", ")", "if", "data", "[", "'features'", "]", "[", "x", "]", "[", "'effect'", "]", "<", "0", "]", ")", "neg_features", "=", "np", ".", "array", "(", "sorted", "(", "neg_features", ",", "key", "=", "lambda", "x", ":", "float", "(", "x", "[", "0", "]", ")", ",", "reverse", "=", "False", ")", ")", "# Format postive features", "pos_features", "=", "np", ".", "array", "(", "[", "[", "data", "[", "'features'", "]", "[", "x", "]", "[", "'effect'", "]", ",", "data", "[", "'features'", "]", "[", "x", "]", "[", "'value'", "]", ",", "data", "[", "'featureNames'", "]", "[", "x", "]", "]", "for", "x", "in", "data", "[", "'features'", "]", ".", "keys", "(", ")", "if", "data", "[", "'features'", "]", "[", "x", "]", "[", "'effect'", "]", ">=", "0", "]", ")", "pos_features", "=", "np", ".", "array", "(", "sorted", "(", "pos_features", ",", "key", "=", "lambda", "x", ":", "float", "(", "x", "[", "0", "]", ")", ",", "reverse", "=", "True", ")", ")", "# Define link function", "if", "data", "[", "'link'", "]", "==", "'identity'", ":", "convert_func", "=", "lambda", "x", ":", "x", "elif", "data", "[", "'link'", "]", "==", "'logit'", ":", "convert_func", "=", "lambda", "x", ":", "1", "/", "(", "1", "+", "np", ".", "exp", "(", "-", "x", ")", ")", "else", ":", "assert", "False", ",", "\"ERROR: Unrecognized link function: \"", "+", "str", "(", "data", "[", "'link'", "]", ")", "# Convert negative feature values to plot values", "neg_val", "=", "data", "[", "'outValue'", "]", "for", "i", "in", "neg_features", ":", "val", "=", "float", "(", "i", "[", "0", "]", ")", "neg_val", "=", "neg_val", "+", 
"np", ".", "abs", "(", "val", ")", "i", "[", "0", "]", "=", "convert_func", "(", "neg_val", ")", "if", "len", "(", "neg_features", ")", ">", "0", ":", "total_neg", "=", "np", ".", "max", "(", "neg_features", "[", ":", ",", "0", "]", ".", "astype", "(", "float", ")", ")", "-", "np", ".", "min", "(", "neg_features", "[", ":", ",", "0", "]", ".", "astype", "(", "float", ")", ")", "else", ":", "total_neg", "=", "0", "# Convert positive feature values to plot values", "pos_val", "=", "data", "[", "'outValue'", "]", "for", "i", "in", "pos_features", ":", "val", "=", "float", "(", "i", "[", "0", "]", ")", "pos_val", "=", "pos_val", "-", "np", ".", "abs", "(", "val", ")", "i", "[", "0", "]", "=", "convert_func", "(", "pos_val", ")", "if", "len", "(", "pos_features", ")", ">", "0", ":", "total_pos", "=", "np", ".", "max", "(", "pos_features", "[", ":", ",", "0", "]", ".", "astype", "(", "float", ")", ")", "-", "np", ".", "min", "(", "pos_features", "[", ":", ",", "0", "]", ".", "astype", "(", "float", ")", ")", "else", ":", "total_pos", "=", "0", "# Convert output value and base value", "data", "[", "'outValue'", "]", "=", "convert_func", "(", "data", "[", "'outValue'", "]", ")", "data", "[", "'baseValue'", "]", "=", "convert_func", "(", "data", "[", "'baseValue'", "]", ")", "return", "neg_features", ",", "total_neg", ",", "pos_features", ",", "total_pos" ]
Format data.
[ "Format", "data", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/plots/force_matplotlib.py#L199-L253
24,203
slundberg/shap
shap/plots/force_matplotlib.py
draw_additive_plot
def draw_additive_plot(data, figsize, show, text_rotation=0): """Draw additive plot.""" # Turn off interactive plot if show == False: plt.ioff() # Format data neg_features, total_neg, pos_features, total_pos = format_data(data) # Compute overall metrics base_value = data['baseValue'] out_value = data['outValue'] offset_text = (np.abs(total_neg) + np.abs(total_pos)) * 0.04 # Define plots fig, ax = plt.subplots(figsize=figsize) # Compute axis limit update_axis_limits(ax, total_pos, pos_features, total_neg, neg_features, base_value) # Define width of bar width_bar = 0.1 width_separators = (ax.get_xlim()[1] - ax.get_xlim()[0]) / 200 # Create bar for negative shap values rectangle_list, separator_list = draw_bars(out_value, neg_features, 'negative', width_separators, width_bar) for i in rectangle_list: ax.add_patch(i) for i in separator_list: ax.add_patch(i) # Create bar for positive shap values rectangle_list, separator_list = draw_bars(out_value, pos_features, 'positive', width_separators, width_bar) for i in rectangle_list: ax.add_patch(i) for i in separator_list: ax.add_patch(i) # Add labels total_effect = np.abs(total_neg) + total_pos fig, ax = draw_labels(fig, ax, out_value, neg_features, 'negative', offset_text, total_effect, min_perc=0.05, text_rotation=text_rotation) fig, ax = draw_labels(fig, ax, out_value, pos_features, 'positive', offset_text, total_effect, min_perc=0.05, text_rotation=text_rotation) # higher lower legend draw_higher_lower_element(out_value, offset_text) # Add label for base value draw_base_element(base_value, ax) # Add output label out_names = data['outNames'][0] draw_output_element(out_names, out_value, ax) if show: plt.show() else: return plt.gcf()
python
def draw_additive_plot(data, figsize, show, text_rotation=0):
    """Draw additive plot.

    Renders a matplotlib force plot: negative and positive SHAP bars
    meeting at the model output, with labels, a higher/lower legend, the
    base-value marker and the output label.

    Parameters
    ----------
    data : dict
        Force-plot specification; mutated in place by format_data, which
        applies the link function to 'outValue' and 'baseValue'.
    figsize : tuple
        Figure size forwarded to plt.subplots.
    show : bool
        If True, display the figure; otherwise return it.
    text_rotation : int or float, optional
        Rotation applied to feature labels (default 0).

    Returns
    -------
    matplotlib.figure.Figure or None
        The current figure when `show` is falsy, else None (figure shown).
    """
    # Turn off interactive plotting so the figure can be returned instead
    # of displayed. (Fixed from `if show == False:` — truthiness matches
    # the `if show:` check at the end of this function.)
    if not show:
        plt.ioff()

    # Format data
    neg_features, total_neg, pos_features, total_pos = format_data(data)

    # Compute overall metrics
    base_value = data['baseValue']
    out_value = data['outValue']
    # Label offset scales with the total magnitude of the drawn effects.
    offset_text = (np.abs(total_neg) + np.abs(total_pos)) * 0.04

    # Define plots
    fig, ax = plt.subplots(figsize=figsize)

    # Compute axis limit
    update_axis_limits(ax, total_pos, pos_features, total_neg,
                       neg_features, base_value)

    # Define width of bar; separator width is a fixed fraction of the x-span.
    width_bar = 0.1
    width_separators = (ax.get_xlim()[1] - ax.get_xlim()[0]) / 200

    # Create bar for negative shap values
    rectangle_list, separator_list = draw_bars(out_value, neg_features, 'negative',
                                               width_separators, width_bar)
    for i in rectangle_list:
        ax.add_patch(i)
    for i in separator_list:
        ax.add_patch(i)

    # Create bar for positive shap values
    rectangle_list, separator_list = draw_bars(out_value, pos_features, 'positive',
                                               width_separators, width_bar)
    for i in rectangle_list:
        ax.add_patch(i)
    for i in separator_list:
        ax.add_patch(i)

    # Add labels
    total_effect = np.abs(total_neg) + total_pos
    fig, ax = draw_labels(fig, ax, out_value, neg_features, 'negative',
                          offset_text, total_effect, min_perc=0.05,
                          text_rotation=text_rotation)
    fig, ax = draw_labels(fig, ax, out_value, pos_features, 'positive',
                          offset_text, total_effect, min_perc=0.05,
                          text_rotation=text_rotation)

    # higher / lower legend
    draw_higher_lower_element(out_value, offset_text)

    # Add label for base value
    draw_base_element(base_value, ax)

    # Add output label
    out_names = data['outNames'][0]
    draw_output_element(out_names, out_value, ax)

    if show:
        plt.show()
    else:
        return plt.gcf()
[ "def", "draw_additive_plot", "(", "data", ",", "figsize", ",", "show", ",", "text_rotation", "=", "0", ")", ":", "# Turn off interactive plot", "if", "show", "==", "False", ":", "plt", ".", "ioff", "(", ")", "# Format data", "neg_features", ",", "total_neg", ",", "pos_features", ",", "total_pos", "=", "format_data", "(", "data", ")", "# Compute overall metrics", "base_value", "=", "data", "[", "'baseValue'", "]", "out_value", "=", "data", "[", "'outValue'", "]", "offset_text", "=", "(", "np", ".", "abs", "(", "total_neg", ")", "+", "np", ".", "abs", "(", "total_pos", ")", ")", "*", "0.04", "# Define plots", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "figsize", "=", "figsize", ")", "# Compute axis limit", "update_axis_limits", "(", "ax", ",", "total_pos", ",", "pos_features", ",", "total_neg", ",", "neg_features", ",", "base_value", ")", "# Define width of bar", "width_bar", "=", "0.1", "width_separators", "=", "(", "ax", ".", "get_xlim", "(", ")", "[", "1", "]", "-", "ax", ".", "get_xlim", "(", ")", "[", "0", "]", ")", "/", "200", "# Create bar for negative shap values", "rectangle_list", ",", "separator_list", "=", "draw_bars", "(", "out_value", ",", "neg_features", ",", "'negative'", ",", "width_separators", ",", "width_bar", ")", "for", "i", "in", "rectangle_list", ":", "ax", ".", "add_patch", "(", "i", ")", "for", "i", "in", "separator_list", ":", "ax", ".", "add_patch", "(", "i", ")", "# Create bar for positive shap values", "rectangle_list", ",", "separator_list", "=", "draw_bars", "(", "out_value", ",", "pos_features", ",", "'positive'", ",", "width_separators", ",", "width_bar", ")", "for", "i", "in", "rectangle_list", ":", "ax", ".", "add_patch", "(", "i", ")", "for", "i", "in", "separator_list", ":", "ax", ".", "add_patch", "(", "i", ")", "# Add labels", "total_effect", "=", "np", ".", "abs", "(", "total_neg", ")", "+", "total_pos", "fig", ",", "ax", "=", "draw_labels", "(", "fig", ",", "ax", ",", "out_value", ",", "neg_features", ",", 
"'negative'", ",", "offset_text", ",", "total_effect", ",", "min_perc", "=", "0.05", ",", "text_rotation", "=", "text_rotation", ")", "fig", ",", "ax", "=", "draw_labels", "(", "fig", ",", "ax", ",", "out_value", ",", "pos_features", ",", "'positive'", ",", "offset_text", ",", "total_effect", ",", "min_perc", "=", "0.05", ",", "text_rotation", "=", "text_rotation", ")", "# higher lower legend", "draw_higher_lower_element", "(", "out_value", ",", "offset_text", ")", "# Add label for base value", "draw_base_element", "(", "base_value", ",", "ax", ")", "# Add output label", "out_names", "=", "data", "[", "'outNames'", "]", "[", "0", "]", "draw_output_element", "(", "out_names", ",", "out_value", ",", "ax", ")", "if", "show", ":", "plt", ".", "show", "(", ")", "else", ":", "return", "plt", ".", "gcf", "(", ")" ]
Draw additive plot.
[ "Draw", "additive", "plot", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/plots/force_matplotlib.py#L333-L397
24,204
slundberg/shap
setup.py
try_run_setup
def try_run_setup(**kwargs): """ Fails gracefully when various install steps don't work. """ try: run_setup(**kwargs) except Exception as e: print(str(e)) if "xgboost" in str(e).lower(): kwargs["test_xgboost"] = False print("Couldn't install XGBoost for testing!") try_run_setup(**kwargs) elif "lightgbm" in str(e).lower(): kwargs["test_lightgbm"] = False print("Couldn't install LightGBM for testing!") try_run_setup(**kwargs) elif kwargs["with_binary"]: kwargs["with_binary"] = False print("WARNING: The C extension could not be compiled, sklearn tree models not supported.") try_run_setup(**kwargs) else: print("ERROR: Failed to build!")
python
def try_run_setup(**kwargs):
    """Fails gracefully when various install steps don't work.

    Runs `run_setup` and, on failure, disables the feature implicated by
    the error message (XGBoost tests, LightGBM tests, then the compiled C
    extension) and retries recursively until the build succeeds or no
    fallback remains.

    Parameters
    ----------
    **kwargs
        Flags forwarded to run_setup; mutated between retries
        (test_xgboost, test_lightgbm, with_binary).
    """
    try:
        run_setup(**kwargs)
    except Exception as e:
        print(str(e))
        msg = str(e).lower()
        if "xgboost" in msg:
            kwargs["test_xgboost"] = False
            print("Couldn't install XGBoost for testing!")
            try_run_setup(**kwargs)
        elif "lightgbm" in msg:
            kwargs["test_lightgbm"] = False
            print("Couldn't install LightGBM for testing!")
            try_run_setup(**kwargs)
        elif kwargs.get("with_binary"):
            # .get() instead of [] so a missing flag degrades to the final
            # error message rather than raising KeyError inside the handler.
            kwargs["with_binary"] = False
            print("WARNING: The C extension could not be compiled, sklearn tree models not supported.")
            try_run_setup(**kwargs)
        else:
            print("ERROR: Failed to build!")
[ "def", "try_run_setup", "(", "*", "*", "kwargs", ")", ":", "try", ":", "run_setup", "(", "*", "*", "kwargs", ")", "except", "Exception", "as", "e", ":", "print", "(", "str", "(", "e", ")", ")", "if", "\"xgboost\"", "in", "str", "(", "e", ")", ".", "lower", "(", ")", ":", "kwargs", "[", "\"test_xgboost\"", "]", "=", "False", "print", "(", "\"Couldn't install XGBoost for testing!\"", ")", "try_run_setup", "(", "*", "*", "kwargs", ")", "elif", "\"lightgbm\"", "in", "str", "(", "e", ")", ".", "lower", "(", ")", ":", "kwargs", "[", "\"test_lightgbm\"", "]", "=", "False", "print", "(", "\"Couldn't install LightGBM for testing!\"", ")", "try_run_setup", "(", "*", "*", "kwargs", ")", "elif", "kwargs", "[", "\"with_binary\"", "]", ":", "kwargs", "[", "\"with_binary\"", "]", "=", "False", "print", "(", "\"WARNING: The C extension could not be compiled, sklearn tree models not supported.\"", ")", "try_run_setup", "(", "*", "*", "kwargs", ")", "else", ":", "print", "(", "\"ERROR: Failed to build!\"", ")" ]
Fails gracefully when various install steps don't work.
[ "Fails", "gracefully", "when", "various", "install", "steps", "don", "t", "work", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/setup.py#L101-L122
24,205
slundberg/shap
shap/explainers/deep/deep_pytorch.py
deeplift_grad
def deeplift_grad(module, grad_input, grad_output): """The backward hook which computes the deeplift gradient for an nn.Module """ # first, get the module type module_type = module.__class__.__name__ # first, check the module is supported if module_type in op_handler: if op_handler[module_type].__name__ not in ['passthrough', 'linear_1d']: return op_handler[module_type](module, grad_input, grad_output) else: print('Warning: unrecognized nn.Module: {}'.format(module_type)) return grad_input
python
def deeplift_grad(module, grad_input, grad_output):
    """The backward hook which computes the deeplift gradient for an nn.Module"""
    module_type = module.__class__.__name__
    if module_type not in op_handler:
        # Unsupported layer: warn and leave the gradient untouched.
        print('Warning: unrecognized nn.Module: {}'.format(module_type))
        return grad_input
    handler = op_handler[module_type]
    # passthrough / linear_1d layers need no gradient rewrite; returning
    # None (implicitly) keeps autograd's own gradient.
    if handler.__name__ not in ['passthrough', 'linear_1d']:
        return handler(module, grad_input, grad_output)
[ "def", "deeplift_grad", "(", "module", ",", "grad_input", ",", "grad_output", ")", ":", "# first, get the module type", "module_type", "=", "module", ".", "__class__", ".", "__name__", "# first, check the module is supported", "if", "module_type", "in", "op_handler", ":", "if", "op_handler", "[", "module_type", "]", ".", "__name__", "not", "in", "[", "'passthrough'", ",", "'linear_1d'", "]", ":", "return", "op_handler", "[", "module_type", "]", "(", "module", ",", "grad_input", ",", "grad_output", ")", "else", ":", "print", "(", "'Warning: unrecognized nn.Module: {}'", ".", "format", "(", "module_type", ")", ")", "return", "grad_input" ]
The backward hook which computes the deeplift gradient for an nn.Module
[ "The", "backward", "hook", "which", "computes", "the", "deeplift", "gradient", "for", "an", "nn", ".", "Module" ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_pytorch.py#L194-L206
24,206
slundberg/shap
shap/explainers/deep/deep_pytorch.py
add_interim_values
def add_interim_values(module, input, output): """The forward hook used to save interim tensors, detached from the graph. Used to calculate the multipliers """ try: del module.x except AttributeError: pass try: del module.y except AttributeError: pass module_type = module.__class__.__name__ if module_type in op_handler: func_name = op_handler[module_type].__name__ # First, check for cases where we don't need to save the x and y tensors if func_name == 'passthrough': pass else: # check only the 0th input varies for i in range(len(input)): if i != 0 and type(output) is tuple: assert input[i] == output[i], "Only the 0th input may vary!" # if a new method is added, it must be added here too. This ensures tensors # are only saved if necessary if func_name in ['maxpool', 'nonlinear_1d']: # only save tensors if necessary if type(input) is tuple: setattr(module, 'x', torch.nn.Parameter(input[0].detach())) else: setattr(module, 'x', torch.nn.Parameter(input.detach())) if type(output) is tuple: setattr(module, 'y', torch.nn.Parameter(output[0].detach())) else: setattr(module, 'y', torch.nn.Parameter(output.detach())) if module_type in failure_case_modules: input[0].register_hook(deeplift_tensor_grad)
python
def add_interim_values(module, input, output):
    """The forward hook used to save interim tensors, detached from the graph.
    Used to calculate the multipliers
    """
    # Drop tensors saved by a previous forward pass so stale values are
    # never mixed with this pass's activations.
    try:
        del module.x
    except AttributeError:
        pass
    try:
        del module.y
    except AttributeError:
        pass
    module_type = module.__class__.__name__
    if module_type in op_handler:
        func_name = op_handler[module_type].__name__
        # First, check for cases where we don't need to save the x and y tensors
        if func_name == 'passthrough':
            pass
        else:
            # check only the 0th input varies
            for i in range(len(input)):
                if i != 0 and type(output) is tuple:
                    assert input[i] == output[i], "Only the 0th input may vary!"
            # if a new method is added, it must be added here too. This ensures tensors
            # are only saved if necessary
            if func_name in ['maxpool', 'nonlinear_1d']:
                # only save tensors if necessary
                # Detached copies are wrapped in Parameter so they live on
                # the module and move with it between devices.
                if type(input) is tuple:
                    setattr(module, 'x', torch.nn.Parameter(input[0].detach()))
                else:
                    setattr(module, 'x', torch.nn.Parameter(input.detach()))
                if type(output) is tuple:
                    setattr(module, 'y', torch.nn.Parameter(output[0].detach()))
                else:
                    setattr(module, 'y', torch.nn.Parameter(output.detach()))
            if module_type in failure_case_modules:
                # NOTE(review): for modules listed as failure cases, a
                # tensor-level hook is attached to the 0th input —
                # presumably to work around module backward-hook
                # limitations; confirm against deeplift_tensor_grad usage.
                input[0].register_hook(deeplift_tensor_grad)
[ "def", "add_interim_values", "(", "module", ",", "input", ",", "output", ")", ":", "try", ":", "del", "module", ".", "x", "except", "AttributeError", ":", "pass", "try", ":", "del", "module", ".", "y", "except", "AttributeError", ":", "pass", "module_type", "=", "module", ".", "__class__", ".", "__name__", "if", "module_type", "in", "op_handler", ":", "func_name", "=", "op_handler", "[", "module_type", "]", ".", "__name__", "# First, check for cases where we don't need to save the x and y tensors", "if", "func_name", "==", "'passthrough'", ":", "pass", "else", ":", "# check only the 0th input varies", "for", "i", "in", "range", "(", "len", "(", "input", ")", ")", ":", "if", "i", "!=", "0", "and", "type", "(", "output", ")", "is", "tuple", ":", "assert", "input", "[", "i", "]", "==", "output", "[", "i", "]", ",", "\"Only the 0th input may vary!\"", "# if a new method is added, it must be added here too. This ensures tensors", "# are only saved if necessary", "if", "func_name", "in", "[", "'maxpool'", ",", "'nonlinear_1d'", "]", ":", "# only save tensors if necessary", "if", "type", "(", "input", ")", "is", "tuple", ":", "setattr", "(", "module", ",", "'x'", ",", "torch", ".", "nn", ".", "Parameter", "(", "input", "[", "0", "]", ".", "detach", "(", ")", ")", ")", "else", ":", "setattr", "(", "module", ",", "'x'", ",", "torch", ".", "nn", ".", "Parameter", "(", "input", ".", "detach", "(", ")", ")", ")", "if", "type", "(", "output", ")", "is", "tuple", ":", "setattr", "(", "module", ",", "'y'", ",", "torch", ".", "nn", ".", "Parameter", "(", "output", "[", "0", "]", ".", "detach", "(", ")", ")", ")", "else", ":", "setattr", "(", "module", ",", "'y'", ",", "torch", ".", "nn", ".", "Parameter", "(", "output", ".", "detach", "(", ")", ")", ")", "if", "module_type", "in", "failure_case_modules", ":", "input", "[", "0", "]", ".", "register_hook", "(", "deeplift_tensor_grad", ")" ]
The forward hook used to save interim tensors, detached from the graph. Used to calculate the multipliers
[ "The", "forward", "hook", "used", "to", "save", "interim", "tensors", "detached", "from", "the", "graph", ".", "Used", "to", "calculate", "the", "multipliers" ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_pytorch.py#L209-L245
24,207
slundberg/shap
shap/explainers/deep/deep_pytorch.py
get_target_input
def get_target_input(module, input, output): """A forward hook which saves the tensor - attached to its graph. Used if we want to explain the interim outputs of a model """ try: del module.target_input except AttributeError: pass setattr(module, 'target_input', input)
python
def get_target_input(module, input, output): """A forward hook which saves the tensor - attached to its graph. Used if we want to explain the interim outputs of a model """ try: del module.target_input except AttributeError: pass setattr(module, 'target_input', input)
[ "def", "get_target_input", "(", "module", ",", "input", ",", "output", ")", ":", "try", ":", "del", "module", ".", "target_input", "except", "AttributeError", ":", "pass", "setattr", "(", "module", ",", "'target_input'", ",", "input", ")" ]
A forward hook which saves the tensor - attached to its graph. Used if we want to explain the interim outputs of a model
[ "A", "forward", "hook", "which", "saves", "the", "tensor", "-", "attached", "to", "its", "graph", ".", "Used", "if", "we", "want", "to", "explain", "the", "interim", "outputs", "of", "a", "model" ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_pytorch.py#L248-L256
24,208
slundberg/shap
shap/explainers/deep/deep_pytorch.py
PyTorchDeepExplainer.add_handles
def add_handles(self, model, forward_handle, backward_handle): """ Add handles to all non-container layers in the model. Recursively for non-container layers """ handles_list = [] for child in model.children(): if 'nn.modules.container' in str(type(child)): handles_list.extend(self.add_handles(child, forward_handle, backward_handle)) else: handles_list.append(child.register_forward_hook(forward_handle)) handles_list.append(child.register_backward_hook(backward_handle)) return handles_list
python
def add_handles(self, model, forward_handle, backward_handle): """ Add handles to all non-container layers in the model. Recursively for non-container layers """ handles_list = [] for child in model.children(): if 'nn.modules.container' in str(type(child)): handles_list.extend(self.add_handles(child, forward_handle, backward_handle)) else: handles_list.append(child.register_forward_hook(forward_handle)) handles_list.append(child.register_backward_hook(backward_handle)) return handles_list
[ "def", "add_handles", "(", "self", ",", "model", ",", "forward_handle", ",", "backward_handle", ")", ":", "handles_list", "=", "[", "]", "for", "child", "in", "model", ".", "children", "(", ")", ":", "if", "'nn.modules.container'", "in", "str", "(", "type", "(", "child", ")", ")", ":", "handles_list", ".", "extend", "(", "self", ".", "add_handles", "(", "child", ",", "forward_handle", ",", "backward_handle", ")", ")", "else", ":", "handles_list", ".", "append", "(", "child", ".", "register_forward_hook", "(", "forward_handle", ")", ")", "handles_list", ".", "append", "(", "child", ".", "register_backward_hook", "(", "backward_handle", ")", ")", "return", "handles_list" ]
Add handles to all non-container layers in the model. Recursively for non-container layers
[ "Add", "handles", "to", "all", "non", "-", "container", "layers", "in", "the", "model", ".", "Recursively", "for", "non", "-", "container", "layers" ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_pytorch.py#L64-L76
24,209
slundberg/shap
shap/explainers/deep/deep_pytorch.py
PyTorchDeepExplainer.remove_attributes
def remove_attributes(self, model): """ Removes the x and y attributes which were added by the forward handles Recursively searches for non-container layers """ for child in model.children(): if 'nn.modules.container' in str(type(child)): self.remove_attributes(child) else: try: del child.x except AttributeError: pass try: del child.y except AttributeError: pass
python
def remove_attributes(self, model): """ Removes the x and y attributes which were added by the forward handles Recursively searches for non-container layers """ for child in model.children(): if 'nn.modules.container' in str(type(child)): self.remove_attributes(child) else: try: del child.x except AttributeError: pass try: del child.y except AttributeError: pass
[ "def", "remove_attributes", "(", "self", ",", "model", ")", ":", "for", "child", "in", "model", ".", "children", "(", ")", ":", "if", "'nn.modules.container'", "in", "str", "(", "type", "(", "child", ")", ")", ":", "self", ".", "remove_attributes", "(", "child", ")", "else", ":", "try", ":", "del", "child", ".", "x", "except", "AttributeError", ":", "pass", "try", ":", "del", "child", ".", "y", "except", "AttributeError", ":", "pass" ]
Removes the x and y attributes which were added by the forward handles Recursively searches for non-container layers
[ "Removes", "the", "x", "and", "y", "attributes", "which", "were", "added", "by", "the", "forward", "handles", "Recursively", "searches", "for", "non", "-", "container", "layers" ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_pytorch.py#L78-L94
24,210
slundberg/shap
shap/explainers/tree.py
get_xgboost_json
def get_xgboost_json(model): """ This gets a JSON dump of an XGBoost model while ensuring the features names are their indexes. """ fnames = model.feature_names model.feature_names = None json_trees = model.get_dump(with_stats=True, dump_format="json") model.feature_names = fnames # this fixes a bug where XGBoost can return invalid JSON json_trees = [t.replace(": inf,", ": 1000000000000.0,") for t in json_trees] json_trees = [t.replace(": -inf,", ": -1000000000000.0,") for t in json_trees] return json_trees
python
def get_xgboost_json(model): """ This gets a JSON dump of an XGBoost model while ensuring the features names are their indexes. """ fnames = model.feature_names model.feature_names = None json_trees = model.get_dump(with_stats=True, dump_format="json") model.feature_names = fnames # this fixes a bug where XGBoost can return invalid JSON json_trees = [t.replace(": inf,", ": 1000000000000.0,") for t in json_trees] json_trees = [t.replace(": -inf,", ": -1000000000000.0,") for t in json_trees] return json_trees
[ "def", "get_xgboost_json", "(", "model", ")", ":", "fnames", "=", "model", ".", "feature_names", "model", ".", "feature_names", "=", "None", "json_trees", "=", "model", ".", "get_dump", "(", "with_stats", "=", "True", ",", "dump_format", "=", "\"json\"", ")", "model", ".", "feature_names", "=", "fnames", "# this fixes a bug where XGBoost can return invalid JSON", "json_trees", "=", "[", "t", ".", "replace", "(", "\": inf,\"", ",", "\": 1000000000000.0,\"", ")", "for", "t", "in", "json_trees", "]", "json_trees", "=", "[", "t", ".", "replace", "(", "\": -inf,\"", ",", "\": -1000000000000.0,\"", ")", "for", "t", "in", "json_trees", "]", "return", "json_trees" ]
This gets a JSON dump of an XGBoost model while ensuring the features names are their indexes.
[ "This", "gets", "a", "JSON", "dump", "of", "an", "XGBoost", "model", "while", "ensuring", "the", "features", "names", "are", "their", "indexes", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/tree.py#L907-L919
24,211
slundberg/shap
shap/explainers/tree.py
TreeExplainer.__dynamic_expected_value
def __dynamic_expected_value(self, y): """ This computes the expected value conditioned on the given label value. """ return self.model.predict(self.data, np.ones(self.data.shape[0]) * y, output=self.model_output).mean(0)
python
def __dynamic_expected_value(self, y): """ This computes the expected value conditioned on the given label value. """ return self.model.predict(self.data, np.ones(self.data.shape[0]) * y, output=self.model_output).mean(0)
[ "def", "__dynamic_expected_value", "(", "self", ",", "y", ")", ":", "return", "self", ".", "model", ".", "predict", "(", "self", ".", "data", ",", "np", ".", "ones", "(", "self", ".", "data", ".", "shape", "[", "0", "]", ")", "*", "y", ",", "output", "=", "self", ".", "model_output", ")", ".", "mean", "(", "0", ")" ]
This computes the expected value conditioned on the given label value.
[ "This", "computes", "the", "expected", "value", "conditioned", "on", "the", "given", "label", "value", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/tree.py#L125-L129
24,212
slundberg/shap
shap/explainers/gradient.py
GradientExplainer.shap_values
def shap_values(self, X, nsamples=200, ranked_outputs=None, output_rank_order="max", rseed=None): """ Return the values for the model applied to X. Parameters ---------- X : list, if framework == 'tensorflow': numpy.array, or pandas.DataFrame if framework == 'pytorch': torch.tensor A tensor (or list of tensors) of samples (where X.shape[0] == # samples) on which to explain the model's output. ranked_outputs : None or int If ranked_outputs is None then we explain all the outputs in a multi-output model. If ranked_outputs is a positive integer then we only explain that many of the top model outputs (where "top" is determined by output_rank_order). Note that this causes a pair of values to be returned (shap_values, indexes), where phi is a list of numpy arrays for each of the output ranks, and indexes is a matrix that tells for each sample which output indexes were choses as "top". output_rank_order : "max", "min", "max_abs", or "custom" How to order the model outputs when using ranked_outputs, either by maximum, minimum, or maximum absolute value. If "custom" Then "ranked_outputs" contains a list of output nodes. rseed : None or int Seeding the randomness in shap value computation (background example choice, interpolation between current and background example, smoothing). Returns ------- For a models with a single output this returns a tensor of SHAP values with the same shape as X. For a model with multiple outputs this returns a list of SHAP value tensors, each of which are the same shape as X. If ranked_outputs is None then this list of tensors matches the number of model outputs. If ranked_outputs is a positive integer a pair is returned (shap_values, indexes), where shap_values is a list of tensors with a length of ranked_outputs, and indexes is a matrix that tells for each sample which output indexes were chosen as "top". """ return self.explainer.shap_values(X, nsamples, ranked_outputs, output_rank_order, rseed)
python
def shap_values(self, X, nsamples=200, ranked_outputs=None, output_rank_order="max", rseed=None): """ Return the values for the model applied to X. Parameters ---------- X : list, if framework == 'tensorflow': numpy.array, or pandas.DataFrame if framework == 'pytorch': torch.tensor A tensor (or list of tensors) of samples (where X.shape[0] == # samples) on which to explain the model's output. ranked_outputs : None or int If ranked_outputs is None then we explain all the outputs in a multi-output model. If ranked_outputs is a positive integer then we only explain that many of the top model outputs (where "top" is determined by output_rank_order). Note that this causes a pair of values to be returned (shap_values, indexes), where phi is a list of numpy arrays for each of the output ranks, and indexes is a matrix that tells for each sample which output indexes were choses as "top". output_rank_order : "max", "min", "max_abs", or "custom" How to order the model outputs when using ranked_outputs, either by maximum, minimum, or maximum absolute value. If "custom" Then "ranked_outputs" contains a list of output nodes. rseed : None or int Seeding the randomness in shap value computation (background example choice, interpolation between current and background example, smoothing). Returns ------- For a models with a single output this returns a tensor of SHAP values with the same shape as X. For a model with multiple outputs this returns a list of SHAP value tensors, each of which are the same shape as X. If ranked_outputs is None then this list of tensors matches the number of model outputs. If ranked_outputs is a positive integer a pair is returned (shap_values, indexes), where shap_values is a list of tensors with a length of ranked_outputs, and indexes is a matrix that tells for each sample which output indexes were chosen as "top". """ return self.explainer.shap_values(X, nsamples, ranked_outputs, output_rank_order, rseed)
[ "def", "shap_values", "(", "self", ",", "X", ",", "nsamples", "=", "200", ",", "ranked_outputs", "=", "None", ",", "output_rank_order", "=", "\"max\"", ",", "rseed", "=", "None", ")", ":", "return", "self", ".", "explainer", ".", "shap_values", "(", "X", ",", "nsamples", ",", "ranked_outputs", ",", "output_rank_order", ",", "rseed", ")" ]
Return the values for the model applied to X. Parameters ---------- X : list, if framework == 'tensorflow': numpy.array, or pandas.DataFrame if framework == 'pytorch': torch.tensor A tensor (or list of tensors) of samples (where X.shape[0] == # samples) on which to explain the model's output. ranked_outputs : None or int If ranked_outputs is None then we explain all the outputs in a multi-output model. If ranked_outputs is a positive integer then we only explain that many of the top model outputs (where "top" is determined by output_rank_order). Note that this causes a pair of values to be returned (shap_values, indexes), where phi is a list of numpy arrays for each of the output ranks, and indexes is a matrix that tells for each sample which output indexes were choses as "top". output_rank_order : "max", "min", "max_abs", or "custom" How to order the model outputs when using ranked_outputs, either by maximum, minimum, or maximum absolute value. If "custom" Then "ranked_outputs" contains a list of output nodes. rseed : None or int Seeding the randomness in shap value computation (background example choice, interpolation between current and background example, smoothing). Returns ------- For a models with a single output this returns a tensor of SHAP values with the same shape as X. For a model with multiple outputs this returns a list of SHAP value tensors, each of which are the same shape as X. If ranked_outputs is None then this list of tensors matches the number of model outputs. If ranked_outputs is a positive integer a pair is returned (shap_values, indexes), where shap_values is a list of tensors with a length of ranked_outputs, and indexes is a matrix that tells for each sample which output indexes were chosen as "top".
[ "Return", "the", "values", "for", "the", "model", "applied", "to", "X", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/gradient.py#L75-L112
24,213
slundberg/shap
shap/plots/force.py
save_html
def save_html(out_file, plot_html): """ Save html plots to an output file. """ internal_open = False if type(out_file) == str: out_file = open(out_file, "w") internal_open = True out_file.write("<html><head><script>\n") # dump the js code bundle_path = os.path.join(os.path.split(__file__)[0], "resources", "bundle.js") with io.open(bundle_path, encoding="utf-8") as f: bundle_data = f.read() out_file.write(bundle_data) out_file.write("</script></head><body>\n") out_file.write(plot_html.data) out_file.write("</body></html>\n") if internal_open: out_file.close()
python
def save_html(out_file, plot_html): """ Save html plots to an output file. """ internal_open = False if type(out_file) == str: out_file = open(out_file, "w") internal_open = True out_file.write("<html><head><script>\n") # dump the js code bundle_path = os.path.join(os.path.split(__file__)[0], "resources", "bundle.js") with io.open(bundle_path, encoding="utf-8") as f: bundle_data = f.read() out_file.write(bundle_data) out_file.write("</script></head><body>\n") out_file.write(plot_html.data) out_file.write("</body></html>\n") if internal_open: out_file.close()
[ "def", "save_html", "(", "out_file", ",", "plot_html", ")", ":", "internal_open", "=", "False", "if", "type", "(", "out_file", ")", "==", "str", ":", "out_file", "=", "open", "(", "out_file", ",", "\"w\"", ")", "internal_open", "=", "True", "out_file", ".", "write", "(", "\"<html><head><script>\\n\"", ")", "# dump the js code", "bundle_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "split", "(", "__file__", ")", "[", "0", "]", ",", "\"resources\"", ",", "\"bundle.js\"", ")", "with", "io", ".", "open", "(", "bundle_path", ",", "encoding", "=", "\"utf-8\"", ")", "as", "f", ":", "bundle_data", "=", "f", ".", "read", "(", ")", "out_file", ".", "write", "(", "bundle_data", ")", "out_file", ".", "write", "(", "\"</script></head><body>\\n\"", ")", "out_file", ".", "write", "(", "plot_html", ".", "data", ")", "out_file", ".", "write", "(", "\"</body></html>\\n\"", ")", "if", "internal_open", ":", "out_file", ".", "close", "(", ")" ]
Save html plots to an output file.
[ "Save", "html", "plots", "to", "an", "output", "file", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/plots/force.py#L217-L239
24,214
slundberg/shap
shap/explainers/deep/deep_tf.py
tensors_blocked_by_false
def tensors_blocked_by_false(ops): """ Follows a set of ops assuming their value is False and find blocked Switch paths. This is used to prune away parts of the model graph that are only used during the training phase (like dropout, batch norm, etc.). """ blocked = [] def recurse(op): if op.type == "Switch": blocked.append(op.outputs[1]) # the true path is blocked since we assume the ops we trace are False else: for out in op.outputs: for c in out.consumers(): recurse(c) for op in ops: recurse(op) return blocked
python
def tensors_blocked_by_false(ops): """ Follows a set of ops assuming their value is False and find blocked Switch paths. This is used to prune away parts of the model graph that are only used during the training phase (like dropout, batch norm, etc.). """ blocked = [] def recurse(op): if op.type == "Switch": blocked.append(op.outputs[1]) # the true path is blocked since we assume the ops we trace are False else: for out in op.outputs: for c in out.consumers(): recurse(c) for op in ops: recurse(op) return blocked
[ "def", "tensors_blocked_by_false", "(", "ops", ")", ":", "blocked", "=", "[", "]", "def", "recurse", "(", "op", ")", ":", "if", "op", ".", "type", "==", "\"Switch\"", ":", "blocked", ".", "append", "(", "op", ".", "outputs", "[", "1", "]", ")", "# the true path is blocked since we assume the ops we trace are False", "else", ":", "for", "out", "in", "op", ".", "outputs", ":", "for", "c", "in", "out", ".", "consumers", "(", ")", ":", "recurse", "(", "c", ")", "for", "op", "in", "ops", ":", "recurse", "(", "op", ")", "return", "blocked" ]
Follows a set of ops assuming their value is False and find blocked Switch paths. This is used to prune away parts of the model graph that are only used during the training phase (like dropout, batch norm, etc.).
[ "Follows", "a", "set", "of", "ops", "assuming", "their", "value", "is", "False", "and", "find", "blocked", "Switch", "paths", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_tf.py#L290-L307
24,215
slundberg/shap
shap/explainers/deep/deep_tf.py
TFDeepExplainer.phi_symbolic
def phi_symbolic(self, i): """ Get the SHAP value computation graph for a given model output. """ if self.phi_symbolics[i] is None: # replace the gradients for all the non-linear activations # we do this by hacking our way into the registry (TODO: find a public API for this if it exists) reg = tf_ops._gradient_registry._registry for n in op_handlers: if n in reg: self.orig_grads[n] = reg[n]["type"] if op_handlers[n] is not passthrough: reg[n]["type"] = self.custom_grad elif n in self.used_types: raise Exception(n + " was used in the model but is not in the gradient registry!") # In TensorFlow 1.10 they started pruning out nodes that they think can't be backpropped # unfortunately that includes the index of embedding layers so we disable that check here if hasattr(tf_gradients_impl, "_IsBackpropagatable"): orig_IsBackpropagatable = tf_gradients_impl._IsBackpropagatable tf_gradients_impl._IsBackpropagatable = lambda tensor: True # define the computation graph for the attribution values using custom a gradient-like computation try: out = self.model_output[:,i] if self.multi_output else self.model_output self.phi_symbolics[i] = tf.gradients(out, self.model_inputs) finally: # reinstate the backpropagatable check if hasattr(tf_gradients_impl, "_IsBackpropagatable"): tf_gradients_impl._IsBackpropagatable = orig_IsBackpropagatable # restore the original gradient definitions for n in op_handlers: if n in reg: reg[n]["type"] = self.orig_grads[n] return self.phi_symbolics[i]
python
def phi_symbolic(self, i): """ Get the SHAP value computation graph for a given model output. """ if self.phi_symbolics[i] is None: # replace the gradients for all the non-linear activations # we do this by hacking our way into the registry (TODO: find a public API for this if it exists) reg = tf_ops._gradient_registry._registry for n in op_handlers: if n in reg: self.orig_grads[n] = reg[n]["type"] if op_handlers[n] is not passthrough: reg[n]["type"] = self.custom_grad elif n in self.used_types: raise Exception(n + " was used in the model but is not in the gradient registry!") # In TensorFlow 1.10 they started pruning out nodes that they think can't be backpropped # unfortunately that includes the index of embedding layers so we disable that check here if hasattr(tf_gradients_impl, "_IsBackpropagatable"): orig_IsBackpropagatable = tf_gradients_impl._IsBackpropagatable tf_gradients_impl._IsBackpropagatable = lambda tensor: True # define the computation graph for the attribution values using custom a gradient-like computation try: out = self.model_output[:,i] if self.multi_output else self.model_output self.phi_symbolics[i] = tf.gradients(out, self.model_inputs) finally: # reinstate the backpropagatable check if hasattr(tf_gradients_impl, "_IsBackpropagatable"): tf_gradients_impl._IsBackpropagatable = orig_IsBackpropagatable # restore the original gradient definitions for n in op_handlers: if n in reg: reg[n]["type"] = self.orig_grads[n] return self.phi_symbolics[i]
[ "def", "phi_symbolic", "(", "self", ",", "i", ")", ":", "if", "self", ".", "phi_symbolics", "[", "i", "]", "is", "None", ":", "# replace the gradients for all the non-linear activations", "# we do this by hacking our way into the registry (TODO: find a public API for this if it exists)", "reg", "=", "tf_ops", ".", "_gradient_registry", ".", "_registry", "for", "n", "in", "op_handlers", ":", "if", "n", "in", "reg", ":", "self", ".", "orig_grads", "[", "n", "]", "=", "reg", "[", "n", "]", "[", "\"type\"", "]", "if", "op_handlers", "[", "n", "]", "is", "not", "passthrough", ":", "reg", "[", "n", "]", "[", "\"type\"", "]", "=", "self", ".", "custom_grad", "elif", "n", "in", "self", ".", "used_types", ":", "raise", "Exception", "(", "n", "+", "\" was used in the model but is not in the gradient registry!\"", ")", "# In TensorFlow 1.10 they started pruning out nodes that they think can't be backpropped", "# unfortunately that includes the index of embedding layers so we disable that check here", "if", "hasattr", "(", "tf_gradients_impl", ",", "\"_IsBackpropagatable\"", ")", ":", "orig_IsBackpropagatable", "=", "tf_gradients_impl", ".", "_IsBackpropagatable", "tf_gradients_impl", ".", "_IsBackpropagatable", "=", "lambda", "tensor", ":", "True", "# define the computation graph for the attribution values using custom a gradient-like computation", "try", ":", "out", "=", "self", ".", "model_output", "[", ":", ",", "i", "]", "if", "self", ".", "multi_output", "else", "self", ".", "model_output", "self", ".", "phi_symbolics", "[", "i", "]", "=", "tf", ".", "gradients", "(", "out", ",", "self", ".", "model_inputs", ")", "finally", ":", "# reinstate the backpropagatable check", "if", "hasattr", "(", "tf_gradients_impl", ",", "\"_IsBackpropagatable\"", ")", ":", "tf_gradients_impl", ".", "_IsBackpropagatable", "=", "orig_IsBackpropagatable", "# restore the original gradient definitions", "for", "n", "in", "op_handlers", ":", "if", "n", "in", "reg", ":", "reg", "[", "n", "]", 
"[", "\"type\"", "]", "=", "self", ".", "orig_grads", "[", "n", "]", "return", "self", ".", "phi_symbolics", "[", "i", "]" ]
Get the SHAP value computation graph for a given model output.
[ "Get", "the", "SHAP", "value", "computation", "graph", "for", "a", "given", "model", "output", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_tf.py#L178-L214
24,216
slundberg/shap
shap/explainers/deep/deep_tf.py
TFDeepExplainer.run
def run(self, out, model_inputs, X): """ Runs the model while also setting the learning phase flags to False. """ feed_dict = dict(zip(model_inputs, X)) for t in self.learning_phase_flags: feed_dict[t] = False return self.session.run(out, feed_dict)
python
def run(self, out, model_inputs, X): """ Runs the model while also setting the learning phase flags to False. """ feed_dict = dict(zip(model_inputs, X)) for t in self.learning_phase_flags: feed_dict[t] = False return self.session.run(out, feed_dict)
[ "def", "run", "(", "self", ",", "out", ",", "model_inputs", ",", "X", ")", ":", "feed_dict", "=", "dict", "(", "zip", "(", "model_inputs", ",", "X", ")", ")", "for", "t", "in", "self", ".", "learning_phase_flags", ":", "feed_dict", "[", "t", "]", "=", "False", "return", "self", ".", "session", ".", "run", "(", "out", ",", "feed_dict", ")" ]
Runs the model while also setting the learning phase flags to False.
[ "Runs", "the", "model", "while", "also", "setting", "the", "learning", "phase", "flags", "to", "False", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_tf.py#L276-L282
24,217
slundberg/shap
shap/explainers/deep/deep_tf.py
TFDeepExplainer.custom_grad
def custom_grad(self, op, *grads): """ Passes a gradient op creation request to the correct handler. """ return op_handlers[op.type](self, op, *grads)
python
def custom_grad(self, op, *grads): """ Passes a gradient op creation request to the correct handler. """ return op_handlers[op.type](self, op, *grads)
[ "def", "custom_grad", "(", "self", ",", "op", ",", "*", "grads", ")", ":", "return", "op_handlers", "[", "op", ".", "type", "]", "(", "self", ",", "op", ",", "*", "grads", ")" ]
Passes a gradient op creation request to the correct handler.
[ "Passes", "a", "gradient", "op", "creation", "request", "to", "the", "correct", "handler", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_tf.py#L284-L287
24,218
slundberg/shap
shap/benchmark/experiments.py
run_remote_experiments
def run_remote_experiments(experiments, thread_hosts, rate_limit=10): """ Use ssh to run the experiments on remote machines in parallel. Parameters ---------- experiments : iterable Output of shap.benchmark.experiments(...). thread_hosts : list of strings Each host has the format "host_name:path_to_python_binary" and can appear multiple times in the list (one for each parallel execution you want on that machine). rate_limit : int How many ssh connections we make per minute to each host (to avoid throttling issues). """ global ssh_conn_per_min_limit ssh_conn_per_min_limit = rate_limit # first we kill any remaining workers from previous runs # note we don't check_call because pkill kills our ssh call as well thread_hosts = copy.copy(thread_hosts) random.shuffle(thread_hosts) for host in set(thread_hosts): hostname,_ = host.split(":") try: subprocess.run(["ssh", hostname, "pkill -f shap.benchmark.run_experiment"], timeout=15) except subprocess.TimeoutExpired: print("Failed to connect to", hostname, "after 15 seconds! Exiting.") return experiments = copy.copy(list(experiments)) random.shuffle(experiments) # this way all the hard experiments don't get put on one machine global nexperiments, total_sent, total_done, total_failed, host_records nexperiments = len(experiments) total_sent = 0 total_done = 0 total_failed = 0 host_records = {} q = Queue() for host in thread_hosts: worker = Thread(target=__thread_worker, args=(q, host)) worker.setDaemon(True) worker.start() for experiment in experiments: q.put(experiment) q.join()
python
def run_remote_experiments(experiments, thread_hosts, rate_limit=10): """ Use ssh to run the experiments on remote machines in parallel. Parameters ---------- experiments : iterable Output of shap.benchmark.experiments(...). thread_hosts : list of strings Each host has the format "host_name:path_to_python_binary" and can appear multiple times in the list (one for each parallel execution you want on that machine). rate_limit : int How many ssh connections we make per minute to each host (to avoid throttling issues). """ global ssh_conn_per_min_limit ssh_conn_per_min_limit = rate_limit # first we kill any remaining workers from previous runs # note we don't check_call because pkill kills our ssh call as well thread_hosts = copy.copy(thread_hosts) random.shuffle(thread_hosts) for host in set(thread_hosts): hostname,_ = host.split(":") try: subprocess.run(["ssh", hostname, "pkill -f shap.benchmark.run_experiment"], timeout=15) except subprocess.TimeoutExpired: print("Failed to connect to", hostname, "after 15 seconds! Exiting.") return experiments = copy.copy(list(experiments)) random.shuffle(experiments) # this way all the hard experiments don't get put on one machine global nexperiments, total_sent, total_done, total_failed, host_records nexperiments = len(experiments) total_sent = 0 total_done = 0 total_failed = 0 host_records = {} q = Queue() for host in thread_hosts: worker = Thread(target=__thread_worker, args=(q, host)) worker.setDaemon(True) worker.start() for experiment in experiments: q.put(experiment) q.join()
[ "def", "run_remote_experiments", "(", "experiments", ",", "thread_hosts", ",", "rate_limit", "=", "10", ")", ":", "global", "ssh_conn_per_min_limit", "ssh_conn_per_min_limit", "=", "rate_limit", "# first we kill any remaining workers from previous runs", "# note we don't check_call because pkill kills our ssh call as well", "thread_hosts", "=", "copy", ".", "copy", "(", "thread_hosts", ")", "random", ".", "shuffle", "(", "thread_hosts", ")", "for", "host", "in", "set", "(", "thread_hosts", ")", ":", "hostname", ",", "_", "=", "host", ".", "split", "(", "\":\"", ")", "try", ":", "subprocess", ".", "run", "(", "[", "\"ssh\"", ",", "hostname", ",", "\"pkill -f shap.benchmark.run_experiment\"", "]", ",", "timeout", "=", "15", ")", "except", "subprocess", ".", "TimeoutExpired", ":", "print", "(", "\"Failed to connect to\"", ",", "hostname", ",", "\"after 15 seconds! Exiting.\"", ")", "return", "experiments", "=", "copy", ".", "copy", "(", "list", "(", "experiments", ")", ")", "random", ".", "shuffle", "(", "experiments", ")", "# this way all the hard experiments don't get put on one machine", "global", "nexperiments", ",", "total_sent", ",", "total_done", ",", "total_failed", ",", "host_records", "nexperiments", "=", "len", "(", "experiments", ")", "total_sent", "=", "0", "total_done", "=", "0", "total_failed", "=", "0", "host_records", "=", "{", "}", "q", "=", "Queue", "(", ")", "for", "host", "in", "thread_hosts", ":", "worker", "=", "Thread", "(", "target", "=", "__thread_worker", ",", "args", "=", "(", "q", ",", "host", ")", ")", "worker", ".", "setDaemon", "(", "True", ")", "worker", ".", "start", "(", ")", "for", "experiment", "in", "experiments", ":", "q", ".", "put", "(", "experiment", ")", "q", ".", "join", "(", ")" ]
Use ssh to run the experiments on remote machines in parallel. Parameters ---------- experiments : iterable Output of shap.benchmark.experiments(...). thread_hosts : list of strings Each host has the format "host_name:path_to_python_binary" and can appear multiple times in the list (one for each parallel execution you want on that machine). rate_limit : int How many ssh connections we make per minute to each host (to avoid throttling issues).
[ "Use", "ssh", "to", "run", "the", "experiments", "on", "remote", "machines", "in", "parallel", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/experiments.py#L322-L372
24,219
slundberg/shap
shap/explainers/kernel.py
kmeans
def kmeans(X, k, round_values=True): """ Summarize a dataset with k mean samples weighted by the number of data points they each represent. Parameters ---------- X : numpy.array or pandas.DataFrame Matrix of data samples to summarize (# samples x # features) k : int Number of means to use for approximation. round_values : bool For all i, round the ith dimension of each mean sample to match the nearest value from X[:,i]. This ensures discrete features always get a valid value. Returns ------- DenseData object. """ group_names = [str(i) for i in range(X.shape[1])] if str(type(X)).endswith("'pandas.core.frame.DataFrame'>"): group_names = X.columns X = X.values kmeans = KMeans(n_clusters=k, random_state=0).fit(X) if round_values: for i in range(k): for j in range(X.shape[1]): ind = np.argmin(np.abs(X[:,j] - kmeans.cluster_centers_[i,j])) kmeans.cluster_centers_[i,j] = X[ind,j] return DenseData(kmeans.cluster_centers_, group_names, None, 1.0*np.bincount(kmeans.labels_))
python
def kmeans(X, k, round_values=True): """ Summarize a dataset with k mean samples weighted by the number of data points they each represent. Parameters ---------- X : numpy.array or pandas.DataFrame Matrix of data samples to summarize (# samples x # features) k : int Number of means to use for approximation. round_values : bool For all i, round the ith dimension of each mean sample to match the nearest value from X[:,i]. This ensures discrete features always get a valid value. Returns ------- DenseData object. """ group_names = [str(i) for i in range(X.shape[1])] if str(type(X)).endswith("'pandas.core.frame.DataFrame'>"): group_names = X.columns X = X.values kmeans = KMeans(n_clusters=k, random_state=0).fit(X) if round_values: for i in range(k): for j in range(X.shape[1]): ind = np.argmin(np.abs(X[:,j] - kmeans.cluster_centers_[i,j])) kmeans.cluster_centers_[i,j] = X[ind,j] return DenseData(kmeans.cluster_centers_, group_names, None, 1.0*np.bincount(kmeans.labels_))
[ "def", "kmeans", "(", "X", ",", "k", ",", "round_values", "=", "True", ")", ":", "group_names", "=", "[", "str", "(", "i", ")", "for", "i", "in", "range", "(", "X", ".", "shape", "[", "1", "]", ")", "]", "if", "str", "(", "type", "(", "X", ")", ")", ".", "endswith", "(", "\"'pandas.core.frame.DataFrame'>\"", ")", ":", "group_names", "=", "X", ".", "columns", "X", "=", "X", ".", "values", "kmeans", "=", "KMeans", "(", "n_clusters", "=", "k", ",", "random_state", "=", "0", ")", ".", "fit", "(", "X", ")", "if", "round_values", ":", "for", "i", "in", "range", "(", "k", ")", ":", "for", "j", "in", "range", "(", "X", ".", "shape", "[", "1", "]", ")", ":", "ind", "=", "np", ".", "argmin", "(", "np", ".", "abs", "(", "X", "[", ":", ",", "j", "]", "-", "kmeans", ".", "cluster_centers_", "[", "i", ",", "j", "]", ")", ")", "kmeans", ".", "cluster_centers_", "[", "i", ",", "j", "]", "=", "X", "[", "ind", ",", "j", "]", "return", "DenseData", "(", "kmeans", ".", "cluster_centers_", ",", "group_names", ",", "None", ",", "1.0", "*", "np", ".", "bincount", "(", "kmeans", ".", "labels_", ")", ")" ]
Summarize a dataset with k mean samples weighted by the number of data points they each represent. Parameters ---------- X : numpy.array or pandas.DataFrame Matrix of data samples to summarize (# samples x # features) k : int Number of means to use for approximation. round_values : bool For all i, round the ith dimension of each mean sample to match the nearest value from X[:,i]. This ensures discrete features always get a valid value. Returns ------- DenseData object.
[ "Summarize", "a", "dataset", "with", "k", "mean", "samples", "weighted", "by", "the", "number", "of", "data", "points", "they", "each", "represent", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/kernel.py#L18-L50
24,220
slundberg/shap
shap/plots/embedding.py
embedding_plot
def embedding_plot(ind, shap_values, feature_names=None, method="pca", alpha=1.0, show=True): """ Use the SHAP values as an embedding which we project to 2D for visualization. Parameters ---------- ind : int or string If this is an int it is the index of the feature to use to color the embedding. If this is a string it is either the name of the feature, or it can have the form "rank(int)" to specify the feature with that rank (ordered by mean absolute SHAP value over all the samples), or "sum()" to mean the sum of all the SHAP values, which is the model's output (minus it's expected value). shap_values : numpy.array Matrix of SHAP values (# samples x # features). feature_names : None or list The names of the features in the shap_values array. method : "pca" or numpy.array How to reduce the dimensions of the shap_values to 2D. If "pca" then the 2D PCA projection of shap_values is used. If a numpy array then is should be (# samples x 2) and represent the embedding of that values. alpha : float The transparency of the data points (between 0 and 1). This can be useful to the show density of the data points when using a large dataset. 
""" if feature_names is None: feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])] ind = convert_name(ind, shap_values, feature_names) if ind == "sum()": cvals = shap_values.sum(1) fname = "sum(SHAP values)" else: cvals = shap_values[:,ind] fname = feature_names[ind] # see if we need to compute the embedding if type(method) == str and method == "pca": pca = sklearn.decomposition.PCA(2) embedding_values = pca.fit_transform(shap_values) elif hasattr(method, "shape") and method.shape[1] == 2: embedding_values = method else: print("Unsupported embedding method:", method) pl.scatter( embedding_values[:,0], embedding_values[:,1], c=cvals, cmap=colors.red_blue, alpha=alpha, linewidth=0 ) pl.axis("off") #pl.title(feature_names[ind]) cb = pl.colorbar() cb.set_label("SHAP value for\n"+fname, size=13) cb.outline.set_visible(False) pl.gcf().set_size_inches(7.5, 5) bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted()) cb.ax.set_aspect((bbox.height - 0.7) * 10) cb.set_alpha(1) if show: pl.show()
python
def embedding_plot(ind, shap_values, feature_names=None, method="pca", alpha=1.0, show=True): """ Use the SHAP values as an embedding which we project to 2D for visualization. Parameters ---------- ind : int or string If this is an int it is the index of the feature to use to color the embedding. If this is a string it is either the name of the feature, or it can have the form "rank(int)" to specify the feature with that rank (ordered by mean absolute SHAP value over all the samples), or "sum()" to mean the sum of all the SHAP values, which is the model's output (minus it's expected value). shap_values : numpy.array Matrix of SHAP values (# samples x # features). feature_names : None or list The names of the features in the shap_values array. method : "pca" or numpy.array How to reduce the dimensions of the shap_values to 2D. If "pca" then the 2D PCA projection of shap_values is used. If a numpy array then is should be (# samples x 2) and represent the embedding of that values. alpha : float The transparency of the data points (between 0 and 1). This can be useful to the show density of the data points when using a large dataset. 
""" if feature_names is None: feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])] ind = convert_name(ind, shap_values, feature_names) if ind == "sum()": cvals = shap_values.sum(1) fname = "sum(SHAP values)" else: cvals = shap_values[:,ind] fname = feature_names[ind] # see if we need to compute the embedding if type(method) == str and method == "pca": pca = sklearn.decomposition.PCA(2) embedding_values = pca.fit_transform(shap_values) elif hasattr(method, "shape") and method.shape[1] == 2: embedding_values = method else: print("Unsupported embedding method:", method) pl.scatter( embedding_values[:,0], embedding_values[:,1], c=cvals, cmap=colors.red_blue, alpha=alpha, linewidth=0 ) pl.axis("off") #pl.title(feature_names[ind]) cb = pl.colorbar() cb.set_label("SHAP value for\n"+fname, size=13) cb.outline.set_visible(False) pl.gcf().set_size_inches(7.5, 5) bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted()) cb.ax.set_aspect((bbox.height - 0.7) * 10) cb.set_alpha(1) if show: pl.show()
[ "def", "embedding_plot", "(", "ind", ",", "shap_values", ",", "feature_names", "=", "None", ",", "method", "=", "\"pca\"", ",", "alpha", "=", "1.0", ",", "show", "=", "True", ")", ":", "if", "feature_names", "is", "None", ":", "feature_names", "=", "[", "labels", "[", "'FEATURE'", "]", "%", "str", "(", "i", ")", "for", "i", "in", "range", "(", "shap_values", ".", "shape", "[", "1", "]", ")", "]", "ind", "=", "convert_name", "(", "ind", ",", "shap_values", ",", "feature_names", ")", "if", "ind", "==", "\"sum()\"", ":", "cvals", "=", "shap_values", ".", "sum", "(", "1", ")", "fname", "=", "\"sum(SHAP values)\"", "else", ":", "cvals", "=", "shap_values", "[", ":", ",", "ind", "]", "fname", "=", "feature_names", "[", "ind", "]", "# see if we need to compute the embedding", "if", "type", "(", "method", ")", "==", "str", "and", "method", "==", "\"pca\"", ":", "pca", "=", "sklearn", ".", "decomposition", ".", "PCA", "(", "2", ")", "embedding_values", "=", "pca", ".", "fit_transform", "(", "shap_values", ")", "elif", "hasattr", "(", "method", ",", "\"shape\"", ")", "and", "method", ".", "shape", "[", "1", "]", "==", "2", ":", "embedding_values", "=", "method", "else", ":", "print", "(", "\"Unsupported embedding method:\"", ",", "method", ")", "pl", ".", "scatter", "(", "embedding_values", "[", ":", ",", "0", "]", ",", "embedding_values", "[", ":", ",", "1", "]", ",", "c", "=", "cvals", ",", "cmap", "=", "colors", ".", "red_blue", ",", "alpha", "=", "alpha", ",", "linewidth", "=", "0", ")", "pl", ".", "axis", "(", "\"off\"", ")", "#pl.title(feature_names[ind])", "cb", "=", "pl", ".", "colorbar", "(", ")", "cb", ".", "set_label", "(", "\"SHAP value for\\n\"", "+", "fname", ",", "size", "=", "13", ")", "cb", ".", "outline", ".", "set_visible", "(", "False", ")", "pl", ".", "gcf", "(", ")", ".", "set_size_inches", "(", "7.5", ",", "5", ")", "bbox", "=", "cb", ".", "ax", ".", "get_window_extent", "(", ")", ".", "transformed", "(", "pl", ".", "gcf", "(", ")", ".", 
"dpi_scale_trans", ".", "inverted", "(", ")", ")", "cb", ".", "ax", ".", "set_aspect", "(", "(", "bbox", ".", "height", "-", "0.7", ")", "*", "10", ")", "cb", ".", "set_alpha", "(", "1", ")", "if", "show", ":", "pl", ".", "show", "(", ")" ]
Use the SHAP values as an embedding which we project to 2D for visualization. Parameters ---------- ind : int or string If this is an int it is the index of the feature to use to color the embedding. If this is a string it is either the name of the feature, or it can have the form "rank(int)" to specify the feature with that rank (ordered by mean absolute SHAP value over all the samples), or "sum()" to mean the sum of all the SHAP values, which is the model's output (minus it's expected value). shap_values : numpy.array Matrix of SHAP values (# samples x # features). feature_names : None or list The names of the features in the shap_values array. method : "pca" or numpy.array How to reduce the dimensions of the shap_values to 2D. If "pca" then the 2D PCA projection of shap_values is used. If a numpy array then is should be (# samples x 2) and represent the embedding of that values. alpha : float The transparency of the data points (between 0 and 1). This can be useful to the show density of the data points when using a large dataset.
[ "Use", "the", "SHAP", "values", "as", "an", "embedding", "which", "we", "project", "to", "2D", "for", "visualization", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/plots/embedding.py#L14-L78
24,221
slundberg/shap
shap/benchmark/metrics.py
runtime
def runtime(X, y, model_generator, method_name): """ Runtime transform = "negate" sort_order = 1 """ old_seed = np.random.seed() np.random.seed(3293) # average the method scores over several train/test splits method_reps = [] for i in range(1): X_train, X_test, y_train, _ = train_test_split(__toarray(X), y, test_size=100, random_state=i) # define the model we are going to explain model = model_generator() model.fit(X_train, y_train) # evaluate each method start = time.time() explainer = getattr(methods, method_name)(model, X_train) build_time = time.time() - start start = time.time() explainer(X_test) explain_time = time.time() - start # we always normalize the explain time as though we were explaining 1000 samples # even if to reduce the runtime of the benchmark we do less (like just 100) method_reps.append(build_time + explain_time * 1000.0 / X_test.shape[0]) np.random.seed(old_seed) return None, np.mean(method_reps)
python
def runtime(X, y, model_generator, method_name): """ Runtime transform = "negate" sort_order = 1 """ old_seed = np.random.seed() np.random.seed(3293) # average the method scores over several train/test splits method_reps = [] for i in range(1): X_train, X_test, y_train, _ = train_test_split(__toarray(X), y, test_size=100, random_state=i) # define the model we are going to explain model = model_generator() model.fit(X_train, y_train) # evaluate each method start = time.time() explainer = getattr(methods, method_name)(model, X_train) build_time = time.time() - start start = time.time() explainer(X_test) explain_time = time.time() - start # we always normalize the explain time as though we were explaining 1000 samples # even if to reduce the runtime of the benchmark we do less (like just 100) method_reps.append(build_time + explain_time * 1000.0 / X_test.shape[0]) np.random.seed(old_seed) return None, np.mean(method_reps)
[ "def", "runtime", "(", "X", ",", "y", ",", "model_generator", ",", "method_name", ")", ":", "old_seed", "=", "np", ".", "random", ".", "seed", "(", ")", "np", ".", "random", ".", "seed", "(", "3293", ")", "# average the method scores over several train/test splits", "method_reps", "=", "[", "]", "for", "i", "in", "range", "(", "1", ")", ":", "X_train", ",", "X_test", ",", "y_train", ",", "_", "=", "train_test_split", "(", "__toarray", "(", "X", ")", ",", "y", ",", "test_size", "=", "100", ",", "random_state", "=", "i", ")", "# define the model we are going to explain", "model", "=", "model_generator", "(", ")", "model", ".", "fit", "(", "X_train", ",", "y_train", ")", "# evaluate each method", "start", "=", "time", ".", "time", "(", ")", "explainer", "=", "getattr", "(", "methods", ",", "method_name", ")", "(", "model", ",", "X_train", ")", "build_time", "=", "time", ".", "time", "(", ")", "-", "start", "start", "=", "time", ".", "time", "(", ")", "explainer", "(", "X_test", ")", "explain_time", "=", "time", ".", "time", "(", ")", "-", "start", "# we always normalize the explain time as though we were explaining 1000 samples", "# even if to reduce the runtime of the benchmark we do less (like just 100)", "method_reps", ".", "append", "(", "build_time", "+", "explain_time", "*", "1000.0", "/", "X_test", ".", "shape", "[", "0", "]", ")", "np", ".", "random", ".", "seed", "(", "old_seed", ")", "return", "None", ",", "np", ".", "mean", "(", "method_reps", ")" ]
Runtime transform = "negate" sort_order = 1
[ "Runtime", "transform", "=", "negate", "sort_order", "=", "1" ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L22-L54
24,222
slundberg/shap
shap/benchmark/metrics.py
local_accuracy
def local_accuracy(X, y, model_generator, method_name): """ Local Accuracy transform = "identity" sort_order = 2 """ def score_map(true, pred): """ Converts local accuracy from % of standard deviation to numerical scores for coloring. """ v = min(1.0, np.std(pred - true) / (np.std(true) + 1e-8)) if v < 1e-6: return 1.0 elif v < 0.01: return 0.9 elif v < 0.05: return 0.75 elif v < 0.1: return 0.6 elif v < 0.2: return 0.4 elif v < 0.3: return 0.3 elif v < 0.5: return 0.2 elif v < 0.7: return 0.1 else: return 0.0 def score_function(X_train, X_test, y_train, y_test, attr_function, trained_model, random_state): return measures.local_accuracy( X_train, y_train, X_test, y_test, attr_function(X_test), model_generator, score_map, trained_model ) return None, __score_method(X, y, None, model_generator, score_function, method_name)
python
def local_accuracy(X, y, model_generator, method_name): """ Local Accuracy transform = "identity" sort_order = 2 """ def score_map(true, pred): """ Converts local accuracy from % of standard deviation to numerical scores for coloring. """ v = min(1.0, np.std(pred - true) / (np.std(true) + 1e-8)) if v < 1e-6: return 1.0 elif v < 0.01: return 0.9 elif v < 0.05: return 0.75 elif v < 0.1: return 0.6 elif v < 0.2: return 0.4 elif v < 0.3: return 0.3 elif v < 0.5: return 0.2 elif v < 0.7: return 0.1 else: return 0.0 def score_function(X_train, X_test, y_train, y_test, attr_function, trained_model, random_state): return measures.local_accuracy( X_train, y_train, X_test, y_test, attr_function(X_test), model_generator, score_map, trained_model ) return None, __score_method(X, y, None, model_generator, score_function, method_name)
[ "def", "local_accuracy", "(", "X", ",", "y", ",", "model_generator", ",", "method_name", ")", ":", "def", "score_map", "(", "true", ",", "pred", ")", ":", "\"\"\" Converts local accuracy from % of standard deviation to numerical scores for coloring.\n \"\"\"", "v", "=", "min", "(", "1.0", ",", "np", ".", "std", "(", "pred", "-", "true", ")", "/", "(", "np", ".", "std", "(", "true", ")", "+", "1e-8", ")", ")", "if", "v", "<", "1e-6", ":", "return", "1.0", "elif", "v", "<", "0.01", ":", "return", "0.9", "elif", "v", "<", "0.05", ":", "return", "0.75", "elif", "v", "<", "0.1", ":", "return", "0.6", "elif", "v", "<", "0.2", ":", "return", "0.4", "elif", "v", "<", "0.3", ":", "return", "0.3", "elif", "v", "<", "0.5", ":", "return", "0.2", "elif", "v", "<", "0.7", ":", "return", "0.1", "else", ":", "return", "0.0", "def", "score_function", "(", "X_train", ",", "X_test", ",", "y_train", ",", "y_test", ",", "attr_function", ",", "trained_model", ",", "random_state", ")", ":", "return", "measures", ".", "local_accuracy", "(", "X_train", ",", "y_train", ",", "X_test", ",", "y_test", ",", "attr_function", "(", "X_test", ")", ",", "model_generator", ",", "score_map", ",", "trained_model", ")", "return", "None", ",", "__score_method", "(", "X", ",", "y", ",", "None", ",", "model_generator", ",", "score_function", ",", "method_name", ")" ]
Local Accuracy transform = "identity" sort_order = 2
[ "Local", "Accuracy", "transform", "=", "identity", "sort_order", "=", "2" ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L56-L90
24,223
slundberg/shap
shap/benchmark/metrics.py
__score_method
def __score_method(X, y, fcounts, model_generator, score_function, method_name, nreps=10, test_size=100, cache_dir="/tmp"): """ Test an explanation method. """ old_seed = np.random.seed() np.random.seed(3293) # average the method scores over several train/test splits method_reps = [] data_hash = hashlib.sha256(__toarray(X).flatten()).hexdigest() + hashlib.sha256(__toarray(y)).hexdigest() for i in range(nreps): X_train, X_test, y_train, y_test = train_test_split(__toarray(X), y, test_size=test_size, random_state=i) # define the model we are going to explain, caching so we onlu build it once model_id = "model_cache__v" + "__".join([__version__, data_hash, model_generator.__name__])+".pickle" cache_file = os.path.join(cache_dir, model_id + ".pickle") if os.path.isfile(cache_file): with open(cache_file, "rb") as f: model = pickle.load(f) else: model = model_generator() model.fit(X_train, y_train) with open(cache_file, "wb") as f: pickle.dump(model, f) attr_key = "_".join([model_generator.__name__, method_name, str(test_size), str(nreps), str(i), data_hash]) def score(attr_function): def cached_attr_function(X_inner): if attr_key not in _attribution_cache: _attribution_cache[attr_key] = attr_function(X_inner) return _attribution_cache[attr_key] #cached_attr_function = lambda X: __check_cache(attr_function, X) if fcounts is None: return score_function(X_train, X_test, y_train, y_test, cached_attr_function, model, i) else: scores = [] for f in fcounts: scores.append(score_function(f, X_train, X_test, y_train, y_test, cached_attr_function, model, i)) return np.array(scores) # evaluate the method (only building the attribution function if we need to) if attr_key not in _attribution_cache: method_reps.append(score(getattr(methods, method_name)(model, X_train))) else: method_reps.append(score(None)) np.random.seed(old_seed) return np.array(method_reps).mean(0)
python
def __score_method(X, y, fcounts, model_generator, score_function, method_name, nreps=10, test_size=100, cache_dir="/tmp"): """ Test an explanation method. """ old_seed = np.random.seed() np.random.seed(3293) # average the method scores over several train/test splits method_reps = [] data_hash = hashlib.sha256(__toarray(X).flatten()).hexdigest() + hashlib.sha256(__toarray(y)).hexdigest() for i in range(nreps): X_train, X_test, y_train, y_test = train_test_split(__toarray(X), y, test_size=test_size, random_state=i) # define the model we are going to explain, caching so we onlu build it once model_id = "model_cache__v" + "__".join([__version__, data_hash, model_generator.__name__])+".pickle" cache_file = os.path.join(cache_dir, model_id + ".pickle") if os.path.isfile(cache_file): with open(cache_file, "rb") as f: model = pickle.load(f) else: model = model_generator() model.fit(X_train, y_train) with open(cache_file, "wb") as f: pickle.dump(model, f) attr_key = "_".join([model_generator.__name__, method_name, str(test_size), str(nreps), str(i), data_hash]) def score(attr_function): def cached_attr_function(X_inner): if attr_key not in _attribution_cache: _attribution_cache[attr_key] = attr_function(X_inner) return _attribution_cache[attr_key] #cached_attr_function = lambda X: __check_cache(attr_function, X) if fcounts is None: return score_function(X_train, X_test, y_train, y_test, cached_attr_function, model, i) else: scores = [] for f in fcounts: scores.append(score_function(f, X_train, X_test, y_train, y_test, cached_attr_function, model, i)) return np.array(scores) # evaluate the method (only building the attribution function if we need to) if attr_key not in _attribution_cache: method_reps.append(score(getattr(methods, method_name)(model, X_train))) else: method_reps.append(score(None)) np.random.seed(old_seed) return np.array(method_reps).mean(0)
[ "def", "__score_method", "(", "X", ",", "y", ",", "fcounts", ",", "model_generator", ",", "score_function", ",", "method_name", ",", "nreps", "=", "10", ",", "test_size", "=", "100", ",", "cache_dir", "=", "\"/tmp\"", ")", ":", "old_seed", "=", "np", ".", "random", ".", "seed", "(", ")", "np", ".", "random", ".", "seed", "(", "3293", ")", "# average the method scores over several train/test splits", "method_reps", "=", "[", "]", "data_hash", "=", "hashlib", ".", "sha256", "(", "__toarray", "(", "X", ")", ".", "flatten", "(", ")", ")", ".", "hexdigest", "(", ")", "+", "hashlib", ".", "sha256", "(", "__toarray", "(", "y", ")", ")", ".", "hexdigest", "(", ")", "for", "i", "in", "range", "(", "nreps", ")", ":", "X_train", ",", "X_test", ",", "y_train", ",", "y_test", "=", "train_test_split", "(", "__toarray", "(", "X", ")", ",", "y", ",", "test_size", "=", "test_size", ",", "random_state", "=", "i", ")", "# define the model we are going to explain, caching so we onlu build it once", "model_id", "=", "\"model_cache__v\"", "+", "\"__\"", ".", "join", "(", "[", "__version__", ",", "data_hash", ",", "model_generator", ".", "__name__", "]", ")", "+", "\".pickle\"", "cache_file", "=", "os", ".", "path", ".", "join", "(", "cache_dir", ",", "model_id", "+", "\".pickle\"", ")", "if", "os", ".", "path", ".", "isfile", "(", "cache_file", ")", ":", "with", "open", "(", "cache_file", ",", "\"rb\"", ")", "as", "f", ":", "model", "=", "pickle", ".", "load", "(", "f", ")", "else", ":", "model", "=", "model_generator", "(", ")", "model", ".", "fit", "(", "X_train", ",", "y_train", ")", "with", "open", "(", "cache_file", ",", "\"wb\"", ")", "as", "f", ":", "pickle", ".", "dump", "(", "model", ",", "f", ")", "attr_key", "=", "\"_\"", ".", "join", "(", "[", "model_generator", ".", "__name__", ",", "method_name", ",", "str", "(", "test_size", ")", ",", "str", "(", "nreps", ")", ",", "str", "(", "i", ")", ",", "data_hash", "]", ")", "def", "score", "(", "attr_function", ")", ":", "def", 
"cached_attr_function", "(", "X_inner", ")", ":", "if", "attr_key", "not", "in", "_attribution_cache", ":", "_attribution_cache", "[", "attr_key", "]", "=", "attr_function", "(", "X_inner", ")", "return", "_attribution_cache", "[", "attr_key", "]", "#cached_attr_function = lambda X: __check_cache(attr_function, X)", "if", "fcounts", "is", "None", ":", "return", "score_function", "(", "X_train", ",", "X_test", ",", "y_train", ",", "y_test", ",", "cached_attr_function", ",", "model", ",", "i", ")", "else", ":", "scores", "=", "[", "]", "for", "f", "in", "fcounts", ":", "scores", ".", "append", "(", "score_function", "(", "f", ",", "X_train", ",", "X_test", ",", "y_train", ",", "y_test", ",", "cached_attr_function", ",", "model", ",", "i", ")", ")", "return", "np", ".", "array", "(", "scores", ")", "# evaluate the method (only building the attribution function if we need to)", "if", "attr_key", "not", "in", "_attribution_cache", ":", "method_reps", ".", "append", "(", "score", "(", "getattr", "(", "methods", ",", "method_name", ")", "(", "model", ",", "X_train", ")", ")", ")", "else", ":", "method_reps", ".", "append", "(", "score", "(", "None", ")", ")", "np", ".", "random", ".", "seed", "(", "old_seed", ")", "return", "np", ".", "array", "(", "method_reps", ")", ".", "mean", "(", "0", ")" ]
Test an explanation method.
[ "Test", "an", "explanation", "method", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L446-L495
24,224
slundberg/shap
shap/explainers/linear.py
LinearExplainer._estimate_transforms
def _estimate_transforms(self, nsamples): """ Uses block matrix inversion identities to quickly estimate transforms. After a bit of matrix math we can isolate a transform matrix (# features x # features) that is independent of any sample we are explaining. It is the result of averaging over all feature permutations, but we just use a fixed number of samples to estimate the value. TODO: Do a brute force enumeration when # feature subsets is less than nsamples. This could happen through a recursive method that uses the same block matrix inversion as below. """ M = len(self.coef) mean_transform = np.zeros((M,M)) x_transform = np.zeros((M,M)) inds = np.arange(M, dtype=np.int) for _ in tqdm(range(nsamples), "Estimating transforms"): np.random.shuffle(inds) cov_inv_SiSi = np.zeros((0,0)) cov_Si = np.zeros((M,0)) for j in range(M): i = inds[j] # use the last Si as the new S cov_S = cov_Si cov_inv_SS = cov_inv_SiSi # get the new cov_Si cov_Si = self.cov[:,inds[:j+1]] # compute the new cov_inv_SiSi from cov_inv_SS d = cov_Si[i,:-1].T t = np.matmul(cov_inv_SS, d) Z = self.cov[i, i] u = Z - np.matmul(t.T, d) cov_inv_SiSi = np.zeros((j+1, j+1)) if j > 0: cov_inv_SiSi[:-1, :-1] = cov_inv_SS + np.outer(t, t) / u cov_inv_SiSi[:-1, -1] = cov_inv_SiSi[-1,:-1] = -t / u cov_inv_SiSi[-1, -1] = 1 / u # + coef @ (Q(bar(Sui)) - Q(bar(S))) mean_transform[i, i] += self.coef[i] # + coef @ R(Sui) coef_R_Si = np.matmul(self.coef[inds[j+1:]], np.matmul(cov_Si, cov_inv_SiSi)[inds[j+1:]]) mean_transform[i, inds[:j+1]] += coef_R_Si # - coef @ R(S) coef_R_S = np.matmul(self.coef[inds[j:]], np.matmul(cov_S, cov_inv_SS)[inds[j:]]) mean_transform[i, inds[:j]] -= coef_R_S # - coef @ (Q(Sui) - Q(S)) x_transform[i, i] += self.coef[i] # + coef @ R(Sui) x_transform[i, inds[:j+1]] += coef_R_Si # - coef @ R(S) x_transform[i, inds[:j]] -= coef_R_S mean_transform /= nsamples x_transform /= nsamples return mean_transform, x_transform
python
def _estimate_transforms(self, nsamples): """ Uses block matrix inversion identities to quickly estimate transforms. After a bit of matrix math we can isolate a transform matrix (# features x # features) that is independent of any sample we are explaining. It is the result of averaging over all feature permutations, but we just use a fixed number of samples to estimate the value. TODO: Do a brute force enumeration when # feature subsets is less than nsamples. This could happen through a recursive method that uses the same block matrix inversion as below. """ M = len(self.coef) mean_transform = np.zeros((M,M)) x_transform = np.zeros((M,M)) inds = np.arange(M, dtype=np.int) for _ in tqdm(range(nsamples), "Estimating transforms"): np.random.shuffle(inds) cov_inv_SiSi = np.zeros((0,0)) cov_Si = np.zeros((M,0)) for j in range(M): i = inds[j] # use the last Si as the new S cov_S = cov_Si cov_inv_SS = cov_inv_SiSi # get the new cov_Si cov_Si = self.cov[:,inds[:j+1]] # compute the new cov_inv_SiSi from cov_inv_SS d = cov_Si[i,:-1].T t = np.matmul(cov_inv_SS, d) Z = self.cov[i, i] u = Z - np.matmul(t.T, d) cov_inv_SiSi = np.zeros((j+1, j+1)) if j > 0: cov_inv_SiSi[:-1, :-1] = cov_inv_SS + np.outer(t, t) / u cov_inv_SiSi[:-1, -1] = cov_inv_SiSi[-1,:-1] = -t / u cov_inv_SiSi[-1, -1] = 1 / u # + coef @ (Q(bar(Sui)) - Q(bar(S))) mean_transform[i, i] += self.coef[i] # + coef @ R(Sui) coef_R_Si = np.matmul(self.coef[inds[j+1:]], np.matmul(cov_Si, cov_inv_SiSi)[inds[j+1:]]) mean_transform[i, inds[:j+1]] += coef_R_Si # - coef @ R(S) coef_R_S = np.matmul(self.coef[inds[j:]], np.matmul(cov_S, cov_inv_SS)[inds[j:]]) mean_transform[i, inds[:j]] -= coef_R_S # - coef @ (Q(Sui) - Q(S)) x_transform[i, i] += self.coef[i] # + coef @ R(Sui) x_transform[i, inds[:j+1]] += coef_R_Si # - coef @ R(S) x_transform[i, inds[:j]] -= coef_R_S mean_transform /= nsamples x_transform /= nsamples return mean_transform, x_transform
[ "def", "_estimate_transforms", "(", "self", ",", "nsamples", ")", ":", "M", "=", "len", "(", "self", ".", "coef", ")", "mean_transform", "=", "np", ".", "zeros", "(", "(", "M", ",", "M", ")", ")", "x_transform", "=", "np", ".", "zeros", "(", "(", "M", ",", "M", ")", ")", "inds", "=", "np", ".", "arange", "(", "M", ",", "dtype", "=", "np", ".", "int", ")", "for", "_", "in", "tqdm", "(", "range", "(", "nsamples", ")", ",", "\"Estimating transforms\"", ")", ":", "np", ".", "random", ".", "shuffle", "(", "inds", ")", "cov_inv_SiSi", "=", "np", ".", "zeros", "(", "(", "0", ",", "0", ")", ")", "cov_Si", "=", "np", ".", "zeros", "(", "(", "M", ",", "0", ")", ")", "for", "j", "in", "range", "(", "M", ")", ":", "i", "=", "inds", "[", "j", "]", "# use the last Si as the new S", "cov_S", "=", "cov_Si", "cov_inv_SS", "=", "cov_inv_SiSi", "# get the new cov_Si", "cov_Si", "=", "self", ".", "cov", "[", ":", ",", "inds", "[", ":", "j", "+", "1", "]", "]", "# compute the new cov_inv_SiSi from cov_inv_SS", "d", "=", "cov_Si", "[", "i", ",", ":", "-", "1", "]", ".", "T", "t", "=", "np", ".", "matmul", "(", "cov_inv_SS", ",", "d", ")", "Z", "=", "self", ".", "cov", "[", "i", ",", "i", "]", "u", "=", "Z", "-", "np", ".", "matmul", "(", "t", ".", "T", ",", "d", ")", "cov_inv_SiSi", "=", "np", ".", "zeros", "(", "(", "j", "+", "1", ",", "j", "+", "1", ")", ")", "if", "j", ">", "0", ":", "cov_inv_SiSi", "[", ":", "-", "1", ",", ":", "-", "1", "]", "=", "cov_inv_SS", "+", "np", ".", "outer", "(", "t", ",", "t", ")", "/", "u", "cov_inv_SiSi", "[", ":", "-", "1", ",", "-", "1", "]", "=", "cov_inv_SiSi", "[", "-", "1", ",", ":", "-", "1", "]", "=", "-", "t", "/", "u", "cov_inv_SiSi", "[", "-", "1", ",", "-", "1", "]", "=", "1", "/", "u", "# + coef @ (Q(bar(Sui)) - Q(bar(S)))", "mean_transform", "[", "i", ",", "i", "]", "+=", "self", ".", "coef", "[", "i", "]", "# + coef @ R(Sui)", "coef_R_Si", "=", "np", ".", "matmul", "(", "self", ".", "coef", "[", "inds", "[", "j", "+", "1", ":", "]", 
"]", ",", "np", ".", "matmul", "(", "cov_Si", ",", "cov_inv_SiSi", ")", "[", "inds", "[", "j", "+", "1", ":", "]", "]", ")", "mean_transform", "[", "i", ",", "inds", "[", ":", "j", "+", "1", "]", "]", "+=", "coef_R_Si", "# - coef @ R(S)", "coef_R_S", "=", "np", ".", "matmul", "(", "self", ".", "coef", "[", "inds", "[", "j", ":", "]", "]", ",", "np", ".", "matmul", "(", "cov_S", ",", "cov_inv_SS", ")", "[", "inds", "[", "j", ":", "]", "]", ")", "mean_transform", "[", "i", ",", "inds", "[", ":", "j", "]", "]", "-=", "coef_R_S", "# - coef @ (Q(Sui) - Q(S))", "x_transform", "[", "i", ",", "i", "]", "+=", "self", ".", "coef", "[", "i", "]", "# + coef @ R(Sui)", "x_transform", "[", "i", ",", "inds", "[", ":", "j", "+", "1", "]", "]", "+=", "coef_R_Si", "# - coef @ R(S)", "x_transform", "[", "i", ",", "inds", "[", ":", "j", "]", "]", "-=", "coef_R_S", "mean_transform", "/=", "nsamples", "x_transform", "/=", "nsamples", "return", "mean_transform", ",", "x_transform" ]
Uses block matrix inversion identities to quickly estimate transforms. After a bit of matrix math we can isolate a transform matrix (# features x # features) that is independent of any sample we are explaining. It is the result of averaging over all feature permutations, but we just use a fixed number of samples to estimate the value. TODO: Do a brute force enumeration when # feature subsets is less than nsamples. This could happen through a recursive method that uses the same block matrix inversion as below.
[ "Uses", "block", "matrix", "inversion", "identities", "to", "quickly", "estimate", "transforms", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/linear.py#L113-L175
24,225
slundberg/shap
shap/benchmark/models.py
independentlinear60__ffnn
def independentlinear60__ffnn(): """ 4-Layer Neural Network """ from keras.models import Sequential from keras.layers import Dense model = Sequential() model.add(Dense(32, activation='relu', input_dim=60)) model.add(Dense(20, activation='relu')) model.add(Dense(20, activation='relu')) model.add(Dense(1)) model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mean_squared_error']) return KerasWrap(model, 30, flatten_output=True)
python
def independentlinear60__ffnn(): """ 4-Layer Neural Network """ from keras.models import Sequential from keras.layers import Dense model = Sequential() model.add(Dense(32, activation='relu', input_dim=60)) model.add(Dense(20, activation='relu')) model.add(Dense(20, activation='relu')) model.add(Dense(1)) model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mean_squared_error']) return KerasWrap(model, 30, flatten_output=True)
[ "def", "independentlinear60__ffnn", "(", ")", ":", "from", "keras", ".", "models", "import", "Sequential", "from", "keras", ".", "layers", "import", "Dense", "model", "=", "Sequential", "(", ")", "model", ".", "add", "(", "Dense", "(", "32", ",", "activation", "=", "'relu'", ",", "input_dim", "=", "60", ")", ")", "model", ".", "add", "(", "Dense", "(", "20", ",", "activation", "=", "'relu'", ")", ")", "model", ".", "add", "(", "Dense", "(", "20", ",", "activation", "=", "'relu'", ")", ")", "model", ".", "add", "(", "Dense", "(", "1", ")", ")", "model", ".", "compile", "(", "optimizer", "=", "'adam'", ",", "loss", "=", "'mean_squared_error'", ",", "metrics", "=", "[", "'mean_squared_error'", "]", ")", "return", "KerasWrap", "(", "model", ",", "30", ",", "flatten_output", "=", "True", ")" ]
4-Layer Neural Network
[ "4", "-", "Layer", "Neural", "Network" ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/models.py#L114-L130
24,226
slundberg/shap
shap/benchmark/models.py
cric__gbm
def cric__gbm(): """ Gradient Boosted Trees """ import xgboost # max_depth and subsample match the params used for the full cric data in the paper # learning_rate was set a bit higher to allow for faster runtimes # n_estimators was chosen based on a train/test split of the data model = xgboost.XGBClassifier(max_depth=5, n_estimators=400, learning_rate=0.01, subsample=0.2, n_jobs=8, random_state=0) # we want to explain the margin, not the transformed probability outputs model.__orig_predict = model.predict model.predict = lambda X: model.__orig_predict(X, output_margin=True) # pylint: disable=E1123 return model
python
def cric__gbm(): """ Gradient Boosted Trees """ import xgboost # max_depth and subsample match the params used for the full cric data in the paper # learning_rate was set a bit higher to allow for faster runtimes # n_estimators was chosen based on a train/test split of the data model = xgboost.XGBClassifier(max_depth=5, n_estimators=400, learning_rate=0.01, subsample=0.2, n_jobs=8, random_state=0) # we want to explain the margin, not the transformed probability outputs model.__orig_predict = model.predict model.predict = lambda X: model.__orig_predict(X, output_margin=True) # pylint: disable=E1123 return model
[ "def", "cric__gbm", "(", ")", ":", "import", "xgboost", "# max_depth and subsample match the params used for the full cric data in the paper", "# learning_rate was set a bit higher to allow for faster runtimes", "# n_estimators was chosen based on a train/test split of the data", "model", "=", "xgboost", ".", "XGBClassifier", "(", "max_depth", "=", "5", ",", "n_estimators", "=", "400", ",", "learning_rate", "=", "0.01", ",", "subsample", "=", "0.2", ",", "n_jobs", "=", "8", ",", "random_state", "=", "0", ")", "# we want to explain the margin, not the transformed probability outputs", "model", ".", "__orig_predict", "=", "model", ".", "predict", "model", ".", "predict", "=", "lambda", "X", ":", "model", ".", "__orig_predict", "(", "X", ",", "output_margin", "=", "True", ")", "# pylint: disable=E1123", "return", "model" ]
Gradient Boosted Trees
[ "Gradient", "Boosted", "Trees" ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/models.py#L173-L187
24,227
slundberg/shap
shap/benchmark/methods.py
lime_tabular_regression_1000
def lime_tabular_regression_1000(model, data): """ LIME Tabular 1000 """ return lambda X: other.LimeTabularExplainer(model.predict, data, mode="regression").attributions(X, nsamples=1000)
python
def lime_tabular_regression_1000(model, data): """ LIME Tabular 1000 """ return lambda X: other.LimeTabularExplainer(model.predict, data, mode="regression").attributions(X, nsamples=1000)
[ "def", "lime_tabular_regression_1000", "(", "model", ",", "data", ")", ":", "return", "lambda", "X", ":", "other", ".", "LimeTabularExplainer", "(", "model", ".", "predict", ",", "data", ",", "mode", "=", "\"regression\"", ")", ".", "attributions", "(", "X", ",", "nsamples", "=", "1000", ")" ]
LIME Tabular 1000
[ "LIME", "Tabular", "1000" ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/methods.py#L91-L94
24,228
slundberg/shap
shap/explainers/deep/__init__.py
DeepExplainer.shap_values
def shap_values(self, X, ranked_outputs=None, output_rank_order='max'): """ Return approximate SHAP values for the model applied to the data given by X. Parameters ---------- X : list, if framework == 'tensorflow': numpy.array, or pandas.DataFrame if framework == 'pytorch': torch.tensor A tensor (or list of tensors) of samples (where X.shape[0] == # samples) on which to explain the model's output. ranked_outputs : None or int If ranked_outputs is None then we explain all the outputs in a multi-output model. If ranked_outputs is a positive integer then we only explain that many of the top model outputs (where "top" is determined by output_rank_order). Note that this causes a pair of values to be returned (shap_values, indexes), where shap_values is a list of numpy arrays for each of the output ranks, and indexes is a matrix that indicates for each sample which output indexes were choses as "top". output_rank_order : "max", "min", or "max_abs" How to order the model outputs when using ranked_outputs, either by maximum, minimum, or maximum absolute value. Returns ------- For a models with a single output this returns a tensor of SHAP values with the same shape as X. For a model with multiple outputs this returns a list of SHAP value tensors, each of which are the same shape as X. If ranked_outputs is None then this list of tensors matches the number of model outputs. If ranked_outputs is a positive integer a pair is returned (shap_values, indexes), where shap_values is a list of tensors with a length of ranked_outputs, and indexes is a matrix that indicates for each sample which output indexes were chosen as "top". """ return self.explainer.shap_values(X, ranked_outputs, output_rank_order)
python
def shap_values(self, X, ranked_outputs=None, output_rank_order='max'): """ Return approximate SHAP values for the model applied to the data given by X. Parameters ---------- X : list, if framework == 'tensorflow': numpy.array, or pandas.DataFrame if framework == 'pytorch': torch.tensor A tensor (or list of tensors) of samples (where X.shape[0] == # samples) on which to explain the model's output. ranked_outputs : None or int If ranked_outputs is None then we explain all the outputs in a multi-output model. If ranked_outputs is a positive integer then we only explain that many of the top model outputs (where "top" is determined by output_rank_order). Note that this causes a pair of values to be returned (shap_values, indexes), where shap_values is a list of numpy arrays for each of the output ranks, and indexes is a matrix that indicates for each sample which output indexes were choses as "top". output_rank_order : "max", "min", or "max_abs" How to order the model outputs when using ranked_outputs, either by maximum, minimum, or maximum absolute value. Returns ------- For a models with a single output this returns a tensor of SHAP values with the same shape as X. For a model with multiple outputs this returns a list of SHAP value tensors, each of which are the same shape as X. If ranked_outputs is None then this list of tensors matches the number of model outputs. If ranked_outputs is a positive integer a pair is returned (shap_values, indexes), where shap_values is a list of tensors with a length of ranked_outputs, and indexes is a matrix that indicates for each sample which output indexes were chosen as "top". """ return self.explainer.shap_values(X, ranked_outputs, output_rank_order)
[ "def", "shap_values", "(", "self", ",", "X", ",", "ranked_outputs", "=", "None", ",", "output_rank_order", "=", "'max'", ")", ":", "return", "self", ".", "explainer", ".", "shap_values", "(", "X", ",", "ranked_outputs", ",", "output_rank_order", ")" ]
Return approximate SHAP values for the model applied to the data given by X. Parameters ---------- X : list, if framework == 'tensorflow': numpy.array, or pandas.DataFrame if framework == 'pytorch': torch.tensor A tensor (or list of tensors) of samples (where X.shape[0] == # samples) on which to explain the model's output. ranked_outputs : None or int If ranked_outputs is None then we explain all the outputs in a multi-output model. If ranked_outputs is a positive integer then we only explain that many of the top model outputs (where "top" is determined by output_rank_order). Note that this causes a pair of values to be returned (shap_values, indexes), where shap_values is a list of numpy arrays for each of the output ranks, and indexes is a matrix that indicates for each sample which output indexes were choses as "top". output_rank_order : "max", "min", or "max_abs" How to order the model outputs when using ranked_outputs, either by maximum, minimum, or maximum absolute value. Returns ------- For a models with a single output this returns a tensor of SHAP values with the same shape as X. For a model with multiple outputs this returns a list of SHAP value tensors, each of which are the same shape as X. If ranked_outputs is None then this list of tensors matches the number of model outputs. If ranked_outputs is a positive integer a pair is returned (shap_values, indexes), where shap_values is a list of tensors with a length of ranked_outputs, and indexes is a matrix that indicates for each sample which output indexes were chosen as "top".
[ "Return", "approximate", "SHAP", "values", "for", "the", "model", "applied", "to", "the", "data", "given", "by", "X", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/__init__.py#L86-L119
24,229
ray-project/ray
python/ray/rllib/agents/mock.py
_agent_import_failed
def _agent_import_failed(trace): """Returns dummy agent class for if PyTorch etc. is not installed.""" class _AgentImportFailed(Trainer): _name = "AgentImportFailed" _default_config = with_common_config({}) def _setup(self, config): raise ImportError(trace) return _AgentImportFailed
python
def _agent_import_failed(trace): """Returns dummy agent class for if PyTorch etc. is not installed.""" class _AgentImportFailed(Trainer): _name = "AgentImportFailed" _default_config = with_common_config({}) def _setup(self, config): raise ImportError(trace) return _AgentImportFailed
[ "def", "_agent_import_failed", "(", "trace", ")", ":", "class", "_AgentImportFailed", "(", "Trainer", ")", ":", "_name", "=", "\"AgentImportFailed\"", "_default_config", "=", "with_common_config", "(", "{", "}", ")", "def", "_setup", "(", "self", ",", "config", ")", ":", "raise", "ImportError", "(", "trace", ")", "return", "_AgentImportFailed" ]
Returns dummy agent class for if PyTorch etc. is not installed.
[ "Returns", "dummy", "agent", "class", "for", "if", "PyTorch", "etc", ".", "is", "not", "installed", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/agents/mock.py#L108-L118
24,230
ray-project/ray
python/ray/tune/tune.py
run
def run(run_or_experiment, name=None, stop=None, config=None, resources_per_trial=None, num_samples=1, local_dir=None, upload_dir=None, trial_name_creator=None, loggers=None, sync_function=None, checkpoint_freq=0, checkpoint_at_end=False, export_formats=None, max_failures=3, restore=None, search_alg=None, scheduler=None, with_server=False, server_port=TuneServer.DEFAULT_PORT, verbose=2, resume=False, queue_trials=False, reuse_actors=False, trial_executor=None, raise_on_failed_trial=True): """Executes training. Args: run_or_experiment (function|class|str|Experiment): If function|class|str, this is the algorithm or model to train. This may refer to the name of a built-on algorithm (e.g. RLLib's DQN or PPO), a user-defined trainable function or class, or the string identifier of a trainable function or class registered in the tune registry. If Experiment, then Tune will execute training based on Experiment.spec. name (str): Name of experiment. stop (dict): The stopping criteria. The keys may be any field in the return result of 'train()', whichever is reached first. Defaults to empty dict. config (dict): Algorithm-specific configuration for Tune variant generation (e.g. env, hyperparams). Defaults to empty dict. Custom search algorithms may ignore this. resources_per_trial (dict): Machine resources to allocate per trial, e.g. ``{"cpu": 64, "gpu": 8}``. Note that GPUs will not be assigned unless you specify them here. Defaults to 1 CPU and 0 GPUs in ``Trainable.default_resource_request()``. num_samples (int): Number of times to sample from the hyperparameter space. Defaults to 1. If `grid_search` is provided as an argument, the grid will be repeated `num_samples` of times. local_dir (str): Local dir to save training results to. Defaults to ``~/ray_results``. upload_dir (str): Optional URI to sync training results to (e.g. ``s3://bucket``). trial_name_creator (func): Optional function for generating the trial string representation. 
loggers (list): List of logger creators to be used with each Trial. If None, defaults to ray.tune.logger.DEFAULT_LOGGERS. See `ray/tune/logger.py`. sync_function (func|str): Function for syncing the local_dir to upload_dir. If string, then it must be a string template for syncer to run. If not provided, the sync command defaults to standard S3 or gsutil sync comamnds. checkpoint_freq (int): How many training iterations between checkpoints. A value of 0 (default) disables checkpointing. checkpoint_at_end (bool): Whether to checkpoint at the end of the experiment regardless of the checkpoint_freq. Default is False. export_formats (list): List of formats that exported at the end of the experiment. Default is None. max_failures (int): Try to recover a trial from its last checkpoint at least this many times. Only applies if checkpointing is enabled. Setting to -1 will lead to infinite recovery retries. Defaults to 3. restore (str): Path to checkpoint. Only makes sense to set if running 1 trial. Defaults to None. search_alg (SearchAlgorithm): Search Algorithm. Defaults to BasicVariantGenerator. scheduler (TrialScheduler): Scheduler for executing the experiment. Choose among FIFO (default), MedianStopping, AsyncHyperBand, and HyperBand. with_server (bool): Starts a background Tune server. Needed for using the Client API. server_port (int): Port number for launching TuneServer. verbose (int): 0, 1, or 2. Verbosity mode. 0 = silent, 1 = only status updates, 2 = status and trial results. resume (bool|"prompt"): If checkpoint exists, the experiment will resume from there. If resume is "prompt", Tune will prompt if checkpoint detected. queue_trials (bool): Whether to queue trials when the cluster does not currently have enough resources to launch one. This should be set to True when running on an autoscaling cluster to enable automatic scale-up. reuse_actors (bool): Whether to reuse actors between different trials when possible. 
This can drastically speed up experiments that start and stop actors often (e.g., PBT in time-multiplexing mode). This requires trials to have the same resource requirements. trial_executor (TrialExecutor): Manage the execution of trials. raise_on_failed_trial (bool): Raise TuneError if there exists failed trial (of ERROR state) when the experiments complete. Returns: List of Trial objects. Raises: TuneError if any trials failed and `raise_on_failed_trial` is True. Examples: >>> tune.run(mytrainable, scheduler=PopulationBasedTraining()) >>> tune.run(mytrainable, num_samples=5, reuse_actors=True) >>> tune.run( "PG", num_samples=5, config={ "env": "CartPole-v0", "lr": tune.sample_from(lambda _: np.random.rand()) } ) """ experiment = run_or_experiment if not isinstance(run_or_experiment, Experiment): experiment = Experiment( name, run_or_experiment, stop, config, resources_per_trial, num_samples, local_dir, upload_dir, trial_name_creator, loggers, sync_function, checkpoint_freq, checkpoint_at_end, export_formats, max_failures, restore) else: logger.debug("Ignoring some parameters passed into tune.run.") checkpoint_dir = _find_checkpoint_dir(experiment) should_restore = _prompt_restore(checkpoint_dir, resume) runner = None if should_restore: try: runner = TrialRunner.restore(checkpoint_dir, search_alg, scheduler, trial_executor) except Exception: logger.exception("Runner restore failed. 
Restarting experiment.") else: logger.info("Starting a new experiment.") if not runner: scheduler = scheduler or FIFOScheduler() search_alg = search_alg or BasicVariantGenerator() search_alg.add_configurations([experiment]) runner = TrialRunner( search_alg, scheduler=scheduler, metadata_checkpoint_dir=checkpoint_dir, launch_web_server=with_server, server_port=server_port, verbose=bool(verbose > 1), queue_trials=queue_trials, reuse_actors=reuse_actors, trial_executor=trial_executor) if verbose: print(runner.debug_string(max_debug=99999)) last_debug = 0 while not runner.is_finished(): runner.step() if time.time() - last_debug > DEBUG_PRINT_INTERVAL: if verbose: print(runner.debug_string()) last_debug = time.time() if verbose: print(runner.debug_string(max_debug=99999)) wait_for_log_sync() errored_trials = [] for trial in runner.get_trials(): if trial.status != Trial.TERMINATED: errored_trials += [trial] if errored_trials: if raise_on_failed_trial: raise TuneError("Trials did not complete", errored_trials) else: logger.error("Trials did not complete: %s", errored_trials) return runner.get_trials()
python
def run(run_or_experiment, name=None, stop=None, config=None, resources_per_trial=None, num_samples=1, local_dir=None, upload_dir=None, trial_name_creator=None, loggers=None, sync_function=None, checkpoint_freq=0, checkpoint_at_end=False, export_formats=None, max_failures=3, restore=None, search_alg=None, scheduler=None, with_server=False, server_port=TuneServer.DEFAULT_PORT, verbose=2, resume=False, queue_trials=False, reuse_actors=False, trial_executor=None, raise_on_failed_trial=True): """Executes training. Args: run_or_experiment (function|class|str|Experiment): If function|class|str, this is the algorithm or model to train. This may refer to the name of a built-on algorithm (e.g. RLLib's DQN or PPO), a user-defined trainable function or class, or the string identifier of a trainable function or class registered in the tune registry. If Experiment, then Tune will execute training based on Experiment.spec. name (str): Name of experiment. stop (dict): The stopping criteria. The keys may be any field in the return result of 'train()', whichever is reached first. Defaults to empty dict. config (dict): Algorithm-specific configuration for Tune variant generation (e.g. env, hyperparams). Defaults to empty dict. Custom search algorithms may ignore this. resources_per_trial (dict): Machine resources to allocate per trial, e.g. ``{"cpu": 64, "gpu": 8}``. Note that GPUs will not be assigned unless you specify them here. Defaults to 1 CPU and 0 GPUs in ``Trainable.default_resource_request()``. num_samples (int): Number of times to sample from the hyperparameter space. Defaults to 1. If `grid_search` is provided as an argument, the grid will be repeated `num_samples` of times. local_dir (str): Local dir to save training results to. Defaults to ``~/ray_results``. upload_dir (str): Optional URI to sync training results to (e.g. ``s3://bucket``). trial_name_creator (func): Optional function for generating the trial string representation. 
loggers (list): List of logger creators to be used with each Trial. If None, defaults to ray.tune.logger.DEFAULT_LOGGERS. See `ray/tune/logger.py`. sync_function (func|str): Function for syncing the local_dir to upload_dir. If string, then it must be a string template for syncer to run. If not provided, the sync command defaults to standard S3 or gsutil sync comamnds. checkpoint_freq (int): How many training iterations between checkpoints. A value of 0 (default) disables checkpointing. checkpoint_at_end (bool): Whether to checkpoint at the end of the experiment regardless of the checkpoint_freq. Default is False. export_formats (list): List of formats that exported at the end of the experiment. Default is None. max_failures (int): Try to recover a trial from its last checkpoint at least this many times. Only applies if checkpointing is enabled. Setting to -1 will lead to infinite recovery retries. Defaults to 3. restore (str): Path to checkpoint. Only makes sense to set if running 1 trial. Defaults to None. search_alg (SearchAlgorithm): Search Algorithm. Defaults to BasicVariantGenerator. scheduler (TrialScheduler): Scheduler for executing the experiment. Choose among FIFO (default), MedianStopping, AsyncHyperBand, and HyperBand. with_server (bool): Starts a background Tune server. Needed for using the Client API. server_port (int): Port number for launching TuneServer. verbose (int): 0, 1, or 2. Verbosity mode. 0 = silent, 1 = only status updates, 2 = status and trial results. resume (bool|"prompt"): If checkpoint exists, the experiment will resume from there. If resume is "prompt", Tune will prompt if checkpoint detected. queue_trials (bool): Whether to queue trials when the cluster does not currently have enough resources to launch one. This should be set to True when running on an autoscaling cluster to enable automatic scale-up. reuse_actors (bool): Whether to reuse actors between different trials when possible. 
This can drastically speed up experiments that start and stop actors often (e.g., PBT in time-multiplexing mode). This requires trials to have the same resource requirements. trial_executor (TrialExecutor): Manage the execution of trials. raise_on_failed_trial (bool): Raise TuneError if there exists failed trial (of ERROR state) when the experiments complete. Returns: List of Trial objects. Raises: TuneError if any trials failed and `raise_on_failed_trial` is True. Examples: >>> tune.run(mytrainable, scheduler=PopulationBasedTraining()) >>> tune.run(mytrainable, num_samples=5, reuse_actors=True) >>> tune.run( "PG", num_samples=5, config={ "env": "CartPole-v0", "lr": tune.sample_from(lambda _: np.random.rand()) } ) """ experiment = run_or_experiment if not isinstance(run_or_experiment, Experiment): experiment = Experiment( name, run_or_experiment, stop, config, resources_per_trial, num_samples, local_dir, upload_dir, trial_name_creator, loggers, sync_function, checkpoint_freq, checkpoint_at_end, export_formats, max_failures, restore) else: logger.debug("Ignoring some parameters passed into tune.run.") checkpoint_dir = _find_checkpoint_dir(experiment) should_restore = _prompt_restore(checkpoint_dir, resume) runner = None if should_restore: try: runner = TrialRunner.restore(checkpoint_dir, search_alg, scheduler, trial_executor) except Exception: logger.exception("Runner restore failed. 
Restarting experiment.") else: logger.info("Starting a new experiment.") if not runner: scheduler = scheduler or FIFOScheduler() search_alg = search_alg or BasicVariantGenerator() search_alg.add_configurations([experiment]) runner = TrialRunner( search_alg, scheduler=scheduler, metadata_checkpoint_dir=checkpoint_dir, launch_web_server=with_server, server_port=server_port, verbose=bool(verbose > 1), queue_trials=queue_trials, reuse_actors=reuse_actors, trial_executor=trial_executor) if verbose: print(runner.debug_string(max_debug=99999)) last_debug = 0 while not runner.is_finished(): runner.step() if time.time() - last_debug > DEBUG_PRINT_INTERVAL: if verbose: print(runner.debug_string()) last_debug = time.time() if verbose: print(runner.debug_string(max_debug=99999)) wait_for_log_sync() errored_trials = [] for trial in runner.get_trials(): if trial.status != Trial.TERMINATED: errored_trials += [trial] if errored_trials: if raise_on_failed_trial: raise TuneError("Trials did not complete", errored_trials) else: logger.error("Trials did not complete: %s", errored_trials) return runner.get_trials()
[ "def", "run", "(", "run_or_experiment", ",", "name", "=", "None", ",", "stop", "=", "None", ",", "config", "=", "None", ",", "resources_per_trial", "=", "None", ",", "num_samples", "=", "1", ",", "local_dir", "=", "None", ",", "upload_dir", "=", "None", ",", "trial_name_creator", "=", "None", ",", "loggers", "=", "None", ",", "sync_function", "=", "None", ",", "checkpoint_freq", "=", "0", ",", "checkpoint_at_end", "=", "False", ",", "export_formats", "=", "None", ",", "max_failures", "=", "3", ",", "restore", "=", "None", ",", "search_alg", "=", "None", ",", "scheduler", "=", "None", ",", "with_server", "=", "False", ",", "server_port", "=", "TuneServer", ".", "DEFAULT_PORT", ",", "verbose", "=", "2", ",", "resume", "=", "False", ",", "queue_trials", "=", "False", ",", "reuse_actors", "=", "False", ",", "trial_executor", "=", "None", ",", "raise_on_failed_trial", "=", "True", ")", ":", "experiment", "=", "run_or_experiment", "if", "not", "isinstance", "(", "run_or_experiment", ",", "Experiment", ")", ":", "experiment", "=", "Experiment", "(", "name", ",", "run_or_experiment", ",", "stop", ",", "config", ",", "resources_per_trial", ",", "num_samples", ",", "local_dir", ",", "upload_dir", ",", "trial_name_creator", ",", "loggers", ",", "sync_function", ",", "checkpoint_freq", ",", "checkpoint_at_end", ",", "export_formats", ",", "max_failures", ",", "restore", ")", "else", ":", "logger", ".", "debug", "(", "\"Ignoring some parameters passed into tune.run.\"", ")", "checkpoint_dir", "=", "_find_checkpoint_dir", "(", "experiment", ")", "should_restore", "=", "_prompt_restore", "(", "checkpoint_dir", ",", "resume", ")", "runner", "=", "None", "if", "should_restore", ":", "try", ":", "runner", "=", "TrialRunner", ".", "restore", "(", "checkpoint_dir", ",", "search_alg", ",", "scheduler", ",", "trial_executor", ")", "except", "Exception", ":", "logger", ".", "exception", "(", "\"Runner restore failed. 
Restarting experiment.\"", ")", "else", ":", "logger", ".", "info", "(", "\"Starting a new experiment.\"", ")", "if", "not", "runner", ":", "scheduler", "=", "scheduler", "or", "FIFOScheduler", "(", ")", "search_alg", "=", "search_alg", "or", "BasicVariantGenerator", "(", ")", "search_alg", ".", "add_configurations", "(", "[", "experiment", "]", ")", "runner", "=", "TrialRunner", "(", "search_alg", ",", "scheduler", "=", "scheduler", ",", "metadata_checkpoint_dir", "=", "checkpoint_dir", ",", "launch_web_server", "=", "with_server", ",", "server_port", "=", "server_port", ",", "verbose", "=", "bool", "(", "verbose", ">", "1", ")", ",", "queue_trials", "=", "queue_trials", ",", "reuse_actors", "=", "reuse_actors", ",", "trial_executor", "=", "trial_executor", ")", "if", "verbose", ":", "print", "(", "runner", ".", "debug_string", "(", "max_debug", "=", "99999", ")", ")", "last_debug", "=", "0", "while", "not", "runner", ".", "is_finished", "(", ")", ":", "runner", ".", "step", "(", ")", "if", "time", ".", "time", "(", ")", "-", "last_debug", ">", "DEBUG_PRINT_INTERVAL", ":", "if", "verbose", ":", "print", "(", "runner", ".", "debug_string", "(", ")", ")", "last_debug", "=", "time", ".", "time", "(", ")", "if", "verbose", ":", "print", "(", "runner", ".", "debug_string", "(", "max_debug", "=", "99999", ")", ")", "wait_for_log_sync", "(", ")", "errored_trials", "=", "[", "]", "for", "trial", "in", "runner", ".", "get_trials", "(", ")", ":", "if", "trial", ".", "status", "!=", "Trial", ".", "TERMINATED", ":", "errored_trials", "+=", "[", "trial", "]", "if", "errored_trials", ":", "if", "raise_on_failed_trial", ":", "raise", "TuneError", "(", "\"Trials did not complete\"", ",", "errored_trials", ")", "else", ":", "logger", ".", "error", "(", "\"Trials did not complete: %s\"", ",", "errored_trials", ")", "return", "runner", ".", "get_trials", "(", ")" ]
Executes training. Args: run_or_experiment (function|class|str|Experiment): If function|class|str, this is the algorithm or model to train. This may refer to the name of a built-on algorithm (e.g. RLLib's DQN or PPO), a user-defined trainable function or class, or the string identifier of a trainable function or class registered in the tune registry. If Experiment, then Tune will execute training based on Experiment.spec. name (str): Name of experiment. stop (dict): The stopping criteria. The keys may be any field in the return result of 'train()', whichever is reached first. Defaults to empty dict. config (dict): Algorithm-specific configuration for Tune variant generation (e.g. env, hyperparams). Defaults to empty dict. Custom search algorithms may ignore this. resources_per_trial (dict): Machine resources to allocate per trial, e.g. ``{"cpu": 64, "gpu": 8}``. Note that GPUs will not be assigned unless you specify them here. Defaults to 1 CPU and 0 GPUs in ``Trainable.default_resource_request()``. num_samples (int): Number of times to sample from the hyperparameter space. Defaults to 1. If `grid_search` is provided as an argument, the grid will be repeated `num_samples` of times. local_dir (str): Local dir to save training results to. Defaults to ``~/ray_results``. upload_dir (str): Optional URI to sync training results to (e.g. ``s3://bucket``). trial_name_creator (func): Optional function for generating the trial string representation. loggers (list): List of logger creators to be used with each Trial. If None, defaults to ray.tune.logger.DEFAULT_LOGGERS. See `ray/tune/logger.py`. sync_function (func|str): Function for syncing the local_dir to upload_dir. If string, then it must be a string template for syncer to run. If not provided, the sync command defaults to standard S3 or gsutil sync comamnds. checkpoint_freq (int): How many training iterations between checkpoints. A value of 0 (default) disables checkpointing. 
checkpoint_at_end (bool): Whether to checkpoint at the end of the experiment regardless of the checkpoint_freq. Default is False. export_formats (list): List of formats that exported at the end of the experiment. Default is None. max_failures (int): Try to recover a trial from its last checkpoint at least this many times. Only applies if checkpointing is enabled. Setting to -1 will lead to infinite recovery retries. Defaults to 3. restore (str): Path to checkpoint. Only makes sense to set if running 1 trial. Defaults to None. search_alg (SearchAlgorithm): Search Algorithm. Defaults to BasicVariantGenerator. scheduler (TrialScheduler): Scheduler for executing the experiment. Choose among FIFO (default), MedianStopping, AsyncHyperBand, and HyperBand. with_server (bool): Starts a background Tune server. Needed for using the Client API. server_port (int): Port number for launching TuneServer. verbose (int): 0, 1, or 2. Verbosity mode. 0 = silent, 1 = only status updates, 2 = status and trial results. resume (bool|"prompt"): If checkpoint exists, the experiment will resume from there. If resume is "prompt", Tune will prompt if checkpoint detected. queue_trials (bool): Whether to queue trials when the cluster does not currently have enough resources to launch one. This should be set to True when running on an autoscaling cluster to enable automatic scale-up. reuse_actors (bool): Whether to reuse actors between different trials when possible. This can drastically speed up experiments that start and stop actors often (e.g., PBT in time-multiplexing mode). This requires trials to have the same resource requirements. trial_executor (TrialExecutor): Manage the execution of trials. raise_on_failed_trial (bool): Raise TuneError if there exists failed trial (of ERROR state) when the experiments complete. Returns: List of Trial objects. Raises: TuneError if any trials failed and `raise_on_failed_trial` is True. 
Examples: >>> tune.run(mytrainable, scheduler=PopulationBasedTraining()) >>> tune.run(mytrainable, num_samples=5, reuse_actors=True) >>> tune.run( "PG", num_samples=5, config={ "env": "CartPole-v0", "lr": tune.sample_from(lambda _: np.random.rand()) } )
[ "Executes", "training", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/tune.py#L68-L257
24,231
ray-project/ray
python/ray/tune/tune.py
run_experiments
def run_experiments(experiments, search_alg=None, scheduler=None, with_server=False, server_port=TuneServer.DEFAULT_PORT, verbose=2, resume=False, queue_trials=False, reuse_actors=False, trial_executor=None, raise_on_failed_trial=True): """Runs and blocks until all trials finish. Examples: >>> experiment_spec = Experiment("experiment", my_func) >>> run_experiments(experiments=experiment_spec) >>> experiment_spec = {"experiment": {"run": my_func}} >>> run_experiments(experiments=experiment_spec) >>> run_experiments( >>> experiments=experiment_spec, >>> scheduler=MedianStoppingRule(...)) >>> run_experiments( >>> experiments=experiment_spec, >>> search_alg=SearchAlgorithm(), >>> scheduler=MedianStoppingRule(...)) Returns: List of Trial objects, holding data for each executed trial. """ # This is important to do this here # because it schematize the experiments # and it conducts the implicit registration. experiments = convert_to_experiment_list(experiments) trials = [] for exp in experiments: trials += run( exp, search_alg=search_alg, scheduler=scheduler, with_server=with_server, server_port=server_port, verbose=verbose, resume=resume, queue_trials=queue_trials, reuse_actors=reuse_actors, trial_executor=trial_executor, raise_on_failed_trial=raise_on_failed_trial) return trials
python
def run_experiments(experiments, search_alg=None, scheduler=None, with_server=False, server_port=TuneServer.DEFAULT_PORT, verbose=2, resume=False, queue_trials=False, reuse_actors=False, trial_executor=None, raise_on_failed_trial=True): """Runs and blocks until all trials finish. Examples: >>> experiment_spec = Experiment("experiment", my_func) >>> run_experiments(experiments=experiment_spec) >>> experiment_spec = {"experiment": {"run": my_func}} >>> run_experiments(experiments=experiment_spec) >>> run_experiments( >>> experiments=experiment_spec, >>> scheduler=MedianStoppingRule(...)) >>> run_experiments( >>> experiments=experiment_spec, >>> search_alg=SearchAlgorithm(), >>> scheduler=MedianStoppingRule(...)) Returns: List of Trial objects, holding data for each executed trial. """ # This is important to do this here # because it schematize the experiments # and it conducts the implicit registration. experiments = convert_to_experiment_list(experiments) trials = [] for exp in experiments: trials += run( exp, search_alg=search_alg, scheduler=scheduler, with_server=with_server, server_port=server_port, verbose=verbose, resume=resume, queue_trials=queue_trials, reuse_actors=reuse_actors, trial_executor=trial_executor, raise_on_failed_trial=raise_on_failed_trial) return trials
[ "def", "run_experiments", "(", "experiments", ",", "search_alg", "=", "None", ",", "scheduler", "=", "None", ",", "with_server", "=", "False", ",", "server_port", "=", "TuneServer", ".", "DEFAULT_PORT", ",", "verbose", "=", "2", ",", "resume", "=", "False", ",", "queue_trials", "=", "False", ",", "reuse_actors", "=", "False", ",", "trial_executor", "=", "None", ",", "raise_on_failed_trial", "=", "True", ")", ":", "# This is important to do this here", "# because it schematize the experiments", "# and it conducts the implicit registration.", "experiments", "=", "convert_to_experiment_list", "(", "experiments", ")", "trials", "=", "[", "]", "for", "exp", "in", "experiments", ":", "trials", "+=", "run", "(", "exp", ",", "search_alg", "=", "search_alg", ",", "scheduler", "=", "scheduler", ",", "with_server", "=", "with_server", ",", "server_port", "=", "server_port", ",", "verbose", "=", "verbose", ",", "resume", "=", "resume", ",", "queue_trials", "=", "queue_trials", ",", "reuse_actors", "=", "reuse_actors", ",", "trial_executor", "=", "trial_executor", ",", "raise_on_failed_trial", "=", "raise_on_failed_trial", ")", "return", "trials" ]
Runs and blocks until all trials finish. Examples: >>> experiment_spec = Experiment("experiment", my_func) >>> run_experiments(experiments=experiment_spec) >>> experiment_spec = {"experiment": {"run": my_func}} >>> run_experiments(experiments=experiment_spec) >>> run_experiments( >>> experiments=experiment_spec, >>> scheduler=MedianStoppingRule(...)) >>> run_experiments( >>> experiments=experiment_spec, >>> search_alg=SearchAlgorithm(), >>> scheduler=MedianStoppingRule(...)) Returns: List of Trial objects, holding data for each executed trial.
[ "Runs", "and", "blocks", "until", "all", "trials", "finish", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/tune.py#L260-L312
24,232
ray-project/ray
python/ray/experimental/streaming/communication.py
DataOutput._flush
def _flush(self, close=False): """Flushes remaining output records in the output queues to plasma. None is used as special type of record that is propagated from sources to sink to notify that the end of data in a stream. Attributes: close (bool): A flag denoting whether the channel should be also marked as 'closed' (True) or not (False) after flushing. """ for channel in self.forward_channels: if close is True: channel.queue.put_next(None) channel.queue._flush_writes() for channels in self.shuffle_channels: for channel in channels: if close is True: channel.queue.put_next(None) channel.queue._flush_writes() for channels in self.shuffle_key_channels: for channel in channels: if close is True: channel.queue.put_next(None) channel.queue._flush_writes() for channels in self.round_robin_channels: for channel in channels: if close is True: channel.queue.put_next(None) channel.queue._flush_writes()
python
def _flush(self, close=False): """Flushes remaining output records in the output queues to plasma. None is used as special type of record that is propagated from sources to sink to notify that the end of data in a stream. Attributes: close (bool): A flag denoting whether the channel should be also marked as 'closed' (True) or not (False) after flushing. """ for channel in self.forward_channels: if close is True: channel.queue.put_next(None) channel.queue._flush_writes() for channels in self.shuffle_channels: for channel in channels: if close is True: channel.queue.put_next(None) channel.queue._flush_writes() for channels in self.shuffle_key_channels: for channel in channels: if close is True: channel.queue.put_next(None) channel.queue._flush_writes() for channels in self.round_robin_channels: for channel in channels: if close is True: channel.queue.put_next(None) channel.queue._flush_writes()
[ "def", "_flush", "(", "self", ",", "close", "=", "False", ")", ":", "for", "channel", "in", "self", ".", "forward_channels", ":", "if", "close", "is", "True", ":", "channel", ".", "queue", ".", "put_next", "(", "None", ")", "channel", ".", "queue", ".", "_flush_writes", "(", ")", "for", "channels", "in", "self", ".", "shuffle_channels", ":", "for", "channel", "in", "channels", ":", "if", "close", "is", "True", ":", "channel", ".", "queue", ".", "put_next", "(", "None", ")", "channel", ".", "queue", ".", "_flush_writes", "(", ")", "for", "channels", "in", "self", ".", "shuffle_key_channels", ":", "for", "channel", "in", "channels", ":", "if", "close", "is", "True", ":", "channel", ".", "queue", ".", "put_next", "(", "None", ")", "channel", ".", "queue", ".", "_flush_writes", "(", ")", "for", "channels", "in", "self", ".", "round_robin_channels", ":", "for", "channel", "in", "channels", ":", "if", "close", "is", "True", ":", "channel", ".", "queue", ".", "put_next", "(", "None", ")", "channel", ".", "queue", ".", "_flush_writes", "(", ")" ]
Flushes remaining output records in the output queues to plasma. None is used as special type of record that is propagated from sources to sink to notify that the end of data in a stream. Attributes: close (bool): A flag denoting whether the channel should be also marked as 'closed' (True) or not (False) after flushing.
[ "Flushes", "remaining", "output", "records", "in", "the", "output", "queues", "to", "plasma", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/streaming/communication.py#L205-L233
24,233
ray-project/ray
python/ray/rllib/models/preprocessors.py
get_preprocessor
def get_preprocessor(space): """Returns an appropriate preprocessor class for the given space.""" legacy_patch_shapes(space) obs_shape = space.shape if isinstance(space, gym.spaces.Discrete): preprocessor = OneHotPreprocessor elif obs_shape == ATARI_OBS_SHAPE: preprocessor = GenericPixelPreprocessor elif obs_shape == ATARI_RAM_OBS_SHAPE: preprocessor = AtariRamPreprocessor elif isinstance(space, gym.spaces.Tuple): preprocessor = TupleFlatteningPreprocessor elif isinstance(space, gym.spaces.Dict): preprocessor = DictFlatteningPreprocessor else: preprocessor = NoPreprocessor return preprocessor
python
def get_preprocessor(space): """Returns an appropriate preprocessor class for the given space.""" legacy_patch_shapes(space) obs_shape = space.shape if isinstance(space, gym.spaces.Discrete): preprocessor = OneHotPreprocessor elif obs_shape == ATARI_OBS_SHAPE: preprocessor = GenericPixelPreprocessor elif obs_shape == ATARI_RAM_OBS_SHAPE: preprocessor = AtariRamPreprocessor elif isinstance(space, gym.spaces.Tuple): preprocessor = TupleFlatteningPreprocessor elif isinstance(space, gym.spaces.Dict): preprocessor = DictFlatteningPreprocessor else: preprocessor = NoPreprocessor return preprocessor
[ "def", "get_preprocessor", "(", "space", ")", ":", "legacy_patch_shapes", "(", "space", ")", "obs_shape", "=", "space", ".", "shape", "if", "isinstance", "(", "space", ",", "gym", ".", "spaces", ".", "Discrete", ")", ":", "preprocessor", "=", "OneHotPreprocessor", "elif", "obs_shape", "==", "ATARI_OBS_SHAPE", ":", "preprocessor", "=", "GenericPixelPreprocessor", "elif", "obs_shape", "==", "ATARI_RAM_OBS_SHAPE", ":", "preprocessor", "=", "AtariRamPreprocessor", "elif", "isinstance", "(", "space", ",", "gym", ".", "spaces", ".", "Tuple", ")", ":", "preprocessor", "=", "TupleFlatteningPreprocessor", "elif", "isinstance", "(", "space", ",", "gym", ".", "spaces", ".", "Dict", ")", ":", "preprocessor", "=", "DictFlatteningPreprocessor", "else", ":", "preprocessor", "=", "NoPreprocessor", "return", "preprocessor" ]
Returns an appropriate preprocessor class for the given space.
[ "Returns", "an", "appropriate", "preprocessor", "class", "for", "the", "given", "space", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/models/preprocessors.py#L242-L261
24,234
ray-project/ray
python/ray/rllib/models/preprocessors.py
legacy_patch_shapes
def legacy_patch_shapes(space): """Assigns shapes to spaces that don't have shapes. This is only needed for older gym versions that don't set shapes properly for Tuple and Discrete spaces. """ if not hasattr(space, "shape"): if isinstance(space, gym.spaces.Discrete): space.shape = () elif isinstance(space, gym.spaces.Tuple): shapes = [] for s in space.spaces: shape = legacy_patch_shapes(s) shapes.append(shape) space.shape = tuple(shapes) return space.shape
python
def legacy_patch_shapes(space): """Assigns shapes to spaces that don't have shapes. This is only needed for older gym versions that don't set shapes properly for Tuple and Discrete spaces. """ if not hasattr(space, "shape"): if isinstance(space, gym.spaces.Discrete): space.shape = () elif isinstance(space, gym.spaces.Tuple): shapes = [] for s in space.spaces: shape = legacy_patch_shapes(s) shapes.append(shape) space.shape = tuple(shapes) return space.shape
[ "def", "legacy_patch_shapes", "(", "space", ")", ":", "if", "not", "hasattr", "(", "space", ",", "\"shape\"", ")", ":", "if", "isinstance", "(", "space", ",", "gym", ".", "spaces", ".", "Discrete", ")", ":", "space", ".", "shape", "=", "(", ")", "elif", "isinstance", "(", "space", ",", "gym", ".", "spaces", ".", "Tuple", ")", ":", "shapes", "=", "[", "]", "for", "s", "in", "space", ".", "spaces", ":", "shape", "=", "legacy_patch_shapes", "(", "s", ")", "shapes", ".", "append", "(", "shape", ")", "space", ".", "shape", "=", "tuple", "(", "shapes", ")", "return", "space", ".", "shape" ]
Assigns shapes to spaces that don't have shapes. This is only needed for older gym versions that don't set shapes properly for Tuple and Discrete spaces.
[ "Assigns", "shapes", "to", "spaces", "that", "don", "t", "have", "shapes", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/models/preprocessors.py#L264-L281
24,235
ray-project/ray
python/ray/rllib/optimizers/aso_minibatch_buffer.py
MinibatchBuffer.get
def get(self): """Get a new batch from the internal ring buffer. Returns: buf: Data item saved from inqueue. released: True if the item is now removed from the ring buffer. """ if self.ttl[self.idx] <= 0: self.buffers[self.idx] = self.inqueue.get(timeout=300.0) self.ttl[self.idx] = self.cur_max_ttl if self.cur_max_ttl < self.max_ttl: self.cur_max_ttl += 1 buf = self.buffers[self.idx] self.ttl[self.idx] -= 1 released = self.ttl[self.idx] <= 0 if released: self.buffers[self.idx] = None self.idx = (self.idx + 1) % len(self.buffers) return buf, released
python
def get(self): """Get a new batch from the internal ring buffer. Returns: buf: Data item saved from inqueue. released: True if the item is now removed from the ring buffer. """ if self.ttl[self.idx] <= 0: self.buffers[self.idx] = self.inqueue.get(timeout=300.0) self.ttl[self.idx] = self.cur_max_ttl if self.cur_max_ttl < self.max_ttl: self.cur_max_ttl += 1 buf = self.buffers[self.idx] self.ttl[self.idx] -= 1 released = self.ttl[self.idx] <= 0 if released: self.buffers[self.idx] = None self.idx = (self.idx + 1) % len(self.buffers) return buf, released
[ "def", "get", "(", "self", ")", ":", "if", "self", ".", "ttl", "[", "self", ".", "idx", "]", "<=", "0", ":", "self", ".", "buffers", "[", "self", ".", "idx", "]", "=", "self", ".", "inqueue", ".", "get", "(", "timeout", "=", "300.0", ")", "self", ".", "ttl", "[", "self", ".", "idx", "]", "=", "self", ".", "cur_max_ttl", "if", "self", ".", "cur_max_ttl", "<", "self", ".", "max_ttl", ":", "self", ".", "cur_max_ttl", "+=", "1", "buf", "=", "self", ".", "buffers", "[", "self", ".", "idx", "]", "self", ".", "ttl", "[", "self", ".", "idx", "]", "-=", "1", "released", "=", "self", ".", "ttl", "[", "self", ".", "idx", "]", "<=", "0", "if", "released", ":", "self", ".", "buffers", "[", "self", ".", "idx", "]", "=", "None", "self", ".", "idx", "=", "(", "self", ".", "idx", "+", "1", ")", "%", "len", "(", "self", ".", "buffers", ")", "return", "buf", ",", "released" ]
Get a new batch from the internal ring buffer. Returns: buf: Data item saved from inqueue. released: True if the item is now removed from the ring buffer.
[ "Get", "a", "new", "batch", "from", "the", "internal", "ring", "buffer", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/optimizers/aso_minibatch_buffer.py#L30-L48
24,236
ray-project/ray
python/ray/tune/trainable.py
Trainable.train
def train(self): """Runs one logical iteration of training. Subclasses should override ``_train()`` instead to return results. This class automatically fills the following fields in the result: `done` (bool): training is terminated. Filled only if not provided. `time_this_iter_s` (float): Time in seconds this iteration took to run. This may be overriden in order to override the system-computed time difference. `time_total_s` (float): Accumulated time in seconds for this entire experiment. `experiment_id` (str): Unique string identifier for this experiment. This id is preserved across checkpoint / restore calls. `training_iteration` (int): The index of this training iteration, e.g. call to train(). `pid` (str): The pid of the training process. `date` (str): A formatted date of when the result was processed. `timestamp` (str): A UNIX timestamp of when the result was processed. `hostname` (str): Hostname of the machine hosting the training process. `node_ip` (str): Node ip of the machine hosting the training process. Returns: A dict that describes training progress. """ start = time.time() result = self._train() assert isinstance(result, dict), "_train() needs to return a dict." # We do not modify internal state nor update this result if duplicate. 
if RESULT_DUPLICATE in result: return result result = result.copy() self._iteration += 1 self._iterations_since_restore += 1 if result.get(TIME_THIS_ITER_S) is not None: time_this_iter = result[TIME_THIS_ITER_S] else: time_this_iter = time.time() - start self._time_total += time_this_iter self._time_since_restore += time_this_iter result.setdefault(DONE, False) # self._timesteps_total should only be tracked if increments provided if result.get(TIMESTEPS_THIS_ITER) is not None: if self._timesteps_total is None: self._timesteps_total = 0 self._timesteps_total += result[TIMESTEPS_THIS_ITER] self._timesteps_since_restore += result[TIMESTEPS_THIS_ITER] # self._episodes_total should only be tracked if increments provided if result.get(EPISODES_THIS_ITER) is not None: if self._episodes_total is None: self._episodes_total = 0 self._episodes_total += result[EPISODES_THIS_ITER] # self._timesteps_total should not override user-provided total result.setdefault(TIMESTEPS_TOTAL, self._timesteps_total) result.setdefault(EPISODES_TOTAL, self._episodes_total) result.setdefault(TRAINING_ITERATION, self._iteration) # Provides auto-filled neg_mean_loss for avoiding regressions if result.get("mean_loss"): result.setdefault("neg_mean_loss", -result["mean_loss"]) now = datetime.today() result.update( experiment_id=self._experiment_id, date=now.strftime("%Y-%m-%d_%H-%M-%S"), timestamp=int(time.mktime(now.timetuple())), time_this_iter_s=time_this_iter, time_total_s=self._time_total, pid=os.getpid(), hostname=os.uname()[1], node_ip=self._local_ip, config=self.config, time_since_restore=self._time_since_restore, timesteps_since_restore=self._timesteps_since_restore, iterations_since_restore=self._iterations_since_restore) self._log_result(result) return result
python
def train(self): """Runs one logical iteration of training. Subclasses should override ``_train()`` instead to return results. This class automatically fills the following fields in the result: `done` (bool): training is terminated. Filled only if not provided. `time_this_iter_s` (float): Time in seconds this iteration took to run. This may be overriden in order to override the system-computed time difference. `time_total_s` (float): Accumulated time in seconds for this entire experiment. `experiment_id` (str): Unique string identifier for this experiment. This id is preserved across checkpoint / restore calls. `training_iteration` (int): The index of this training iteration, e.g. call to train(). `pid` (str): The pid of the training process. `date` (str): A formatted date of when the result was processed. `timestamp` (str): A UNIX timestamp of when the result was processed. `hostname` (str): Hostname of the machine hosting the training process. `node_ip` (str): Node ip of the machine hosting the training process. Returns: A dict that describes training progress. """ start = time.time() result = self._train() assert isinstance(result, dict), "_train() needs to return a dict." # We do not modify internal state nor update this result if duplicate. 
if RESULT_DUPLICATE in result: return result result = result.copy() self._iteration += 1 self._iterations_since_restore += 1 if result.get(TIME_THIS_ITER_S) is not None: time_this_iter = result[TIME_THIS_ITER_S] else: time_this_iter = time.time() - start self._time_total += time_this_iter self._time_since_restore += time_this_iter result.setdefault(DONE, False) # self._timesteps_total should only be tracked if increments provided if result.get(TIMESTEPS_THIS_ITER) is not None: if self._timesteps_total is None: self._timesteps_total = 0 self._timesteps_total += result[TIMESTEPS_THIS_ITER] self._timesteps_since_restore += result[TIMESTEPS_THIS_ITER] # self._episodes_total should only be tracked if increments provided if result.get(EPISODES_THIS_ITER) is not None: if self._episodes_total is None: self._episodes_total = 0 self._episodes_total += result[EPISODES_THIS_ITER] # self._timesteps_total should not override user-provided total result.setdefault(TIMESTEPS_TOTAL, self._timesteps_total) result.setdefault(EPISODES_TOTAL, self._episodes_total) result.setdefault(TRAINING_ITERATION, self._iteration) # Provides auto-filled neg_mean_loss for avoiding regressions if result.get("mean_loss"): result.setdefault("neg_mean_loss", -result["mean_loss"]) now = datetime.today() result.update( experiment_id=self._experiment_id, date=now.strftime("%Y-%m-%d_%H-%M-%S"), timestamp=int(time.mktime(now.timetuple())), time_this_iter_s=time_this_iter, time_total_s=self._time_total, pid=os.getpid(), hostname=os.uname()[1], node_ip=self._local_ip, config=self.config, time_since_restore=self._time_since_restore, timesteps_since_restore=self._timesteps_since_restore, iterations_since_restore=self._iterations_since_restore) self._log_result(result) return result
[ "def", "train", "(", "self", ")", ":", "start", "=", "time", ".", "time", "(", ")", "result", "=", "self", ".", "_train", "(", ")", "assert", "isinstance", "(", "result", ",", "dict", ")", ",", "\"_train() needs to return a dict.\"", "# We do not modify internal state nor update this result if duplicate.", "if", "RESULT_DUPLICATE", "in", "result", ":", "return", "result", "result", "=", "result", ".", "copy", "(", ")", "self", ".", "_iteration", "+=", "1", "self", ".", "_iterations_since_restore", "+=", "1", "if", "result", ".", "get", "(", "TIME_THIS_ITER_S", ")", "is", "not", "None", ":", "time_this_iter", "=", "result", "[", "TIME_THIS_ITER_S", "]", "else", ":", "time_this_iter", "=", "time", ".", "time", "(", ")", "-", "start", "self", ".", "_time_total", "+=", "time_this_iter", "self", ".", "_time_since_restore", "+=", "time_this_iter", "result", ".", "setdefault", "(", "DONE", ",", "False", ")", "# self._timesteps_total should only be tracked if increments provided", "if", "result", ".", "get", "(", "TIMESTEPS_THIS_ITER", ")", "is", "not", "None", ":", "if", "self", ".", "_timesteps_total", "is", "None", ":", "self", ".", "_timesteps_total", "=", "0", "self", ".", "_timesteps_total", "+=", "result", "[", "TIMESTEPS_THIS_ITER", "]", "self", ".", "_timesteps_since_restore", "+=", "result", "[", "TIMESTEPS_THIS_ITER", "]", "# self._episodes_total should only be tracked if increments provided", "if", "result", ".", "get", "(", "EPISODES_THIS_ITER", ")", "is", "not", "None", ":", "if", "self", ".", "_episodes_total", "is", "None", ":", "self", ".", "_episodes_total", "=", "0", "self", ".", "_episodes_total", "+=", "result", "[", "EPISODES_THIS_ITER", "]", "# self._timesteps_total should not override user-provided total", "result", ".", "setdefault", "(", "TIMESTEPS_TOTAL", ",", "self", ".", "_timesteps_total", ")", "result", ".", "setdefault", "(", "EPISODES_TOTAL", ",", "self", ".", "_episodes_total", ")", "result", ".", "setdefault", "(", "TRAINING_ITERATION", 
",", "self", ".", "_iteration", ")", "# Provides auto-filled neg_mean_loss for avoiding regressions", "if", "result", ".", "get", "(", "\"mean_loss\"", ")", ":", "result", ".", "setdefault", "(", "\"neg_mean_loss\"", ",", "-", "result", "[", "\"mean_loss\"", "]", ")", "now", "=", "datetime", ".", "today", "(", ")", "result", ".", "update", "(", "experiment_id", "=", "self", ".", "_experiment_id", ",", "date", "=", "now", ".", "strftime", "(", "\"%Y-%m-%d_%H-%M-%S\"", ")", ",", "timestamp", "=", "int", "(", "time", ".", "mktime", "(", "now", ".", "timetuple", "(", ")", ")", ")", ",", "time_this_iter_s", "=", "time_this_iter", ",", "time_total_s", "=", "self", ".", "_time_total", ",", "pid", "=", "os", ".", "getpid", "(", ")", ",", "hostname", "=", "os", ".", "uname", "(", ")", "[", "1", "]", ",", "node_ip", "=", "self", ".", "_local_ip", ",", "config", "=", "self", ".", "config", ",", "time_since_restore", "=", "self", ".", "_time_since_restore", ",", "timesteps_since_restore", "=", "self", ".", "_timesteps_since_restore", ",", "iterations_since_restore", "=", "self", ".", "_iterations_since_restore", ")", "self", ".", "_log_result", "(", "result", ")", "return", "result" ]
Runs one logical iteration of training. Subclasses should override ``_train()`` instead to return results. This class automatically fills the following fields in the result: `done` (bool): training is terminated. Filled only if not provided. `time_this_iter_s` (float): Time in seconds this iteration took to run. This may be overriden in order to override the system-computed time difference. `time_total_s` (float): Accumulated time in seconds for this entire experiment. `experiment_id` (str): Unique string identifier for this experiment. This id is preserved across checkpoint / restore calls. `training_iteration` (int): The index of this training iteration, e.g. call to train(). `pid` (str): The pid of the training process. `date` (str): A formatted date of when the result was processed. `timestamp` (str): A UNIX timestamp of when the result was processed. `hostname` (str): Hostname of the machine hosting the training process. `node_ip` (str): Node ip of the machine hosting the training process. Returns: A dict that describes training progress.
[ "Runs", "one", "logical", "iteration", "of", "training", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/trainable.py#L111-L211
24,237
ray-project/ray
python/ray/tune/trainable.py
Trainable.save
def save(self, checkpoint_dir=None): """Saves the current model state to a checkpoint. Subclasses should override ``_save()`` instead to save state. This method dumps additional metadata alongside the saved path. Args: checkpoint_dir (str): Optional dir to place the checkpoint. Returns: Checkpoint path that may be passed to restore(). """ checkpoint_dir = os.path.join(checkpoint_dir or self.logdir, "checkpoint_{}".format(self._iteration)) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) checkpoint = self._save(checkpoint_dir) saved_as_dict = False if isinstance(checkpoint, string_types): if (not checkpoint.startswith(checkpoint_dir) or checkpoint == checkpoint_dir): raise ValueError( "The returned checkpoint path must be within the " "given checkpoint dir {}: {}".format( checkpoint_dir, checkpoint)) if not os.path.exists(checkpoint): raise ValueError( "The returned checkpoint path does not exist: {}".format( checkpoint)) checkpoint_path = checkpoint elif isinstance(checkpoint, dict): saved_as_dict = True checkpoint_path = os.path.join(checkpoint_dir, "checkpoint") with open(checkpoint_path, "wb") as f: pickle.dump(checkpoint, f) else: raise ValueError( "`_save` must return a dict or string type: {}".format( str(type(checkpoint)))) with open(checkpoint_path + ".tune_metadata", "wb") as f: pickle.dump({ "experiment_id": self._experiment_id, "iteration": self._iteration, "timesteps_total": self._timesteps_total, "time_total": self._time_total, "episodes_total": self._episodes_total, "saved_as_dict": saved_as_dict }, f) return checkpoint_path
python
def save(self, checkpoint_dir=None): """Saves the current model state to a checkpoint. Subclasses should override ``_save()`` instead to save state. This method dumps additional metadata alongside the saved path. Args: checkpoint_dir (str): Optional dir to place the checkpoint. Returns: Checkpoint path that may be passed to restore(). """ checkpoint_dir = os.path.join(checkpoint_dir or self.logdir, "checkpoint_{}".format(self._iteration)) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) checkpoint = self._save(checkpoint_dir) saved_as_dict = False if isinstance(checkpoint, string_types): if (not checkpoint.startswith(checkpoint_dir) or checkpoint == checkpoint_dir): raise ValueError( "The returned checkpoint path must be within the " "given checkpoint dir {}: {}".format( checkpoint_dir, checkpoint)) if not os.path.exists(checkpoint): raise ValueError( "The returned checkpoint path does not exist: {}".format( checkpoint)) checkpoint_path = checkpoint elif isinstance(checkpoint, dict): saved_as_dict = True checkpoint_path = os.path.join(checkpoint_dir, "checkpoint") with open(checkpoint_path, "wb") as f: pickle.dump(checkpoint, f) else: raise ValueError( "`_save` must return a dict or string type: {}".format( str(type(checkpoint)))) with open(checkpoint_path + ".tune_metadata", "wb") as f: pickle.dump({ "experiment_id": self._experiment_id, "iteration": self._iteration, "timesteps_total": self._timesteps_total, "time_total": self._time_total, "episodes_total": self._episodes_total, "saved_as_dict": saved_as_dict }, f) return checkpoint_path
[ "def", "save", "(", "self", ",", "checkpoint_dir", "=", "None", ")", ":", "checkpoint_dir", "=", "os", ".", "path", ".", "join", "(", "checkpoint_dir", "or", "self", ".", "logdir", ",", "\"checkpoint_{}\"", ".", "format", "(", "self", ".", "_iteration", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "checkpoint_dir", ")", ":", "os", ".", "makedirs", "(", "checkpoint_dir", ")", "checkpoint", "=", "self", ".", "_save", "(", "checkpoint_dir", ")", "saved_as_dict", "=", "False", "if", "isinstance", "(", "checkpoint", ",", "string_types", ")", ":", "if", "(", "not", "checkpoint", ".", "startswith", "(", "checkpoint_dir", ")", "or", "checkpoint", "==", "checkpoint_dir", ")", ":", "raise", "ValueError", "(", "\"The returned checkpoint path must be within the \"", "\"given checkpoint dir {}: {}\"", ".", "format", "(", "checkpoint_dir", ",", "checkpoint", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "checkpoint", ")", ":", "raise", "ValueError", "(", "\"The returned checkpoint path does not exist: {}\"", ".", "format", "(", "checkpoint", ")", ")", "checkpoint_path", "=", "checkpoint", "elif", "isinstance", "(", "checkpoint", ",", "dict", ")", ":", "saved_as_dict", "=", "True", "checkpoint_path", "=", "os", ".", "path", ".", "join", "(", "checkpoint_dir", ",", "\"checkpoint\"", ")", "with", "open", "(", "checkpoint_path", ",", "\"wb\"", ")", "as", "f", ":", "pickle", ".", "dump", "(", "checkpoint", ",", "f", ")", "else", ":", "raise", "ValueError", "(", "\"`_save` must return a dict or string type: {}\"", ".", "format", "(", "str", "(", "type", "(", "checkpoint", ")", ")", ")", ")", "with", "open", "(", "checkpoint_path", "+", "\".tune_metadata\"", ",", "\"wb\"", ")", "as", "f", ":", "pickle", ".", "dump", "(", "{", "\"experiment_id\"", ":", "self", ".", "_experiment_id", ",", "\"iteration\"", ":", "self", ".", "_iteration", ",", "\"timesteps_total\"", ":", "self", ".", "_timesteps_total", ",", "\"time_total\"", ":", "self", ".", 
"_time_total", ",", "\"episodes_total\"", ":", "self", ".", "_episodes_total", ",", "\"saved_as_dict\"", ":", "saved_as_dict", "}", ",", "f", ")", "return", "checkpoint_path" ]
Saves the current model state to a checkpoint. Subclasses should override ``_save()`` instead to save state. This method dumps additional metadata alongside the saved path. Args: checkpoint_dir (str): Optional dir to place the checkpoint. Returns: Checkpoint path that may be passed to restore().
[ "Saves", "the", "current", "model", "state", "to", "a", "checkpoint", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/trainable.py#L224-L273
24,238
ray-project/ray
python/ray/tune/trainable.py
Trainable.save_to_object
def save_to_object(self): """Saves the current model state to a Python object. It also saves to disk but does not return the checkpoint path. Returns: Object holding checkpoint data. """ tmpdir = tempfile.mkdtemp("save_to_object", dir=self.logdir) checkpoint_prefix = self.save(tmpdir) data = {} base_dir = os.path.dirname(checkpoint_prefix) for path in os.listdir(base_dir): path = os.path.join(base_dir, path) if path.startswith(checkpoint_prefix): with open(path, "rb") as f: data[os.path.basename(path)] = f.read() out = io.BytesIO() data_dict = pickle.dumps({ "checkpoint_name": os.path.basename(checkpoint_prefix), "data": data, }) if len(data_dict) > 10e6: # getting pretty large logger.info("Checkpoint size is {} bytes".format(len(data_dict))) out.write(data_dict) shutil.rmtree(tmpdir) return out.getvalue()
python
def save_to_object(self): """Saves the current model state to a Python object. It also saves to disk but does not return the checkpoint path. Returns: Object holding checkpoint data. """ tmpdir = tempfile.mkdtemp("save_to_object", dir=self.logdir) checkpoint_prefix = self.save(tmpdir) data = {} base_dir = os.path.dirname(checkpoint_prefix) for path in os.listdir(base_dir): path = os.path.join(base_dir, path) if path.startswith(checkpoint_prefix): with open(path, "rb") as f: data[os.path.basename(path)] = f.read() out = io.BytesIO() data_dict = pickle.dumps({ "checkpoint_name": os.path.basename(checkpoint_prefix), "data": data, }) if len(data_dict) > 10e6: # getting pretty large logger.info("Checkpoint size is {} bytes".format(len(data_dict))) out.write(data_dict) shutil.rmtree(tmpdir) return out.getvalue()
[ "def", "save_to_object", "(", "self", ")", ":", "tmpdir", "=", "tempfile", ".", "mkdtemp", "(", "\"save_to_object\"", ",", "dir", "=", "self", ".", "logdir", ")", "checkpoint_prefix", "=", "self", ".", "save", "(", "tmpdir", ")", "data", "=", "{", "}", "base_dir", "=", "os", ".", "path", ".", "dirname", "(", "checkpoint_prefix", ")", "for", "path", "in", "os", ".", "listdir", "(", "base_dir", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "path", ")", "if", "path", ".", "startswith", "(", "checkpoint_prefix", ")", ":", "with", "open", "(", "path", ",", "\"rb\"", ")", "as", "f", ":", "data", "[", "os", ".", "path", ".", "basename", "(", "path", ")", "]", "=", "f", ".", "read", "(", ")", "out", "=", "io", ".", "BytesIO", "(", ")", "data_dict", "=", "pickle", ".", "dumps", "(", "{", "\"checkpoint_name\"", ":", "os", ".", "path", ".", "basename", "(", "checkpoint_prefix", ")", ",", "\"data\"", ":", "data", ",", "}", ")", "if", "len", "(", "data_dict", ")", ">", "10e6", ":", "# getting pretty large", "logger", ".", "info", "(", "\"Checkpoint size is {} bytes\"", ".", "format", "(", "len", "(", "data_dict", ")", ")", ")", "out", ".", "write", "(", "data_dict", ")", "shutil", ".", "rmtree", "(", "tmpdir", ")", "return", "out", ".", "getvalue", "(", ")" ]
Saves the current model state to a Python object. It also saves to disk but does not return the checkpoint path. Returns: Object holding checkpoint data.
[ "Saves", "the", "current", "model", "state", "to", "a", "Python", "object", ".", "It", "also", "saves", "to", "disk", "but", "does", "not", "return", "the", "checkpoint", "path", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/trainable.py#L275-L304
24,239
ray-project/ray
python/ray/tune/trainable.py
Trainable.restore_from_object
def restore_from_object(self, obj): """Restores training state from a checkpoint object. These checkpoints are returned from calls to save_to_object(). """ info = pickle.loads(obj) data = info["data"] tmpdir = tempfile.mkdtemp("restore_from_object", dir=self.logdir) checkpoint_path = os.path.join(tmpdir, info["checkpoint_name"]) for file_name, file_contents in data.items(): with open(os.path.join(tmpdir, file_name), "wb") as f: f.write(file_contents) self.restore(checkpoint_path) shutil.rmtree(tmpdir)
python
def restore_from_object(self, obj): """Restores training state from a checkpoint object. These checkpoints are returned from calls to save_to_object(). """ info = pickle.loads(obj) data = info["data"] tmpdir = tempfile.mkdtemp("restore_from_object", dir=self.logdir) checkpoint_path = os.path.join(tmpdir, info["checkpoint_name"]) for file_name, file_contents in data.items(): with open(os.path.join(tmpdir, file_name), "wb") as f: f.write(file_contents) self.restore(checkpoint_path) shutil.rmtree(tmpdir)
[ "def", "restore_from_object", "(", "self", ",", "obj", ")", ":", "info", "=", "pickle", ".", "loads", "(", "obj", ")", "data", "=", "info", "[", "\"data\"", "]", "tmpdir", "=", "tempfile", ".", "mkdtemp", "(", "\"restore_from_object\"", ",", "dir", "=", "self", ".", "logdir", ")", "checkpoint_path", "=", "os", ".", "path", ".", "join", "(", "tmpdir", ",", "info", "[", "\"checkpoint_name\"", "]", ")", "for", "file_name", ",", "file_contents", "in", "data", ".", "items", "(", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "tmpdir", ",", "file_name", ")", ",", "\"wb\"", ")", "as", "f", ":", "f", ".", "write", "(", "file_contents", ")", "self", ".", "restore", "(", "checkpoint_path", ")", "shutil", ".", "rmtree", "(", "tmpdir", ")" ]
Restores training state from a checkpoint object. These checkpoints are returned from calls to save_to_object().
[ "Restores", "training", "state", "from", "a", "checkpoint", "object", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/trainable.py#L334-L350
24,240
ray-project/ray
python/ray/tune/trainable.py
Trainable.export_model
def export_model(self, export_formats, export_dir=None): """Exports model based on export_formats. Subclasses should override _export_model() to actually export model to local directory. Args: export_formats (list): List of formats that should be exported. export_dir (str): Optional dir to place the exported model. Defaults to self.logdir. Return: A dict that maps ExportFormats to successfully exported models. """ export_dir = export_dir or self.logdir return self._export_model(export_formats, export_dir)
python
def export_model(self, export_formats, export_dir=None): """Exports model based on export_formats. Subclasses should override _export_model() to actually export model to local directory. Args: export_formats (list): List of formats that should be exported. export_dir (str): Optional dir to place the exported model. Defaults to self.logdir. Return: A dict that maps ExportFormats to successfully exported models. """ export_dir = export_dir or self.logdir return self._export_model(export_formats, export_dir)
[ "def", "export_model", "(", "self", ",", "export_formats", ",", "export_dir", "=", "None", ")", ":", "export_dir", "=", "export_dir", "or", "self", ".", "logdir", "return", "self", ".", "_export_model", "(", "export_formats", ",", "export_dir", ")" ]
Exports model based on export_formats. Subclasses should override _export_model() to actually export model to local directory. Args: export_formats (list): List of formats that should be exported. export_dir (str): Optional dir to place the exported model. Defaults to self.logdir. Return: A dict that maps ExportFormats to successfully exported models.
[ "Exports", "model", "based", "on", "export_formats", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/trainable.py#L352-L367
24,241
ray-project/ray
python/ray/rllib/utils/schedules.py
LinearSchedule.value
def value(self, t): """See Schedule.value""" fraction = min(float(t) / max(1, self.schedule_timesteps), 1.0) return self.initial_p + fraction * (self.final_p - self.initial_p)
python
def value(self, t): """See Schedule.value""" fraction = min(float(t) / max(1, self.schedule_timesteps), 1.0) return self.initial_p + fraction * (self.final_p - self.initial_p)
[ "def", "value", "(", "self", ",", "t", ")", ":", "fraction", "=", "min", "(", "float", "(", "t", ")", "/", "max", "(", "1", ",", "self", ".", "schedule_timesteps", ")", ",", "1.0", ")", "return", "self", ".", "initial_p", "+", "fraction", "*", "(", "self", ".", "final_p", "-", "self", ".", "initial_p", ")" ]
See Schedule.value
[ "See", "Schedule", ".", "value" ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/utils/schedules.py#L105-L108
24,242
ray-project/ray
python/ray/tune/automlboard/common/utils.py
dump_json
def dump_json(json_info, json_file, overwrite=True): """Dump a whole json record into the given file. Overwrite the file if the overwrite flag set. Args: json_info (dict): Information dict to be dumped. json_file (str): File path to be dumped to. overwrite(boolean) """ if overwrite: mode = "w" else: mode = "w+" try: with open(json_file, mode) as f: f.write(json.dumps(json_info)) except BaseException as e: logging.error(e.message)
python
def dump_json(json_info, json_file, overwrite=True): """Dump a whole json record into the given file. Overwrite the file if the overwrite flag set. Args: json_info (dict): Information dict to be dumped. json_file (str): File path to be dumped to. overwrite(boolean) """ if overwrite: mode = "w" else: mode = "w+" try: with open(json_file, mode) as f: f.write(json.dumps(json_info)) except BaseException as e: logging.error(e.message)
[ "def", "dump_json", "(", "json_info", ",", "json_file", ",", "overwrite", "=", "True", ")", ":", "if", "overwrite", ":", "mode", "=", "\"w\"", "else", ":", "mode", "=", "\"w+\"", "try", ":", "with", "open", "(", "json_file", ",", "mode", ")", "as", "f", ":", "f", ".", "write", "(", "json", ".", "dumps", "(", "json_info", ")", ")", "except", "BaseException", "as", "e", ":", "logging", ".", "error", "(", "e", ".", "message", ")" ]
Dump a whole json record into the given file. Overwrite the file if the overwrite flag set. Args: json_info (dict): Information dict to be dumped. json_file (str): File path to be dumped to. overwrite(boolean)
[ "Dump", "a", "whole", "json", "record", "into", "the", "given", "file", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/automlboard/common/utils.py#L11-L30
24,243
ray-project/ray
python/ray/tune/automlboard/common/utils.py
parse_json
def parse_json(json_file): """Parse a whole json record from the given file. Return None if the json file does not exists or exception occurs. Args: json_file (str): File path to be parsed. Returns: A dict of json info. """ if not os.path.exists(json_file): return None try: with open(json_file, "r") as f: info_str = f.readlines() info_str = "".join(info_str) json_info = json.loads(info_str) return unicode2str(json_info) except BaseException as e: logging.error(e.message) return None
python
def parse_json(json_file): """Parse a whole json record from the given file. Return None if the json file does not exists or exception occurs. Args: json_file (str): File path to be parsed. Returns: A dict of json info. """ if not os.path.exists(json_file): return None try: with open(json_file, "r") as f: info_str = f.readlines() info_str = "".join(info_str) json_info = json.loads(info_str) return unicode2str(json_info) except BaseException as e: logging.error(e.message) return None
[ "def", "parse_json", "(", "json_file", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "json_file", ")", ":", "return", "None", "try", ":", "with", "open", "(", "json_file", ",", "\"r\"", ")", "as", "f", ":", "info_str", "=", "f", ".", "readlines", "(", ")", "info_str", "=", "\"\"", ".", "join", "(", "info_str", ")", "json_info", "=", "json", ".", "loads", "(", "info_str", ")", "return", "unicode2str", "(", "json_info", ")", "except", "BaseException", "as", "e", ":", "logging", ".", "error", "(", "e", ".", "message", ")", "return", "None" ]
Parse a whole json record from the given file. Return None if the json file does not exists or exception occurs. Args: json_file (str): File path to be parsed. Returns: A dict of json info.
[ "Parse", "a", "whole", "json", "record", "from", "the", "given", "file", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/automlboard/common/utils.py#L33-L55
24,244
ray-project/ray
python/ray/tune/automlboard/common/utils.py
parse_multiple_json
def parse_multiple_json(json_file, offset=None): """Parse multiple json records from the given file. Seek to the offset as the start point before parsing if offset set. return empty list if the json file does not exists or exception occurs. Args: json_file (str): File path to be parsed. offset (int): Initial seek position of the file. Returns: A dict of json info. New offset after parsing. """ json_info_list = [] if not os.path.exists(json_file): return json_info_list try: with open(json_file, "r") as f: if offset: f.seek(offset) for line in f: if line[-1] != "\n": # Incomplete line break json_info = json.loads(line) json_info_list.append(json_info) offset += len(line) except BaseException as e: logging.error(e.message) return json_info_list, offset
python
def parse_multiple_json(json_file, offset=None): """Parse multiple json records from the given file. Seek to the offset as the start point before parsing if offset set. return empty list if the json file does not exists or exception occurs. Args: json_file (str): File path to be parsed. offset (int): Initial seek position of the file. Returns: A dict of json info. New offset after parsing. """ json_info_list = [] if not os.path.exists(json_file): return json_info_list try: with open(json_file, "r") as f: if offset: f.seek(offset) for line in f: if line[-1] != "\n": # Incomplete line break json_info = json.loads(line) json_info_list.append(json_info) offset += len(line) except BaseException as e: logging.error(e.message) return json_info_list, offset
[ "def", "parse_multiple_json", "(", "json_file", ",", "offset", "=", "None", ")", ":", "json_info_list", "=", "[", "]", "if", "not", "os", ".", "path", ".", "exists", "(", "json_file", ")", ":", "return", "json_info_list", "try", ":", "with", "open", "(", "json_file", ",", "\"r\"", ")", "as", "f", ":", "if", "offset", ":", "f", ".", "seek", "(", "offset", ")", "for", "line", "in", "f", ":", "if", "line", "[", "-", "1", "]", "!=", "\"\\n\"", ":", "# Incomplete line", "break", "json_info", "=", "json", ".", "loads", "(", "line", ")", "json_info_list", ".", "append", "(", "json_info", ")", "offset", "+=", "len", "(", "line", ")", "except", "BaseException", "as", "e", ":", "logging", ".", "error", "(", "e", ".", "message", ")", "return", "json_info_list", ",", "offset" ]
Parse multiple json records from the given file. Seek to the offset as the start point before parsing if offset set. return empty list if the json file does not exists or exception occurs. Args: json_file (str): File path to be parsed. offset (int): Initial seek position of the file. Returns: A dict of json info. New offset after parsing.
[ "Parse", "multiple", "json", "records", "from", "the", "given", "file", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/automlboard/common/utils.py#L58-L92
24,245
ray-project/ray
python/ray/tune/automlboard/common/utils.py
unicode2str
def unicode2str(content): """Convert the unicode element of the content to str recursively.""" if isinstance(content, dict): result = {} for key in content.keys(): result[unicode2str(key)] = unicode2str(content[key]) return result elif isinstance(content, list): return [unicode2str(element) for element in content] elif isinstance(content, int) or isinstance(content, float): return content else: return content.encode("utf-8")
python
def unicode2str(content): """Convert the unicode element of the content to str recursively.""" if isinstance(content, dict): result = {} for key in content.keys(): result[unicode2str(key)] = unicode2str(content[key]) return result elif isinstance(content, list): return [unicode2str(element) for element in content] elif isinstance(content, int) or isinstance(content, float): return content else: return content.encode("utf-8")
[ "def", "unicode2str", "(", "content", ")", ":", "if", "isinstance", "(", "content", ",", "dict", ")", ":", "result", "=", "{", "}", "for", "key", "in", "content", ".", "keys", "(", ")", ":", "result", "[", "unicode2str", "(", "key", ")", "]", "=", "unicode2str", "(", "content", "[", "key", "]", ")", "return", "result", "elif", "isinstance", "(", "content", ",", "list", ")", ":", "return", "[", "unicode2str", "(", "element", ")", "for", "element", "in", "content", "]", "elif", "isinstance", "(", "content", ",", "int", ")", "or", "isinstance", "(", "content", ",", "float", ")", ":", "return", "content", "else", ":", "return", "content", ".", "encode", "(", "\"utf-8\"", ")" ]
Convert the unicode element of the content to str recursively.
[ "Convert", "the", "unicode", "element", "of", "the", "content", "to", "str", "recursively", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/automlboard/common/utils.py#L100-L112
24,246
ray-project/ray
examples/lbfgs/driver.py
LinearModel.loss
def loss(self, xs, ys): """Computes the loss of the network.""" return float( self.sess.run( self.cross_entropy, feed_dict={ self.x: xs, self.y_: ys }))
python
def loss(self, xs, ys): """Computes the loss of the network.""" return float( self.sess.run( self.cross_entropy, feed_dict={ self.x: xs, self.y_: ys }))
[ "def", "loss", "(", "self", ",", "xs", ",", "ys", ")", ":", "return", "float", "(", "self", ".", "sess", ".", "run", "(", "self", ".", "cross_entropy", ",", "feed_dict", "=", "{", "self", ".", "x", ":", "xs", ",", "self", ".", "y_", ":", "ys", "}", ")", ")" ]
Computes the loss of the network.
[ "Computes", "the", "loss", "of", "the", "network", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/examples/lbfgs/driver.py#L63-L70
24,247
ray-project/ray
examples/lbfgs/driver.py
LinearModel.grad
def grad(self, xs, ys): """Computes the gradients of the network.""" return self.sess.run( self.cross_entropy_grads, feed_dict={ self.x: xs, self.y_: ys })
python
def grad(self, xs, ys): """Computes the gradients of the network.""" return self.sess.run( self.cross_entropy_grads, feed_dict={ self.x: xs, self.y_: ys })
[ "def", "grad", "(", "self", ",", "xs", ",", "ys", ")", ":", "return", "self", ".", "sess", ".", "run", "(", "self", ".", "cross_entropy_grads", ",", "feed_dict", "=", "{", "self", ".", "x", ":", "xs", ",", "self", ".", "y_", ":", "ys", "}", ")" ]
Computes the gradients of the network.
[ "Computes", "the", "gradients", "of", "the", "network", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/examples/lbfgs/driver.py#L72-L78
24,248
ray-project/ray
examples/resnet/cifar_input.py
build_data
def build_data(data_path, size, dataset): """Creates the queue and preprocessing operations for the dataset. Args: data_path: Filename for cifar10 data. size: The number of images in the dataset. dataset: The dataset we are using. Returns: queue: A Tensorflow queue for extracting the images and labels. """ image_size = 32 if dataset == "cifar10": label_bytes = 1 label_offset = 0 elif dataset == "cifar100": label_bytes = 1 label_offset = 1 depth = 3 image_bytes = image_size * image_size * depth record_bytes = label_bytes + label_offset + image_bytes def load_transform(value): # Convert these examples to dense labels and processed images. record = tf.reshape(tf.decode_raw(value, tf.uint8), [record_bytes]) label = tf.cast(tf.slice(record, [label_offset], [label_bytes]), tf.int32) # Convert from string to [depth * height * width] to # [depth, height, width]. depth_major = tf.reshape( tf.slice(record, [label_bytes], [image_bytes]), [depth, image_size, image_size]) # Convert from [depth, height, width] to [height, width, depth]. image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32) return (image, label) # Read examples from files in the filename queue. data_files = tf.gfile.Glob(data_path) data = tf.contrib.data.FixedLengthRecordDataset(data_files, record_bytes=record_bytes) data = data.map(load_transform) data = data.batch(size) iterator = data.make_one_shot_iterator() return iterator.get_next()
python
def build_data(data_path, size, dataset): """Creates the queue and preprocessing operations for the dataset. Args: data_path: Filename for cifar10 data. size: The number of images in the dataset. dataset: The dataset we are using. Returns: queue: A Tensorflow queue for extracting the images and labels. """ image_size = 32 if dataset == "cifar10": label_bytes = 1 label_offset = 0 elif dataset == "cifar100": label_bytes = 1 label_offset = 1 depth = 3 image_bytes = image_size * image_size * depth record_bytes = label_bytes + label_offset + image_bytes def load_transform(value): # Convert these examples to dense labels and processed images. record = tf.reshape(tf.decode_raw(value, tf.uint8), [record_bytes]) label = tf.cast(tf.slice(record, [label_offset], [label_bytes]), tf.int32) # Convert from string to [depth * height * width] to # [depth, height, width]. depth_major = tf.reshape( tf.slice(record, [label_bytes], [image_bytes]), [depth, image_size, image_size]) # Convert from [depth, height, width] to [height, width, depth]. image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32) return (image, label) # Read examples from files in the filename queue. data_files = tf.gfile.Glob(data_path) data = tf.contrib.data.FixedLengthRecordDataset(data_files, record_bytes=record_bytes) data = data.map(load_transform) data = data.batch(size) iterator = data.make_one_shot_iterator() return iterator.get_next()
[ "def", "build_data", "(", "data_path", ",", "size", ",", "dataset", ")", ":", "image_size", "=", "32", "if", "dataset", "==", "\"cifar10\"", ":", "label_bytes", "=", "1", "label_offset", "=", "0", "elif", "dataset", "==", "\"cifar100\"", ":", "label_bytes", "=", "1", "label_offset", "=", "1", "depth", "=", "3", "image_bytes", "=", "image_size", "*", "image_size", "*", "depth", "record_bytes", "=", "label_bytes", "+", "label_offset", "+", "image_bytes", "def", "load_transform", "(", "value", ")", ":", "# Convert these examples to dense labels and processed images.", "record", "=", "tf", ".", "reshape", "(", "tf", ".", "decode_raw", "(", "value", ",", "tf", ".", "uint8", ")", ",", "[", "record_bytes", "]", ")", "label", "=", "tf", ".", "cast", "(", "tf", ".", "slice", "(", "record", ",", "[", "label_offset", "]", ",", "[", "label_bytes", "]", ")", ",", "tf", ".", "int32", ")", "# Convert from string to [depth * height * width] to", "# [depth, height, width].", "depth_major", "=", "tf", ".", "reshape", "(", "tf", ".", "slice", "(", "record", ",", "[", "label_bytes", "]", ",", "[", "image_bytes", "]", ")", ",", "[", "depth", ",", "image_size", ",", "image_size", "]", ")", "# Convert from [depth, height, width] to [height, width, depth].", "image", "=", "tf", ".", "cast", "(", "tf", ".", "transpose", "(", "depth_major", ",", "[", "1", ",", "2", ",", "0", "]", ")", ",", "tf", ".", "float32", ")", "return", "(", "image", ",", "label", ")", "# Read examples from files in the filename queue.", "data_files", "=", "tf", ".", "gfile", ".", "Glob", "(", "data_path", ")", "data", "=", "tf", ".", "contrib", ".", "data", ".", "FixedLengthRecordDataset", "(", "data_files", ",", "record_bytes", "=", "record_bytes", ")", "data", "=", "data", ".", "map", "(", "load_transform", ")", "data", "=", "data", ".", "batch", "(", "size", ")", "iterator", "=", "data", ".", "make_one_shot_iterator", "(", ")", "return", "iterator", ".", "get_next", "(", ")" ]
Creates the queue and preprocessing operations for the dataset. Args: data_path: Filename for cifar10 data. size: The number of images in the dataset. dataset: The dataset we are using. Returns: queue: A Tensorflow queue for extracting the images and labels.
[ "Creates", "the", "queue", "and", "preprocessing", "operations", "for", "the", "dataset", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/examples/resnet/cifar_input.py#L12-L54
24,249
ray-project/ray
examples/resnet/cifar_input.py
build_input
def build_input(data, batch_size, dataset, train): """Build CIFAR image and labels. Args: data_path: Filename for cifar10 data. batch_size: Input batch size. train: True if we are training and false if we are testing. Returns: images: Batches of images of size [batch_size, image_size, image_size, 3]. labels: Batches of labels of size [batch_size, num_classes]. Raises: ValueError: When the specified dataset is not supported. """ image_size = 32 depth = 3 num_classes = 10 if dataset == "cifar10" else 100 images, labels = data num_samples = images.shape[0] - images.shape[0] % batch_size dataset = tf.contrib.data.Dataset.from_tensor_slices( (images[:num_samples], labels[:num_samples])) def map_train(image, label): image = tf.image.resize_image_with_crop_or_pad(image, image_size + 4, image_size + 4) image = tf.random_crop(image, [image_size, image_size, 3]) image = tf.image.random_flip_left_right(image) image = tf.image.per_image_standardization(image) return (image, label) def map_test(image, label): image = tf.image.resize_image_with_crop_or_pad(image, image_size, image_size) image = tf.image.per_image_standardization(image) return (image, label) dataset = dataset.map(map_train if train else map_test) dataset = dataset.batch(batch_size) dataset = dataset.repeat() if train: dataset = dataset.shuffle(buffer_size=16 * batch_size) images, labels = dataset.make_one_shot_iterator().get_next() images = tf.reshape(images, [batch_size, image_size, image_size, depth]) labels = tf.reshape(labels, [batch_size, 1]) indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1]) labels = tf.sparse_to_dense( tf.concat([indices, labels], 1), [batch_size, num_classes], 1.0, 0.0) assert len(images.get_shape()) == 4 assert images.get_shape()[0] == batch_size assert images.get_shape()[-1] == 3 assert len(labels.get_shape()) == 2 assert labels.get_shape()[0] == batch_size assert labels.get_shape()[1] == num_classes if not train: tf.summary.image("images", images) return images, labels
python
def build_input(data, batch_size, dataset, train): """Build CIFAR image and labels. Args: data_path: Filename for cifar10 data. batch_size: Input batch size. train: True if we are training and false if we are testing. Returns: images: Batches of images of size [batch_size, image_size, image_size, 3]. labels: Batches of labels of size [batch_size, num_classes]. Raises: ValueError: When the specified dataset is not supported. """ image_size = 32 depth = 3 num_classes = 10 if dataset == "cifar10" else 100 images, labels = data num_samples = images.shape[0] - images.shape[0] % batch_size dataset = tf.contrib.data.Dataset.from_tensor_slices( (images[:num_samples], labels[:num_samples])) def map_train(image, label): image = tf.image.resize_image_with_crop_or_pad(image, image_size + 4, image_size + 4) image = tf.random_crop(image, [image_size, image_size, 3]) image = tf.image.random_flip_left_right(image) image = tf.image.per_image_standardization(image) return (image, label) def map_test(image, label): image = tf.image.resize_image_with_crop_or_pad(image, image_size, image_size) image = tf.image.per_image_standardization(image) return (image, label) dataset = dataset.map(map_train if train else map_test) dataset = dataset.batch(batch_size) dataset = dataset.repeat() if train: dataset = dataset.shuffle(buffer_size=16 * batch_size) images, labels = dataset.make_one_shot_iterator().get_next() images = tf.reshape(images, [batch_size, image_size, image_size, depth]) labels = tf.reshape(labels, [batch_size, 1]) indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1]) labels = tf.sparse_to_dense( tf.concat([indices, labels], 1), [batch_size, num_classes], 1.0, 0.0) assert len(images.get_shape()) == 4 assert images.get_shape()[0] == batch_size assert images.get_shape()[-1] == 3 assert len(labels.get_shape()) == 2 assert labels.get_shape()[0] == batch_size assert labels.get_shape()[1] == num_classes if not train: tf.summary.image("images", images) return images, labels
[ "def", "build_input", "(", "data", ",", "batch_size", ",", "dataset", ",", "train", ")", ":", "image_size", "=", "32", "depth", "=", "3", "num_classes", "=", "10", "if", "dataset", "==", "\"cifar10\"", "else", "100", "images", ",", "labels", "=", "data", "num_samples", "=", "images", ".", "shape", "[", "0", "]", "-", "images", ".", "shape", "[", "0", "]", "%", "batch_size", "dataset", "=", "tf", ".", "contrib", ".", "data", ".", "Dataset", ".", "from_tensor_slices", "(", "(", "images", "[", ":", "num_samples", "]", ",", "labels", "[", ":", "num_samples", "]", ")", ")", "def", "map_train", "(", "image", ",", "label", ")", ":", "image", "=", "tf", ".", "image", ".", "resize_image_with_crop_or_pad", "(", "image", ",", "image_size", "+", "4", ",", "image_size", "+", "4", ")", "image", "=", "tf", ".", "random_crop", "(", "image", ",", "[", "image_size", ",", "image_size", ",", "3", "]", ")", "image", "=", "tf", ".", "image", ".", "random_flip_left_right", "(", "image", ")", "image", "=", "tf", ".", "image", ".", "per_image_standardization", "(", "image", ")", "return", "(", "image", ",", "label", ")", "def", "map_test", "(", "image", ",", "label", ")", ":", "image", "=", "tf", ".", "image", ".", "resize_image_with_crop_or_pad", "(", "image", ",", "image_size", ",", "image_size", ")", "image", "=", "tf", ".", "image", ".", "per_image_standardization", "(", "image", ")", "return", "(", "image", ",", "label", ")", "dataset", "=", "dataset", ".", "map", "(", "map_train", "if", "train", "else", "map_test", ")", "dataset", "=", "dataset", ".", "batch", "(", "batch_size", ")", "dataset", "=", "dataset", ".", "repeat", "(", ")", "if", "train", ":", "dataset", "=", "dataset", ".", "shuffle", "(", "buffer_size", "=", "16", "*", "batch_size", ")", "images", ",", "labels", "=", "dataset", ".", "make_one_shot_iterator", "(", ")", ".", "get_next", "(", ")", "images", "=", "tf", ".", "reshape", "(", "images", ",", "[", "batch_size", ",", "image_size", ",", "image_size", ",", 
"depth", "]", ")", "labels", "=", "tf", ".", "reshape", "(", "labels", ",", "[", "batch_size", ",", "1", "]", ")", "indices", "=", "tf", ".", "reshape", "(", "tf", ".", "range", "(", "0", ",", "batch_size", ",", "1", ")", ",", "[", "batch_size", ",", "1", "]", ")", "labels", "=", "tf", ".", "sparse_to_dense", "(", "tf", ".", "concat", "(", "[", "indices", ",", "labels", "]", ",", "1", ")", ",", "[", "batch_size", ",", "num_classes", "]", ",", "1.0", ",", "0.0", ")", "assert", "len", "(", "images", ".", "get_shape", "(", ")", ")", "==", "4", "assert", "images", ".", "get_shape", "(", ")", "[", "0", "]", "==", "batch_size", "assert", "images", ".", "get_shape", "(", ")", "[", "-", "1", "]", "==", "3", "assert", "len", "(", "labels", ".", "get_shape", "(", ")", ")", "==", "2", "assert", "labels", ".", "get_shape", "(", ")", "[", "0", "]", "==", "batch_size", "assert", "labels", ".", "get_shape", "(", ")", "[", "1", "]", "==", "num_classes", "if", "not", "train", ":", "tf", ".", "summary", ".", "image", "(", "\"images\"", ",", "images", ")", "return", "images", ",", "labels" ]
Build CIFAR image and labels. Args: data_path: Filename for cifar10 data. batch_size: Input batch size. train: True if we are training and false if we are testing. Returns: images: Batches of images of size [batch_size, image_size, image_size, 3]. labels: Batches of labels of size [batch_size, num_classes]. Raises: ValueError: When the specified dataset is not supported.
[ "Build", "CIFAR", "image", "and", "labels", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/examples/resnet/cifar_input.py#L57-L116
24,250
ray-project/ray
python/ray/scripts/scripts.py
create_or_update
def create_or_update(cluster_config_file, min_workers, max_workers, no_restart, restart_only, yes, cluster_name): """Create or update a Ray cluster.""" if restart_only or no_restart: assert restart_only != no_restart, "Cannot set both 'restart_only' " \ "and 'no_restart' at the same time!" create_or_update_cluster(cluster_config_file, min_workers, max_workers, no_restart, restart_only, yes, cluster_name)
python
def create_or_update(cluster_config_file, min_workers, max_workers, no_restart, restart_only, yes, cluster_name): """Create or update a Ray cluster.""" if restart_only or no_restart: assert restart_only != no_restart, "Cannot set both 'restart_only' " \ "and 'no_restart' at the same time!" create_or_update_cluster(cluster_config_file, min_workers, max_workers, no_restart, restart_only, yes, cluster_name)
[ "def", "create_or_update", "(", "cluster_config_file", ",", "min_workers", ",", "max_workers", ",", "no_restart", ",", "restart_only", ",", "yes", ",", "cluster_name", ")", ":", "if", "restart_only", "or", "no_restart", ":", "assert", "restart_only", "!=", "no_restart", ",", "\"Cannot set both 'restart_only' \"", "\"and 'no_restart' at the same time!\"", "create_or_update_cluster", "(", "cluster_config_file", ",", "min_workers", ",", "max_workers", ",", "no_restart", ",", "restart_only", ",", "yes", ",", "cluster_name", ")" ]
Create or update a Ray cluster.
[ "Create", "or", "update", "a", "Ray", "cluster", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/scripts/scripts.py#L453-L460
24,251
ray-project/ray
python/ray/scripts/scripts.py
teardown
def teardown(cluster_config_file, yes, workers_only, cluster_name): """Tear down the Ray cluster.""" teardown_cluster(cluster_config_file, yes, workers_only, cluster_name)
python
def teardown(cluster_config_file, yes, workers_only, cluster_name): """Tear down the Ray cluster.""" teardown_cluster(cluster_config_file, yes, workers_only, cluster_name)
[ "def", "teardown", "(", "cluster_config_file", ",", "yes", ",", "workers_only", ",", "cluster_name", ")", ":", "teardown_cluster", "(", "cluster_config_file", ",", "yes", ",", "workers_only", ",", "cluster_name", ")" ]
Tear down the Ray cluster.
[ "Tear", "down", "the", "Ray", "cluster", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/scripts/scripts.py#L482-L484
24,252
ray-project/ray
python/ray/scripts/scripts.py
kill_random_node
def kill_random_node(cluster_config_file, yes, cluster_name): """Kills a random Ray node. For testing purposes only.""" click.echo("Killed node with IP " + kill_node(cluster_config_file, yes, cluster_name))
python
def kill_random_node(cluster_config_file, yes, cluster_name): """Kills a random Ray node. For testing purposes only.""" click.echo("Killed node with IP " + kill_node(cluster_config_file, yes, cluster_name))
[ "def", "kill_random_node", "(", "cluster_config_file", ",", "yes", ",", "cluster_name", ")", ":", "click", ".", "echo", "(", "\"Killed node with IP \"", "+", "kill_node", "(", "cluster_config_file", ",", "yes", ",", "cluster_name", ")", ")" ]
Kills a random Ray node. For testing purposes only.
[ "Kills", "a", "random", "Ray", "node", ".", "For", "testing", "purposes", "only", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/scripts/scripts.py#L501-L504
24,253
ray-project/ray
python/ray/scripts/scripts.py
submit
def submit(cluster_config_file, docker, screen, tmux, stop, start, cluster_name, port_forward, script, script_args): """Uploads and runs a script on the specified cluster. The script is automatically synced to the following location: os.path.join("~", os.path.basename(script)) """ assert not (screen and tmux), "Can specify only one of `screen` or `tmux`." if start: create_or_update_cluster(cluster_config_file, None, None, False, False, True, cluster_name) target = os.path.join("~", os.path.basename(script)) rsync(cluster_config_file, script, target, cluster_name, down=False) cmd = " ".join(["python", target] + list(script_args)) exec_cluster(cluster_config_file, cmd, docker, screen, tmux, stop, False, cluster_name, port_forward)
python
def submit(cluster_config_file, docker, screen, tmux, stop, start, cluster_name, port_forward, script, script_args): """Uploads and runs a script on the specified cluster. The script is automatically synced to the following location: os.path.join("~", os.path.basename(script)) """ assert not (screen and tmux), "Can specify only one of `screen` or `tmux`." if start: create_or_update_cluster(cluster_config_file, None, None, False, False, True, cluster_name) target = os.path.join("~", os.path.basename(script)) rsync(cluster_config_file, script, target, cluster_name, down=False) cmd = " ".join(["python", target] + list(script_args)) exec_cluster(cluster_config_file, cmd, docker, screen, tmux, stop, False, cluster_name, port_forward)
[ "def", "submit", "(", "cluster_config_file", ",", "docker", ",", "screen", ",", "tmux", ",", "stop", ",", "start", ",", "cluster_name", ",", "port_forward", ",", "script", ",", "script_args", ")", ":", "assert", "not", "(", "screen", "and", "tmux", ")", ",", "\"Can specify only one of `screen` or `tmux`.\"", "if", "start", ":", "create_or_update_cluster", "(", "cluster_config_file", ",", "None", ",", "None", ",", "False", ",", "False", ",", "True", ",", "cluster_name", ")", "target", "=", "os", ".", "path", ".", "join", "(", "\"~\"", ",", "os", ".", "path", ".", "basename", "(", "script", ")", ")", "rsync", "(", "cluster_config_file", ",", "script", ",", "target", ",", "cluster_name", ",", "down", "=", "False", ")", "cmd", "=", "\" \"", ".", "join", "(", "[", "\"python\"", ",", "target", "]", "+", "list", "(", "script_args", ")", ")", "exec_cluster", "(", "cluster_config_file", ",", "cmd", ",", "docker", ",", "screen", ",", "tmux", ",", "stop", ",", "False", ",", "cluster_name", ",", "port_forward", ")" ]
Uploads and runs a script on the specified cluster. The script is automatically synced to the following location: os.path.join("~", os.path.basename(script))
[ "Uploads", "and", "runs", "a", "script", "on", "the", "specified", "cluster", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/scripts/scripts.py#L590-L609
24,254
ray-project/ray
examples/resnet/resnet_model.py
ResNet.build_graph
def build_graph(self): """Build a whole graph for the model.""" self.global_step = tf.Variable(0, trainable=False) self._build_model() if self.mode == "train": self._build_train_op() else: # Additional initialization for the test network. self.variables = ray.experimental.tf_utils.TensorFlowVariables( self.cost) self.summaries = tf.summary.merge_all()
python
def build_graph(self): """Build a whole graph for the model.""" self.global_step = tf.Variable(0, trainable=False) self._build_model() if self.mode == "train": self._build_train_op() else: # Additional initialization for the test network. self.variables = ray.experimental.tf_utils.TensorFlowVariables( self.cost) self.summaries = tf.summary.merge_all()
[ "def", "build_graph", "(", "self", ")", ":", "self", ".", "global_step", "=", "tf", ".", "Variable", "(", "0", ",", "trainable", "=", "False", ")", "self", ".", "_build_model", "(", ")", "if", "self", ".", "mode", "==", "\"train\"", ":", "self", ".", "_build_train_op", "(", ")", "else", ":", "# Additional initialization for the test network.", "self", ".", "variables", "=", "ray", ".", "experimental", ".", "tf_utils", ".", "TensorFlowVariables", "(", "self", ".", "cost", ")", "self", ".", "summaries", "=", "tf", ".", "summary", ".", "merge_all", "(", ")" ]
Build a whole graph for the model.
[ "Build", "a", "whole", "graph", "for", "the", "model", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/examples/resnet/resnet_model.py#L49-L59
24,255
ray-project/ray
python/ray/rllib/agents/qmix/qmix_policy_graph.py
_mac
def _mac(model, obs, h): """Forward pass of the multi-agent controller. Arguments: model: TorchModel class obs: Tensor of shape [B, n_agents, obs_size] h: List of tensors of shape [B, n_agents, h_size] Returns: q_vals: Tensor of shape [B, n_agents, n_actions] h: Tensor of shape [B, n_agents, h_size] """ B, n_agents = obs.size(0), obs.size(1) obs_flat = obs.reshape([B * n_agents, -1]) h_flat = [s.reshape([B * n_agents, -1]) for s in h] q_flat, _, _, h_flat = model.forward({"obs": obs_flat}, h_flat) return q_flat.reshape( [B, n_agents, -1]), [s.reshape([B, n_agents, -1]) for s in h_flat]
python
def _mac(model, obs, h): """Forward pass of the multi-agent controller. Arguments: model: TorchModel class obs: Tensor of shape [B, n_agents, obs_size] h: List of tensors of shape [B, n_agents, h_size] Returns: q_vals: Tensor of shape [B, n_agents, n_actions] h: Tensor of shape [B, n_agents, h_size] """ B, n_agents = obs.size(0), obs.size(1) obs_flat = obs.reshape([B * n_agents, -1]) h_flat = [s.reshape([B * n_agents, -1]) for s in h] q_flat, _, _, h_flat = model.forward({"obs": obs_flat}, h_flat) return q_flat.reshape( [B, n_agents, -1]), [s.reshape([B, n_agents, -1]) for s in h_flat]
[ "def", "_mac", "(", "model", ",", "obs", ",", "h", ")", ":", "B", ",", "n_agents", "=", "obs", ".", "size", "(", "0", ")", ",", "obs", ".", "size", "(", "1", ")", "obs_flat", "=", "obs", ".", "reshape", "(", "[", "B", "*", "n_agents", ",", "-", "1", "]", ")", "h_flat", "=", "[", "s", ".", "reshape", "(", "[", "B", "*", "n_agents", ",", "-", "1", "]", ")", "for", "s", "in", "h", "]", "q_flat", ",", "_", ",", "_", ",", "h_flat", "=", "model", ".", "forward", "(", "{", "\"obs\"", ":", "obs_flat", "}", ",", "h_flat", ")", "return", "q_flat", ".", "reshape", "(", "[", "B", ",", "n_agents", ",", "-", "1", "]", ")", ",", "[", "s", ".", "reshape", "(", "[", "B", ",", "n_agents", ",", "-", "1", "]", ")", "for", "s", "in", "h_flat", "]" ]
Forward pass of the multi-agent controller. Arguments: model: TorchModel class obs: Tensor of shape [B, n_agents, obs_size] h: List of tensors of shape [B, n_agents, h_size] Returns: q_vals: Tensor of shape [B, n_agents, n_actions] h: Tensor of shape [B, n_agents, h_size]
[ "Forward", "pass", "of", "the", "multi", "-", "agent", "controller", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/agents/qmix/qmix_policy_graph.py#L409-L426
24,256
ray-project/ray
python/ray/rllib/agents/qmix/qmix_policy_graph.py
QMixLoss.forward
def forward(self, rewards, actions, terminated, mask, obs, action_mask): """Forward pass of the loss. Arguments: rewards: Tensor of shape [B, T-1, n_agents] actions: Tensor of shape [B, T-1, n_agents] terminated: Tensor of shape [B, T-1, n_agents] mask: Tensor of shape [B, T-1, n_agents] obs: Tensor of shape [B, T, n_agents, obs_size] action_mask: Tensor of shape [B, T, n_agents, n_actions] """ B, T = obs.size(0), obs.size(1) # Calculate estimated Q-Values mac_out = [] h = [s.expand([B, self.n_agents, -1]) for s in self.model.state_init()] for t in range(T): q, h = _mac(self.model, obs[:, t], h) mac_out.append(q) mac_out = th.stack(mac_out, dim=1) # Concat over time # Pick the Q-Values for the actions taken -> [B * n_agents, T-1] chosen_action_qvals = th.gather( mac_out[:, :-1], dim=3, index=actions.unsqueeze(3)).squeeze(3) # Calculate the Q-Values necessary for the target target_mac_out = [] target_h = [ s.expand([B, self.n_agents, -1]) for s in self.target_model.state_init() ] for t in range(T): target_q, target_h = _mac(self.target_model, obs[:, t], target_h) target_mac_out.append(target_q) # We don't need the first timesteps Q-Value estimate for targets target_mac_out = th.stack( target_mac_out[1:], dim=1) # Concat across time # Mask out unavailable actions target_mac_out[action_mask[:, 1:] == 0] = -9999999 # Max over target Q-Values if self.double_q: # Get actions that maximise live Q (for double q-learning) mac_out[action_mask == 0] = -9999999 cur_max_actions = mac_out[:, 1:].max(dim=3, keepdim=True)[1] target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3) else: target_max_qvals = target_mac_out.max(dim=3)[0] # Mix if self.mixer is not None: # TODO(ekl) add support for handling global state? This is just # treating the stacked agent obs as the state. 
chosen_action_qvals = self.mixer(chosen_action_qvals, obs[:, :-1]) target_max_qvals = self.target_mixer(target_max_qvals, obs[:, 1:]) # Calculate 1-step Q-Learning targets targets = rewards + self.gamma * (1 - terminated) * target_max_qvals # Td-error td_error = (chosen_action_qvals - targets.detach()) mask = mask.expand_as(td_error) # 0-out the targets that came from padded data masked_td_error = td_error * mask # Normal L2 loss, take mean over actual data loss = (masked_td_error**2).sum() / mask.sum() return loss, mask, masked_td_error, chosen_action_qvals, targets
python
def forward(self, rewards, actions, terminated, mask, obs, action_mask): """Forward pass of the loss. Arguments: rewards: Tensor of shape [B, T-1, n_agents] actions: Tensor of shape [B, T-1, n_agents] terminated: Tensor of shape [B, T-1, n_agents] mask: Tensor of shape [B, T-1, n_agents] obs: Tensor of shape [B, T, n_agents, obs_size] action_mask: Tensor of shape [B, T, n_agents, n_actions] """ B, T = obs.size(0), obs.size(1) # Calculate estimated Q-Values mac_out = [] h = [s.expand([B, self.n_agents, -1]) for s in self.model.state_init()] for t in range(T): q, h = _mac(self.model, obs[:, t], h) mac_out.append(q) mac_out = th.stack(mac_out, dim=1) # Concat over time # Pick the Q-Values for the actions taken -> [B * n_agents, T-1] chosen_action_qvals = th.gather( mac_out[:, :-1], dim=3, index=actions.unsqueeze(3)).squeeze(3) # Calculate the Q-Values necessary for the target target_mac_out = [] target_h = [ s.expand([B, self.n_agents, -1]) for s in self.target_model.state_init() ] for t in range(T): target_q, target_h = _mac(self.target_model, obs[:, t], target_h) target_mac_out.append(target_q) # We don't need the first timesteps Q-Value estimate for targets target_mac_out = th.stack( target_mac_out[1:], dim=1) # Concat across time # Mask out unavailable actions target_mac_out[action_mask[:, 1:] == 0] = -9999999 # Max over target Q-Values if self.double_q: # Get actions that maximise live Q (for double q-learning) mac_out[action_mask == 0] = -9999999 cur_max_actions = mac_out[:, 1:].max(dim=3, keepdim=True)[1] target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3) else: target_max_qvals = target_mac_out.max(dim=3)[0] # Mix if self.mixer is not None: # TODO(ekl) add support for handling global state? This is just # treating the stacked agent obs as the state. 
chosen_action_qvals = self.mixer(chosen_action_qvals, obs[:, :-1]) target_max_qvals = self.target_mixer(target_max_qvals, obs[:, 1:]) # Calculate 1-step Q-Learning targets targets = rewards + self.gamma * (1 - terminated) * target_max_qvals # Td-error td_error = (chosen_action_qvals - targets.detach()) mask = mask.expand_as(td_error) # 0-out the targets that came from padded data masked_td_error = td_error * mask # Normal L2 loss, take mean over actual data loss = (masked_td_error**2).sum() / mask.sum() return loss, mask, masked_td_error, chosen_action_qvals, targets
[ "def", "forward", "(", "self", ",", "rewards", ",", "actions", ",", "terminated", ",", "mask", ",", "obs", ",", "action_mask", ")", ":", "B", ",", "T", "=", "obs", ".", "size", "(", "0", ")", ",", "obs", ".", "size", "(", "1", ")", "# Calculate estimated Q-Values", "mac_out", "=", "[", "]", "h", "=", "[", "s", ".", "expand", "(", "[", "B", ",", "self", ".", "n_agents", ",", "-", "1", "]", ")", "for", "s", "in", "self", ".", "model", ".", "state_init", "(", ")", "]", "for", "t", "in", "range", "(", "T", ")", ":", "q", ",", "h", "=", "_mac", "(", "self", ".", "model", ",", "obs", "[", ":", ",", "t", "]", ",", "h", ")", "mac_out", ".", "append", "(", "q", ")", "mac_out", "=", "th", ".", "stack", "(", "mac_out", ",", "dim", "=", "1", ")", "# Concat over time", "# Pick the Q-Values for the actions taken -> [B * n_agents, T-1]", "chosen_action_qvals", "=", "th", ".", "gather", "(", "mac_out", "[", ":", ",", ":", "-", "1", "]", ",", "dim", "=", "3", ",", "index", "=", "actions", ".", "unsqueeze", "(", "3", ")", ")", ".", "squeeze", "(", "3", ")", "# Calculate the Q-Values necessary for the target", "target_mac_out", "=", "[", "]", "target_h", "=", "[", "s", ".", "expand", "(", "[", "B", ",", "self", ".", "n_agents", ",", "-", "1", "]", ")", "for", "s", "in", "self", ".", "target_model", ".", "state_init", "(", ")", "]", "for", "t", "in", "range", "(", "T", ")", ":", "target_q", ",", "target_h", "=", "_mac", "(", "self", ".", "target_model", ",", "obs", "[", ":", ",", "t", "]", ",", "target_h", ")", "target_mac_out", ".", "append", "(", "target_q", ")", "# We don't need the first timesteps Q-Value estimate for targets", "target_mac_out", "=", "th", ".", "stack", "(", "target_mac_out", "[", "1", ":", "]", ",", "dim", "=", "1", ")", "# Concat across time", "# Mask out unavailable actions", "target_mac_out", "[", "action_mask", "[", ":", ",", "1", ":", "]", "==", "0", "]", "=", "-", "9999999", "# Max over target Q-Values", "if", "self", ".", "double_q", ":", "# Get actions 
that maximise live Q (for double q-learning)", "mac_out", "[", "action_mask", "==", "0", "]", "=", "-", "9999999", "cur_max_actions", "=", "mac_out", "[", ":", ",", "1", ":", "]", ".", "max", "(", "dim", "=", "3", ",", "keepdim", "=", "True", ")", "[", "1", "]", "target_max_qvals", "=", "th", ".", "gather", "(", "target_mac_out", ",", "3", ",", "cur_max_actions", ")", ".", "squeeze", "(", "3", ")", "else", ":", "target_max_qvals", "=", "target_mac_out", ".", "max", "(", "dim", "=", "3", ")", "[", "0", "]", "# Mix", "if", "self", ".", "mixer", "is", "not", "None", ":", "# TODO(ekl) add support for handling global state? This is just", "# treating the stacked agent obs as the state.", "chosen_action_qvals", "=", "self", ".", "mixer", "(", "chosen_action_qvals", ",", "obs", "[", ":", ",", ":", "-", "1", "]", ")", "target_max_qvals", "=", "self", ".", "target_mixer", "(", "target_max_qvals", ",", "obs", "[", ":", ",", "1", ":", "]", ")", "# Calculate 1-step Q-Learning targets", "targets", "=", "rewards", "+", "self", ".", "gamma", "*", "(", "1", "-", "terminated", ")", "*", "target_max_qvals", "# Td-error", "td_error", "=", "(", "chosen_action_qvals", "-", "targets", ".", "detach", "(", ")", ")", "mask", "=", "mask", ".", "expand_as", "(", "td_error", ")", "# 0-out the targets that came from padded data", "masked_td_error", "=", "td_error", "*", "mask", "# Normal L2 loss, take mean over actual data", "loss", "=", "(", "masked_td_error", "**", "2", ")", ".", "sum", "(", ")", "/", "mask", ".", "sum", "(", ")", "return", "loss", ",", "mask", ",", "masked_td_error", ",", "chosen_action_qvals", ",", "targets" ]
Forward pass of the loss. Arguments: rewards: Tensor of shape [B, T-1, n_agents] actions: Tensor of shape [B, T-1, n_agents] terminated: Tensor of shape [B, T-1, n_agents] mask: Tensor of shape [B, T-1, n_agents] obs: Tensor of shape [B, T, n_agents, obs_size] action_mask: Tensor of shape [B, T, n_agents, n_actions]
[ "Forward", "pass", "of", "the", "loss", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/agents/qmix/qmix_policy_graph.py#L49-L122
24,257
ray-project/ray
python/ray/experimental/named_actors.py
get_actor
def get_actor(name): """Get a named actor which was previously created. If the actor doesn't exist, an exception will be raised. Args: name: The name of the named actor. Returns: The ActorHandle object corresponding to the name. """ actor_name = _calculate_key(name) pickled_state = _internal_kv_get(actor_name) if pickled_state is None: raise ValueError("The actor with name={} doesn't exist".format(name)) handle = pickle.loads(pickled_state) return handle
python
def get_actor(name): """Get a named actor which was previously created. If the actor doesn't exist, an exception will be raised. Args: name: The name of the named actor. Returns: The ActorHandle object corresponding to the name. """ actor_name = _calculate_key(name) pickled_state = _internal_kv_get(actor_name) if pickled_state is None: raise ValueError("The actor with name={} doesn't exist".format(name)) handle = pickle.loads(pickled_state) return handle
[ "def", "get_actor", "(", "name", ")", ":", "actor_name", "=", "_calculate_key", "(", "name", ")", "pickled_state", "=", "_internal_kv_get", "(", "actor_name", ")", "if", "pickled_state", "is", "None", ":", "raise", "ValueError", "(", "\"The actor with name={} doesn't exist\"", ".", "format", "(", "name", ")", ")", "handle", "=", "pickle", ".", "loads", "(", "pickled_state", ")", "return", "handle" ]
Get a named actor which was previously created. If the actor doesn't exist, an exception will be raised. Args: name: The name of the named actor. Returns: The ActorHandle object corresponding to the name.
[ "Get", "a", "named", "actor", "which", "was", "previously", "created", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/named_actors.py#L22-L38
24,258
ray-project/ray
python/ray/experimental/named_actors.py
register_actor
def register_actor(name, actor_handle): """Register a named actor under a string key. Args: name: The name of the named actor. actor_handle: The actor object to be associated with this name """ if not isinstance(name, str): raise TypeError("The name argument must be a string.") if not isinstance(actor_handle, ray.actor.ActorHandle): raise TypeError("The actor_handle argument must be an ActorHandle " "object.") actor_name = _calculate_key(name) pickled_state = pickle.dumps(actor_handle) # Add the actor to Redis if it does not already exist. already_exists = _internal_kv_put(actor_name, pickled_state) if already_exists: # If the registration fails, then erase the new actor handle that # was added when pickling the actor handle. actor_handle._ray_new_actor_handles.pop() raise ValueError( "Error: the actor with name={} already exists".format(name))
python
def register_actor(name, actor_handle): """Register a named actor under a string key. Args: name: The name of the named actor. actor_handle: The actor object to be associated with this name """ if not isinstance(name, str): raise TypeError("The name argument must be a string.") if not isinstance(actor_handle, ray.actor.ActorHandle): raise TypeError("The actor_handle argument must be an ActorHandle " "object.") actor_name = _calculate_key(name) pickled_state = pickle.dumps(actor_handle) # Add the actor to Redis if it does not already exist. already_exists = _internal_kv_put(actor_name, pickled_state) if already_exists: # If the registration fails, then erase the new actor handle that # was added when pickling the actor handle. actor_handle._ray_new_actor_handles.pop() raise ValueError( "Error: the actor with name={} already exists".format(name))
[ "def", "register_actor", "(", "name", ",", "actor_handle", ")", ":", "if", "not", "isinstance", "(", "name", ",", "str", ")", ":", "raise", "TypeError", "(", "\"The name argument must be a string.\"", ")", "if", "not", "isinstance", "(", "actor_handle", ",", "ray", ".", "actor", ".", "ActorHandle", ")", ":", "raise", "TypeError", "(", "\"The actor_handle argument must be an ActorHandle \"", "\"object.\"", ")", "actor_name", "=", "_calculate_key", "(", "name", ")", "pickled_state", "=", "pickle", ".", "dumps", "(", "actor_handle", ")", "# Add the actor to Redis if it does not already exist.", "already_exists", "=", "_internal_kv_put", "(", "actor_name", ",", "pickled_state", ")", "if", "already_exists", ":", "# If the registration fails, then erase the new actor handle that", "# was added when pickling the actor handle.", "actor_handle", ".", "_ray_new_actor_handles", ".", "pop", "(", ")", "raise", "ValueError", "(", "\"Error: the actor with name={} already exists\"", ".", "format", "(", "name", ")", ")" ]
Register a named actor under a string key. Args: name: The name of the named actor. actor_handle: The actor object to be associated with this name
[ "Register", "a", "named", "actor", "under", "a", "string", "key", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/named_actors.py#L41-L63
24,259
ray-project/ray
python/ray/autoscaler/autoscaler.py
check_extraneous
def check_extraneous(config, schema): """Make sure all items of config are in schema""" if not isinstance(config, dict): raise ValueError("Config {} is not a dictionary".format(config)) for k in config: if k not in schema: raise ValueError("Unexpected config key `{}` not in {}".format( k, list(schema.keys()))) v, kreq = schema[k] if v is None: continue elif isinstance(v, type): if not isinstance(config[k], v): if v is str and isinstance(config[k], string_types): continue raise ValueError( "Config key `{}` has wrong type {}, expected {}".format( k, type(config[k]).__name__, v.__name__)) else: check_extraneous(config[k], v)
python
def check_extraneous(config, schema): """Make sure all items of config are in schema""" if not isinstance(config, dict): raise ValueError("Config {} is not a dictionary".format(config)) for k in config: if k not in schema: raise ValueError("Unexpected config key `{}` not in {}".format( k, list(schema.keys()))) v, kreq = schema[k] if v is None: continue elif isinstance(v, type): if not isinstance(config[k], v): if v is str and isinstance(config[k], string_types): continue raise ValueError( "Config key `{}` has wrong type {}, expected {}".format( k, type(config[k]).__name__, v.__name__)) else: check_extraneous(config[k], v)
[ "def", "check_extraneous", "(", "config", ",", "schema", ")", ":", "if", "not", "isinstance", "(", "config", ",", "dict", ")", ":", "raise", "ValueError", "(", "\"Config {} is not a dictionary\"", ".", "format", "(", "config", ")", ")", "for", "k", "in", "config", ":", "if", "k", "not", "in", "schema", ":", "raise", "ValueError", "(", "\"Unexpected config key `{}` not in {}\"", ".", "format", "(", "k", ",", "list", "(", "schema", ".", "keys", "(", ")", ")", ")", ")", "v", ",", "kreq", "=", "schema", "[", "k", "]", "if", "v", "is", "None", ":", "continue", "elif", "isinstance", "(", "v", ",", "type", ")", ":", "if", "not", "isinstance", "(", "config", "[", "k", "]", ",", "v", ")", ":", "if", "v", "is", "str", "and", "isinstance", "(", "config", "[", "k", "]", ",", "string_types", ")", ":", "continue", "raise", "ValueError", "(", "\"Config key `{}` has wrong type {}, expected {}\"", ".", "format", "(", "k", ",", "type", "(", "config", "[", "k", "]", ")", ".", "__name__", ",", "v", ".", "__name__", ")", ")", "else", ":", "check_extraneous", "(", "config", "[", "k", "]", ",", "v", ")" ]
Make sure all items of config are in schema
[ "Make", "sure", "all", "items", "of", "config", "are", "in", "schema" ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/autoscaler/autoscaler.py#L681-L701
24,260
ray-project/ray
python/ray/autoscaler/autoscaler.py
validate_config
def validate_config(config, schema=CLUSTER_CONFIG_SCHEMA): """Required Dicts indicate that no extra fields can be introduced.""" if not isinstance(config, dict): raise ValueError("Config {} is not a dictionary".format(config)) check_required(config, schema) check_extraneous(config, schema)
python
def validate_config(config, schema=CLUSTER_CONFIG_SCHEMA): """Required Dicts indicate that no extra fields can be introduced.""" if not isinstance(config, dict): raise ValueError("Config {} is not a dictionary".format(config)) check_required(config, schema) check_extraneous(config, schema)
[ "def", "validate_config", "(", "config", ",", "schema", "=", "CLUSTER_CONFIG_SCHEMA", ")", ":", "if", "not", "isinstance", "(", "config", ",", "dict", ")", ":", "raise", "ValueError", "(", "\"Config {} is not a dictionary\"", ".", "format", "(", "config", ")", ")", "check_required", "(", "config", ",", "schema", ")", "check_extraneous", "(", "config", ",", "schema", ")" ]
Required Dicts indicate that no extra fields can be introduced.
[ "Required", "Dicts", "indicate", "that", "no", "extra", "fields", "can", "be", "introduced", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/autoscaler/autoscaler.py#L704-L710
24,261
ray-project/ray
python/ray/parameter.py
RayParams.update
def update(self, **kwargs): """Update the settings according to the keyword arguments. Args: kwargs: The keyword arguments to set corresponding fields. """ for arg in kwargs: if hasattr(self, arg): setattr(self, arg, kwargs[arg]) else: raise ValueError("Invalid RayParams parameter in" " update: %s" % arg) self._check_usage()
python
def update(self, **kwargs): """Update the settings according to the keyword arguments. Args: kwargs: The keyword arguments to set corresponding fields. """ for arg in kwargs: if hasattr(self, arg): setattr(self, arg, kwargs[arg]) else: raise ValueError("Invalid RayParams parameter in" " update: %s" % arg) self._check_usage()
[ "def", "update", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "arg", "in", "kwargs", ":", "if", "hasattr", "(", "self", ",", "arg", ")", ":", "setattr", "(", "self", ",", "arg", ",", "kwargs", "[", "arg", "]", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid RayParams parameter in\"", "\" update: %s\"", "%", "arg", ")", "self", ".", "_check_usage", "(", ")" ]
Update the settings according to the keyword arguments. Args: kwargs: The keyword arguments to set corresponding fields.
[ "Update", "the", "settings", "according", "to", "the", "keyword", "arguments", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/parameter.py#L149-L162
24,262
ray-project/ray
python/ray/parameter.py
RayParams.update_if_absent
def update_if_absent(self, **kwargs): """Update the settings when the target fields are None. Args: kwargs: The keyword arguments to set corresponding fields. """ for arg in kwargs: if hasattr(self, arg): if getattr(self, arg) is None: setattr(self, arg, kwargs[arg]) else: raise ValueError("Invalid RayParams parameter in" " update_if_absent: %s" % arg) self._check_usage()
python
def update_if_absent(self, **kwargs): """Update the settings when the target fields are None. Args: kwargs: The keyword arguments to set corresponding fields. """ for arg in kwargs: if hasattr(self, arg): if getattr(self, arg) is None: setattr(self, arg, kwargs[arg]) else: raise ValueError("Invalid RayParams parameter in" " update_if_absent: %s" % arg) self._check_usage()
[ "def", "update_if_absent", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "arg", "in", "kwargs", ":", "if", "hasattr", "(", "self", ",", "arg", ")", ":", "if", "getattr", "(", "self", ",", "arg", ")", "is", "None", ":", "setattr", "(", "self", ",", "arg", ",", "kwargs", "[", "arg", "]", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid RayParams parameter in\"", "\" update_if_absent: %s\"", "%", "arg", ")", "self", ".", "_check_usage", "(", ")" ]
Update the settings when the target fields are None. Args: kwargs: The keyword arguments to set corresponding fields.
[ "Update", "the", "settings", "when", "the", "target", "fields", "are", "None", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/parameter.py#L164-L178
24,263
ray-project/ray
python/ray/actor.py
compute_actor_handle_id
def compute_actor_handle_id(actor_handle_id, num_forks): """Deterministically compute an actor handle ID. A new actor handle ID is generated when it is forked from another actor handle. The new handle ID is computed as hash(old_handle_id || num_forks). Args: actor_handle_id (common.ObjectID): The original actor handle ID. num_forks: The number of times the original actor handle has been forked so far. Returns: An ID for the new actor handle. """ assert isinstance(actor_handle_id, ActorHandleID) handle_id_hash = hashlib.sha1() handle_id_hash.update(actor_handle_id.binary()) handle_id_hash.update(str(num_forks).encode("ascii")) handle_id = handle_id_hash.digest() return ActorHandleID(handle_id)
python
def compute_actor_handle_id(actor_handle_id, num_forks): """Deterministically compute an actor handle ID. A new actor handle ID is generated when it is forked from another actor handle. The new handle ID is computed as hash(old_handle_id || num_forks). Args: actor_handle_id (common.ObjectID): The original actor handle ID. num_forks: The number of times the original actor handle has been forked so far. Returns: An ID for the new actor handle. """ assert isinstance(actor_handle_id, ActorHandleID) handle_id_hash = hashlib.sha1() handle_id_hash.update(actor_handle_id.binary()) handle_id_hash.update(str(num_forks).encode("ascii")) handle_id = handle_id_hash.digest() return ActorHandleID(handle_id)
[ "def", "compute_actor_handle_id", "(", "actor_handle_id", ",", "num_forks", ")", ":", "assert", "isinstance", "(", "actor_handle_id", ",", "ActorHandleID", ")", "handle_id_hash", "=", "hashlib", ".", "sha1", "(", ")", "handle_id_hash", ".", "update", "(", "actor_handle_id", ".", "binary", "(", ")", ")", "handle_id_hash", ".", "update", "(", "str", "(", "num_forks", ")", ".", "encode", "(", "\"ascii\"", ")", ")", "handle_id", "=", "handle_id_hash", ".", "digest", "(", ")", "return", "ActorHandleID", "(", "handle_id", ")" ]
Deterministically compute an actor handle ID. A new actor handle ID is generated when it is forked from another actor handle. The new handle ID is computed as hash(old_handle_id || num_forks). Args: actor_handle_id (common.ObjectID): The original actor handle ID. num_forks: The number of times the original actor handle has been forked so far. Returns: An ID for the new actor handle.
[ "Deterministically", "compute", "an", "actor", "handle", "ID", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/actor.py#L27-L46
24,264
ray-project/ray
python/ray/actor.py
compute_actor_handle_id_non_forked
def compute_actor_handle_id_non_forked(actor_handle_id, current_task_id): """Deterministically compute an actor handle ID in the non-forked case. This code path is used whenever an actor handle is pickled and unpickled (for example, if a remote function closes over an actor handle). Then, whenever the actor handle is used, a new actor handle ID will be generated on the fly as a deterministic function of the actor ID, the previous actor handle ID and the current task ID. TODO(rkn): It may be possible to cause problems by closing over multiple actor handles in a remote function, which then get unpickled and give rise to the same actor handle IDs. Args: actor_handle_id: The original actor handle ID. current_task_id: The ID of the task that is unpickling the handle. Returns: An ID for the new actor handle. """ assert isinstance(actor_handle_id, ActorHandleID) assert isinstance(current_task_id, TaskID) handle_id_hash = hashlib.sha1() handle_id_hash.update(actor_handle_id.binary()) handle_id_hash.update(current_task_id.binary()) handle_id = handle_id_hash.digest() return ActorHandleID(handle_id)
python
def compute_actor_handle_id_non_forked(actor_handle_id, current_task_id): """Deterministically compute an actor handle ID in the non-forked case. This code path is used whenever an actor handle is pickled and unpickled (for example, if a remote function closes over an actor handle). Then, whenever the actor handle is used, a new actor handle ID will be generated on the fly as a deterministic function of the actor ID, the previous actor handle ID and the current task ID. TODO(rkn): It may be possible to cause problems by closing over multiple actor handles in a remote function, which then get unpickled and give rise to the same actor handle IDs. Args: actor_handle_id: The original actor handle ID. current_task_id: The ID of the task that is unpickling the handle. Returns: An ID for the new actor handle. """ assert isinstance(actor_handle_id, ActorHandleID) assert isinstance(current_task_id, TaskID) handle_id_hash = hashlib.sha1() handle_id_hash.update(actor_handle_id.binary()) handle_id_hash.update(current_task_id.binary()) handle_id = handle_id_hash.digest() return ActorHandleID(handle_id)
[ "def", "compute_actor_handle_id_non_forked", "(", "actor_handle_id", ",", "current_task_id", ")", ":", "assert", "isinstance", "(", "actor_handle_id", ",", "ActorHandleID", ")", "assert", "isinstance", "(", "current_task_id", ",", "TaskID", ")", "handle_id_hash", "=", "hashlib", ".", "sha1", "(", ")", "handle_id_hash", ".", "update", "(", "actor_handle_id", ".", "binary", "(", ")", ")", "handle_id_hash", ".", "update", "(", "current_task_id", ".", "binary", "(", ")", ")", "handle_id", "=", "handle_id_hash", ".", "digest", "(", ")", "return", "ActorHandleID", "(", "handle_id", ")" ]
Deterministically compute an actor handle ID in the non-forked case. This code path is used whenever an actor handle is pickled and unpickled (for example, if a remote function closes over an actor handle). Then, whenever the actor handle is used, a new actor handle ID will be generated on the fly as a deterministic function of the actor ID, the previous actor handle ID and the current task ID. TODO(rkn): It may be possible to cause problems by closing over multiple actor handles in a remote function, which then get unpickled and give rise to the same actor handle IDs. Args: actor_handle_id: The original actor handle ID. current_task_id: The ID of the task that is unpickling the handle. Returns: An ID for the new actor handle.
[ "Deterministically", "compute", "an", "actor", "handle", "ID", "in", "the", "non", "-", "forked", "case", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/actor.py#L49-L75
24,265
ray-project/ray
python/ray/actor.py
method
def method(*args, **kwargs): """Annotate an actor method. .. code-block:: python @ray.remote class Foo(object): @ray.method(num_return_vals=2) def bar(self): return 1, 2 f = Foo.remote() _, _ = f.bar.remote() Args: num_return_vals: The number of object IDs that should be returned by invocations of this actor method. """ assert len(args) == 0 assert len(kwargs) == 1 assert "num_return_vals" in kwargs num_return_vals = kwargs["num_return_vals"] def annotate_method(method): method.__ray_num_return_vals__ = num_return_vals return method return annotate_method
python
def method(*args, **kwargs): """Annotate an actor method. .. code-block:: python @ray.remote class Foo(object): @ray.method(num_return_vals=2) def bar(self): return 1, 2 f = Foo.remote() _, _ = f.bar.remote() Args: num_return_vals: The number of object IDs that should be returned by invocations of this actor method. """ assert len(args) == 0 assert len(kwargs) == 1 assert "num_return_vals" in kwargs num_return_vals = kwargs["num_return_vals"] def annotate_method(method): method.__ray_num_return_vals__ = num_return_vals return method return annotate_method
[ "def", "method", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "assert", "len", "(", "args", ")", "==", "0", "assert", "len", "(", "kwargs", ")", "==", "1", "assert", "\"num_return_vals\"", "in", "kwargs", "num_return_vals", "=", "kwargs", "[", "\"num_return_vals\"", "]", "def", "annotate_method", "(", "method", ")", ":", "method", ".", "__ray_num_return_vals__", "=", "num_return_vals", "return", "method", "return", "annotate_method" ]
Annotate an actor method. .. code-block:: python @ray.remote class Foo(object): @ray.method(num_return_vals=2) def bar(self): return 1, 2 f = Foo.remote() _, _ = f.bar.remote() Args: num_return_vals: The number of object IDs that should be returned by invocations of this actor method.
[ "Annotate", "an", "actor", "method", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/actor.py#L78-L106
24,266
ray-project/ray
python/ray/actor.py
exit_actor
def exit_actor(): """Intentionally exit the current actor. This function is used to disconnect an actor and exit the worker. Raises: Exception: An exception is raised if this is a driver or this worker is not an actor. """ worker = ray.worker.global_worker if worker.mode == ray.WORKER_MODE and not worker.actor_id.is_nil(): # Disconnect the worker from the raylet. The point of # this is so that when the worker kills itself below, the # raylet won't push an error message to the driver. worker.raylet_client.disconnect() ray.disconnect() # Disconnect global state from GCS. ray.global_state.disconnect() sys.exit(0) assert False, "This process should have terminated." else: raise Exception("exit_actor called on a non-actor worker.")
python
def exit_actor(): """Intentionally exit the current actor. This function is used to disconnect an actor and exit the worker. Raises: Exception: An exception is raised if this is a driver or this worker is not an actor. """ worker = ray.worker.global_worker if worker.mode == ray.WORKER_MODE and not worker.actor_id.is_nil(): # Disconnect the worker from the raylet. The point of # this is so that when the worker kills itself below, the # raylet won't push an error message to the driver. worker.raylet_client.disconnect() ray.disconnect() # Disconnect global state from GCS. ray.global_state.disconnect() sys.exit(0) assert False, "This process should have terminated." else: raise Exception("exit_actor called on a non-actor worker.")
[ "def", "exit_actor", "(", ")", ":", "worker", "=", "ray", ".", "worker", ".", "global_worker", "if", "worker", ".", "mode", "==", "ray", ".", "WORKER_MODE", "and", "not", "worker", ".", "actor_id", ".", "is_nil", "(", ")", ":", "# Disconnect the worker from the raylet. The point of", "# this is so that when the worker kills itself below, the", "# raylet won't push an error message to the driver.", "worker", ".", "raylet_client", ".", "disconnect", "(", ")", "ray", ".", "disconnect", "(", ")", "# Disconnect global state from GCS.", "ray", ".", "global_state", ".", "disconnect", "(", ")", "sys", ".", "exit", "(", "0", ")", "assert", "False", ",", "\"This process should have terminated.\"", "else", ":", "raise", "Exception", "(", "\"exit_actor called on a non-actor worker.\"", ")" ]
Intentionally exit the current actor. This function is used to disconnect an actor and exit the worker. Raises: Exception: An exception is raised if this is a driver or this worker is not an actor.
[ "Intentionally", "exit", "the", "current", "actor", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/actor.py#L736-L757
24,267
ray-project/ray
python/ray/actor.py
get_checkpoints_for_actor
def get_checkpoints_for_actor(actor_id): """Get the available checkpoints for the given actor ID, return a list sorted by checkpoint timestamp in descending order. """ checkpoint_info = ray.worker.global_state.actor_checkpoint_info(actor_id) if checkpoint_info is None: return [] checkpoints = [ Checkpoint(checkpoint_id, timestamp) for checkpoint_id, timestamp in zip(checkpoint_info["CheckpointIds"], checkpoint_info["Timestamps"]) ] return sorted( checkpoints, key=lambda checkpoint: checkpoint.timestamp, reverse=True, )
python
def get_checkpoints_for_actor(actor_id): """Get the available checkpoints for the given actor ID, return a list sorted by checkpoint timestamp in descending order. """ checkpoint_info = ray.worker.global_state.actor_checkpoint_info(actor_id) if checkpoint_info is None: return [] checkpoints = [ Checkpoint(checkpoint_id, timestamp) for checkpoint_id, timestamp in zip(checkpoint_info["CheckpointIds"], checkpoint_info["Timestamps"]) ] return sorted( checkpoints, key=lambda checkpoint: checkpoint.timestamp, reverse=True, )
[ "def", "get_checkpoints_for_actor", "(", "actor_id", ")", ":", "checkpoint_info", "=", "ray", ".", "worker", ".", "global_state", ".", "actor_checkpoint_info", "(", "actor_id", ")", "if", "checkpoint_info", "is", "None", ":", "return", "[", "]", "checkpoints", "=", "[", "Checkpoint", "(", "checkpoint_id", ",", "timestamp", ")", "for", "checkpoint_id", ",", "timestamp", "in", "zip", "(", "checkpoint_info", "[", "\"CheckpointIds\"", "]", ",", "checkpoint_info", "[", "\"Timestamps\"", "]", ")", "]", "return", "sorted", "(", "checkpoints", ",", "key", "=", "lambda", "checkpoint", ":", "checkpoint", ".", "timestamp", ",", "reverse", "=", "True", ",", ")" ]
Get the available checkpoints for the given actor ID, return a list sorted by checkpoint timestamp in descending order.
[ "Get", "the", "available", "checkpoints", "for", "the", "given", "actor", "ID", "return", "a", "list", "sorted", "by", "checkpoint", "timestamp", "in", "descending", "order", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/actor.py#L869-L884
24,268
ray-project/ray
python/ray/actor.py
ActorHandle._actor_method_call
def _actor_method_call(self, method_name, args=None, kwargs=None, num_return_vals=None): """Method execution stub for an actor handle. This is the function that executes when `actor.method_name.remote(*args, **kwargs)` is called. Instead of executing locally, the method is packaged as a task and scheduled to the remote actor instance. Args: method_name: The name of the actor method to execute. args: A list of arguments for the actor method. kwargs: A dictionary of keyword arguments for the actor method. num_return_vals (int): The number of return values for the method. Returns: object_ids: A list of object IDs returned by the remote actor method. """ worker = ray.worker.get_global_worker() worker.check_connected() function_signature = self._ray_method_signatures[method_name] if args is None: args = [] if kwargs is None: kwargs = {} args = signature.extend_args(function_signature, args, kwargs) # Execute functions locally if Ray is run in LOCAL_MODE # Copy args to prevent the function from mutating them. if worker.mode == ray.LOCAL_MODE: return getattr(worker.actors[self._ray_actor_id], method_name)(*copy.deepcopy(args)) function_descriptor = FunctionDescriptor( self._ray_module_name, method_name, self._ray_class_name) with self._ray_actor_lock: object_ids = worker.submit_task( function_descriptor, args, actor_id=self._ray_actor_id, actor_handle_id=self._ray_actor_handle_id, actor_counter=self._ray_actor_counter, actor_creation_dummy_object_id=( self._ray_actor_creation_dummy_object_id), execution_dependencies=[self._ray_actor_cursor], new_actor_handles=self._ray_new_actor_handles, # We add one for the dummy return ID. num_return_vals=num_return_vals + 1, resources={"CPU": self._ray_actor_method_cpus}, placement_resources={}, driver_id=self._ray_actor_driver_id, ) # Update the actor counter and cursor to reflect the most recent # invocation. 
self._ray_actor_counter += 1 # The last object returned is the dummy object that should be # passed in to the next actor method. Do not return it to the user. self._ray_actor_cursor = object_ids.pop() # We have notified the backend of the new actor handles to expect # since the last task was submitted, so clear the list. self._ray_new_actor_handles = [] if len(object_ids) == 1: object_ids = object_ids[0] elif len(object_ids) == 0: object_ids = None return object_ids
python
def _actor_method_call(self, method_name, args=None, kwargs=None, num_return_vals=None): """Method execution stub for an actor handle. This is the function that executes when `actor.method_name.remote(*args, **kwargs)` is called. Instead of executing locally, the method is packaged as a task and scheduled to the remote actor instance. Args: method_name: The name of the actor method to execute. args: A list of arguments for the actor method. kwargs: A dictionary of keyword arguments for the actor method. num_return_vals (int): The number of return values for the method. Returns: object_ids: A list of object IDs returned by the remote actor method. """ worker = ray.worker.get_global_worker() worker.check_connected() function_signature = self._ray_method_signatures[method_name] if args is None: args = [] if kwargs is None: kwargs = {} args = signature.extend_args(function_signature, args, kwargs) # Execute functions locally if Ray is run in LOCAL_MODE # Copy args to prevent the function from mutating them. if worker.mode == ray.LOCAL_MODE: return getattr(worker.actors[self._ray_actor_id], method_name)(*copy.deepcopy(args)) function_descriptor = FunctionDescriptor( self._ray_module_name, method_name, self._ray_class_name) with self._ray_actor_lock: object_ids = worker.submit_task( function_descriptor, args, actor_id=self._ray_actor_id, actor_handle_id=self._ray_actor_handle_id, actor_counter=self._ray_actor_counter, actor_creation_dummy_object_id=( self._ray_actor_creation_dummy_object_id), execution_dependencies=[self._ray_actor_cursor], new_actor_handles=self._ray_new_actor_handles, # We add one for the dummy return ID. num_return_vals=num_return_vals + 1, resources={"CPU": self._ray_actor_method_cpus}, placement_resources={}, driver_id=self._ray_actor_driver_id, ) # Update the actor counter and cursor to reflect the most recent # invocation. 
self._ray_actor_counter += 1 # The last object returned is the dummy object that should be # passed in to the next actor method. Do not return it to the user. self._ray_actor_cursor = object_ids.pop() # We have notified the backend of the new actor handles to expect # since the last task was submitted, so clear the list. self._ray_new_actor_handles = [] if len(object_ids) == 1: object_ids = object_ids[0] elif len(object_ids) == 0: object_ids = None return object_ids
[ "def", "_actor_method_call", "(", "self", ",", "method_name", ",", "args", "=", "None", ",", "kwargs", "=", "None", ",", "num_return_vals", "=", "None", ")", ":", "worker", "=", "ray", ".", "worker", ".", "get_global_worker", "(", ")", "worker", ".", "check_connected", "(", ")", "function_signature", "=", "self", ".", "_ray_method_signatures", "[", "method_name", "]", "if", "args", "is", "None", ":", "args", "=", "[", "]", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "args", "=", "signature", ".", "extend_args", "(", "function_signature", ",", "args", ",", "kwargs", ")", "# Execute functions locally if Ray is run in LOCAL_MODE", "# Copy args to prevent the function from mutating them.", "if", "worker", ".", "mode", "==", "ray", ".", "LOCAL_MODE", ":", "return", "getattr", "(", "worker", ".", "actors", "[", "self", ".", "_ray_actor_id", "]", ",", "method_name", ")", "(", "*", "copy", ".", "deepcopy", "(", "args", ")", ")", "function_descriptor", "=", "FunctionDescriptor", "(", "self", ".", "_ray_module_name", ",", "method_name", ",", "self", ".", "_ray_class_name", ")", "with", "self", ".", "_ray_actor_lock", ":", "object_ids", "=", "worker", ".", "submit_task", "(", "function_descriptor", ",", "args", ",", "actor_id", "=", "self", ".", "_ray_actor_id", ",", "actor_handle_id", "=", "self", ".", "_ray_actor_handle_id", ",", "actor_counter", "=", "self", ".", "_ray_actor_counter", ",", "actor_creation_dummy_object_id", "=", "(", "self", ".", "_ray_actor_creation_dummy_object_id", ")", ",", "execution_dependencies", "=", "[", "self", ".", "_ray_actor_cursor", "]", ",", "new_actor_handles", "=", "self", ".", "_ray_new_actor_handles", ",", "# We add one for the dummy return ID.", "num_return_vals", "=", "num_return_vals", "+", "1", ",", "resources", "=", "{", "\"CPU\"", ":", "self", ".", "_ray_actor_method_cpus", "}", ",", "placement_resources", "=", "{", "}", ",", "driver_id", "=", "self", ".", "_ray_actor_driver_id", ",", ")", "# Update the actor 
counter and cursor to reflect the most recent", "# invocation.", "self", ".", "_ray_actor_counter", "+=", "1", "# The last object returned is the dummy object that should be", "# passed in to the next actor method. Do not return it to the user.", "self", ".", "_ray_actor_cursor", "=", "object_ids", ".", "pop", "(", ")", "# We have notified the backend of the new actor handles to expect", "# since the last task was submitted, so clear the list.", "self", ".", "_ray_new_actor_handles", "=", "[", "]", "if", "len", "(", "object_ids", ")", "==", "1", ":", "object_ids", "=", "object_ids", "[", "0", "]", "elif", "len", "(", "object_ids", ")", "==", "0", ":", "object_ids", "=", "None", "return", "object_ids" ]
Method execution stub for an actor handle. This is the function that executes when `actor.method_name.remote(*args, **kwargs)` is called. Instead of executing locally, the method is packaged as a task and scheduled to the remote actor instance. Args: method_name: The name of the actor method to execute. args: A list of arguments for the actor method. kwargs: A dictionary of keyword arguments for the actor method. num_return_vals (int): The number of return values for the method. Returns: object_ids: A list of object IDs returned by the remote actor method.
[ "Method", "execution", "stub", "for", "an", "actor", "handle", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/actor.py#L442-L515
24,269
ray-project/ray
python/ray/rllib/optimizers/multi_gpu_impl.py
LocalSyncParallelOptimizer.optimize
def optimize(self, sess, batch_index): """Run a single step of SGD. Runs a SGD step over a slice of the preloaded batch with size given by self._loaded_per_device_batch_size and offset given by the batch_index argument. Updates shared model weights based on the averaged per-device gradients. Args: sess: TensorFlow session. batch_index: Offset into the preloaded data. This value must be between `0` and `tuples_per_device`. The amount of data to process is at most `max_per_device_batch_size`. Returns: The outputs of extra_ops evaluated over the batch. """ feed_dict = { self._batch_index: batch_index, self._per_device_batch_size: self._loaded_per_device_batch_size, self._max_seq_len: self._loaded_max_seq_len, } for tower in self._towers: feed_dict.update(tower.loss_graph.extra_compute_grad_feed_dict()) fetches = {"train": self._train_op} for tower in self._towers: fetches.update(tower.loss_graph.extra_compute_grad_fetches()) return sess.run(fetches, feed_dict=feed_dict)
python
def optimize(self, sess, batch_index): """Run a single step of SGD. Runs a SGD step over a slice of the preloaded batch with size given by self._loaded_per_device_batch_size and offset given by the batch_index argument. Updates shared model weights based on the averaged per-device gradients. Args: sess: TensorFlow session. batch_index: Offset into the preloaded data. This value must be between `0` and `tuples_per_device`. The amount of data to process is at most `max_per_device_batch_size`. Returns: The outputs of extra_ops evaluated over the batch. """ feed_dict = { self._batch_index: batch_index, self._per_device_batch_size: self._loaded_per_device_batch_size, self._max_seq_len: self._loaded_max_seq_len, } for tower in self._towers: feed_dict.update(tower.loss_graph.extra_compute_grad_feed_dict()) fetches = {"train": self._train_op} for tower in self._towers: fetches.update(tower.loss_graph.extra_compute_grad_fetches()) return sess.run(fetches, feed_dict=feed_dict)
[ "def", "optimize", "(", "self", ",", "sess", ",", "batch_index", ")", ":", "feed_dict", "=", "{", "self", ".", "_batch_index", ":", "batch_index", ",", "self", ".", "_per_device_batch_size", ":", "self", ".", "_loaded_per_device_batch_size", ",", "self", ".", "_max_seq_len", ":", "self", ".", "_loaded_max_seq_len", ",", "}", "for", "tower", "in", "self", ".", "_towers", ":", "feed_dict", ".", "update", "(", "tower", ".", "loss_graph", ".", "extra_compute_grad_feed_dict", "(", ")", ")", "fetches", "=", "{", "\"train\"", ":", "self", ".", "_train_op", "}", "for", "tower", "in", "self", ".", "_towers", ":", "fetches", ".", "update", "(", "tower", ".", "loss_graph", ".", "extra_compute_grad_fetches", "(", ")", ")", "return", "sess", ".", "run", "(", "fetches", ",", "feed_dict", "=", "feed_dict", ")" ]
Run a single step of SGD. Runs a SGD step over a slice of the preloaded batch with size given by self._loaded_per_device_batch_size and offset given by the batch_index argument. Updates shared model weights based on the averaged per-device gradients. Args: sess: TensorFlow session. batch_index: Offset into the preloaded data. This value must be between `0` and `tuples_per_device`. The amount of data to process is at most `max_per_device_batch_size`. Returns: The outputs of extra_ops evaluated over the batch.
[ "Run", "a", "single", "step", "of", "SGD", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/optimizers/multi_gpu_impl.py#L227-L258
24,270
ray-project/ray
python/ray/tune/automl/genetic_searcher.py
GeneticSearch._selection
def _selection(candidate): """Perform selection action to candidates. For example, new gene = sample_1 + the 5th bit of sample2. Args: candidate: List of candidate genes (encodings). Examples: >>> # Genes that represent 3 parameters >>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]]) >>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]]) >>> new_gene = _selection([gene1, gene2]) >>> # new_gene could be gene1 overwritten with the >>> # 2nd parameter of gene2 >>> # in which case: >>> # new_gene[0] = gene1[0] >>> # new_gene[1] = gene2[1] >>> # new_gene[2] = gene1[0] Returns: New gene (encoding) """ sample_index1 = np.random.choice(len(candidate)) sample_index2 = np.random.choice(len(candidate)) sample_1 = candidate[sample_index1] sample_2 = candidate[sample_index2] select_index = np.random.choice(len(sample_1)) logger.info( LOGGING_PREFIX + "Perform selection from %sth to %sth at index=%s", sample_index2, sample_index1, select_index) next_gen = [] for i in range(len(sample_1)): if i is select_index: next_gen.append(sample_2[i]) else: next_gen.append(sample_1[i]) return next_gen
python
def _selection(candidate): """Perform selection action to candidates. For example, new gene = sample_1 + the 5th bit of sample2. Args: candidate: List of candidate genes (encodings). Examples: >>> # Genes that represent 3 parameters >>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]]) >>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]]) >>> new_gene = _selection([gene1, gene2]) >>> # new_gene could be gene1 overwritten with the >>> # 2nd parameter of gene2 >>> # in which case: >>> # new_gene[0] = gene1[0] >>> # new_gene[1] = gene2[1] >>> # new_gene[2] = gene1[0] Returns: New gene (encoding) """ sample_index1 = np.random.choice(len(candidate)) sample_index2 = np.random.choice(len(candidate)) sample_1 = candidate[sample_index1] sample_2 = candidate[sample_index2] select_index = np.random.choice(len(sample_1)) logger.info( LOGGING_PREFIX + "Perform selection from %sth to %sth at index=%s", sample_index2, sample_index1, select_index) next_gen = [] for i in range(len(sample_1)): if i is select_index: next_gen.append(sample_2[i]) else: next_gen.append(sample_1[i]) return next_gen
[ "def", "_selection", "(", "candidate", ")", ":", "sample_index1", "=", "np", ".", "random", ".", "choice", "(", "len", "(", "candidate", ")", ")", "sample_index2", "=", "np", ".", "random", ".", "choice", "(", "len", "(", "candidate", ")", ")", "sample_1", "=", "candidate", "[", "sample_index1", "]", "sample_2", "=", "candidate", "[", "sample_index2", "]", "select_index", "=", "np", ".", "random", ".", "choice", "(", "len", "(", "sample_1", ")", ")", "logger", ".", "info", "(", "LOGGING_PREFIX", "+", "\"Perform selection from %sth to %sth at index=%s\"", ",", "sample_index2", ",", "sample_index1", ",", "select_index", ")", "next_gen", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "sample_1", ")", ")", ":", "if", "i", "is", "select_index", ":", "next_gen", ".", "append", "(", "sample_2", "[", "i", "]", ")", "else", ":", "next_gen", ".", "append", "(", "sample_1", "[", "i", "]", ")", "return", "next_gen" ]
Perform selection action to candidates. For example, new gene = sample_1 + the 5th bit of sample2. Args: candidate: List of candidate genes (encodings). Examples: >>> # Genes that represent 3 parameters >>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]]) >>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]]) >>> new_gene = _selection([gene1, gene2]) >>> # new_gene could be gene1 overwritten with the >>> # 2nd parameter of gene2 >>> # in which case: >>> # new_gene[0] = gene1[0] >>> # new_gene[1] = gene2[1] >>> # new_gene[2] = gene1[0] Returns: New gene (encoding)
[ "Perform", "selection", "action", "to", "candidates", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/automl/genetic_searcher.py#L140-L178
24,271
ray-project/ray
python/ray/tune/automl/genetic_searcher.py
GeneticSearch._crossover
def _crossover(candidate): """Perform crossover action to candidates. For example, new gene = 60% sample_1 + 40% sample_2. Args: candidate: List of candidate genes (encodings). Examples: >>> # Genes that represent 3 parameters >>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]]) >>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]]) >>> new_gene = _crossover([gene1, gene2]) >>> # new_gene could be the first [n=1] parameters of >>> # gene1 + the rest of gene2 >>> # in which case: >>> # new_gene[0] = gene1[0] >>> # new_gene[1] = gene2[1] >>> # new_gene[2] = gene1[1] Returns: New gene (encoding) """ sample_index1 = np.random.choice(len(candidate)) sample_index2 = np.random.choice(len(candidate)) sample_1 = candidate[sample_index1] sample_2 = candidate[sample_index2] cross_index = int(len(sample_1) * np.random.uniform(low=0.3, high=0.7)) logger.info( LOGGING_PREFIX + "Perform crossover between %sth and %sth at index=%s", sample_index1, sample_index2, cross_index) next_gen = [] for i in range(len(sample_1)): if i > cross_index: next_gen.append(sample_2[i]) else: next_gen.append(sample_1[i]) return next_gen
python
def _crossover(candidate): """Perform crossover action to candidates. For example, new gene = 60% sample_1 + 40% sample_2. Args: candidate: List of candidate genes (encodings). Examples: >>> # Genes that represent 3 parameters >>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]]) >>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]]) >>> new_gene = _crossover([gene1, gene2]) >>> # new_gene could be the first [n=1] parameters of >>> # gene1 + the rest of gene2 >>> # in which case: >>> # new_gene[0] = gene1[0] >>> # new_gene[1] = gene2[1] >>> # new_gene[2] = gene1[1] Returns: New gene (encoding) """ sample_index1 = np.random.choice(len(candidate)) sample_index2 = np.random.choice(len(candidate)) sample_1 = candidate[sample_index1] sample_2 = candidate[sample_index2] cross_index = int(len(sample_1) * np.random.uniform(low=0.3, high=0.7)) logger.info( LOGGING_PREFIX + "Perform crossover between %sth and %sth at index=%s", sample_index1, sample_index2, cross_index) next_gen = [] for i in range(len(sample_1)): if i > cross_index: next_gen.append(sample_2[i]) else: next_gen.append(sample_1[i]) return next_gen
[ "def", "_crossover", "(", "candidate", ")", ":", "sample_index1", "=", "np", ".", "random", ".", "choice", "(", "len", "(", "candidate", ")", ")", "sample_index2", "=", "np", ".", "random", ".", "choice", "(", "len", "(", "candidate", ")", ")", "sample_1", "=", "candidate", "[", "sample_index1", "]", "sample_2", "=", "candidate", "[", "sample_index2", "]", "cross_index", "=", "int", "(", "len", "(", "sample_1", ")", "*", "np", ".", "random", ".", "uniform", "(", "low", "=", "0.3", ",", "high", "=", "0.7", ")", ")", "logger", ".", "info", "(", "LOGGING_PREFIX", "+", "\"Perform crossover between %sth and %sth at index=%s\"", ",", "sample_index1", ",", "sample_index2", ",", "cross_index", ")", "next_gen", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "sample_1", ")", ")", ":", "if", "i", ">", "cross_index", ":", "next_gen", ".", "append", "(", "sample_2", "[", "i", "]", ")", "else", ":", "next_gen", ".", "append", "(", "sample_1", "[", "i", "]", ")", "return", "next_gen" ]
Perform crossover action to candidates. For example, new gene = 60% sample_1 + 40% sample_2. Args: candidate: List of candidate genes (encodings). Examples: >>> # Genes that represent 3 parameters >>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]]) >>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]]) >>> new_gene = _crossover([gene1, gene2]) >>> # new_gene could be the first [n=1] parameters of >>> # gene1 + the rest of gene2 >>> # in which case: >>> # new_gene[0] = gene1[0] >>> # new_gene[1] = gene2[1] >>> # new_gene[2] = gene1[1] Returns: New gene (encoding)
[ "Perform", "crossover", "action", "to", "candidates", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/automl/genetic_searcher.py#L181-L220
24,272
ray-project/ray
python/ray/tune/automl/genetic_searcher.py
GeneticSearch._mutation
def _mutation(candidate, rate=0.1): """Perform mutation action to candidates. For example, randomly change 10% of original sample Args: candidate: List of candidate genes (encodings). rate: Percentage of mutation bits Examples: >>> # Genes that represent 3 parameters >>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]]) >>> new_gene = _mutation([gene1]) >>> # new_gene could be the gene1 with the 3rd parameter changed >>> # new_gene[0] = gene1[0] >>> # new_gene[1] = gene1[1] >>> # new_gene[2] = [0, 1] != gene1[2] Returns: New gene (encoding) """ sample_index = np.random.choice(len(candidate)) sample = candidate[sample_index] idx_list = [] for i in range(int(max(len(sample) * rate, 1))): idx = np.random.choice(len(sample)) idx_list.append(idx) field = sample[idx] # one-hot encoding field[np.argmax(field)] = 0 bit = np.random.choice(field.shape[0]) field[bit] = 1 logger.info(LOGGING_PREFIX + "Perform mutation on %sth at index=%s", sample_index, str(idx_list)) return sample
python
def _mutation(candidate, rate=0.1): """Perform mutation action to candidates. For example, randomly change 10% of original sample Args: candidate: List of candidate genes (encodings). rate: Percentage of mutation bits Examples: >>> # Genes that represent 3 parameters >>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]]) >>> new_gene = _mutation([gene1]) >>> # new_gene could be the gene1 with the 3rd parameter changed >>> # new_gene[0] = gene1[0] >>> # new_gene[1] = gene1[1] >>> # new_gene[2] = [0, 1] != gene1[2] Returns: New gene (encoding) """ sample_index = np.random.choice(len(candidate)) sample = candidate[sample_index] idx_list = [] for i in range(int(max(len(sample) * rate, 1))): idx = np.random.choice(len(sample)) idx_list.append(idx) field = sample[idx] # one-hot encoding field[np.argmax(field)] = 0 bit = np.random.choice(field.shape[0]) field[bit] = 1 logger.info(LOGGING_PREFIX + "Perform mutation on %sth at index=%s", sample_index, str(idx_list)) return sample
[ "def", "_mutation", "(", "candidate", ",", "rate", "=", "0.1", ")", ":", "sample_index", "=", "np", ".", "random", ".", "choice", "(", "len", "(", "candidate", ")", ")", "sample", "=", "candidate", "[", "sample_index", "]", "idx_list", "=", "[", "]", "for", "i", "in", "range", "(", "int", "(", "max", "(", "len", "(", "sample", ")", "*", "rate", ",", "1", ")", ")", ")", ":", "idx", "=", "np", ".", "random", ".", "choice", "(", "len", "(", "sample", ")", ")", "idx_list", ".", "append", "(", "idx", ")", "field", "=", "sample", "[", "idx", "]", "# one-hot encoding", "field", "[", "np", ".", "argmax", "(", "field", ")", "]", "=", "0", "bit", "=", "np", ".", "random", ".", "choice", "(", "field", ".", "shape", "[", "0", "]", ")", "field", "[", "bit", "]", "=", "1", "logger", ".", "info", "(", "LOGGING_PREFIX", "+", "\"Perform mutation on %sth at index=%s\"", ",", "sample_index", ",", "str", "(", "idx_list", ")", ")", "return", "sample" ]
Perform mutation action to candidates. For example, randomly change 10% of original sample Args: candidate: List of candidate genes (encodings). rate: Percentage of mutation bits Examples: >>> # Genes that represent 3 parameters >>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]]) >>> new_gene = _mutation([gene1]) >>> # new_gene could be the gene1 with the 3rd parameter changed >>> # new_gene[0] = gene1[0] >>> # new_gene[1] = gene1[1] >>> # new_gene[2] = [0, 1] != gene1[2] Returns: New gene (encoding)
[ "Perform", "mutation", "action", "to", "candidates", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/automl/genetic_searcher.py#L223-L258
24,273
ray-project/ray
python/ray/tune/ray_trial_executor.py
RayTrialExecutor._train
def _train(self, trial): """Start one iteration of training and save remote id.""" assert trial.status == Trial.RUNNING, trial.status remote = trial.runner.train.remote() # Local Mode if isinstance(remote, dict): remote = _LocalWrapper(remote) self._running[remote] = trial
python
def _train(self, trial): """Start one iteration of training and save remote id.""" assert trial.status == Trial.RUNNING, trial.status remote = trial.runner.train.remote() # Local Mode if isinstance(remote, dict): remote = _LocalWrapper(remote) self._running[remote] = trial
[ "def", "_train", "(", "self", ",", "trial", ")", ":", "assert", "trial", ".", "status", "==", "Trial", ".", "RUNNING", ",", "trial", ".", "status", "remote", "=", "trial", ".", "runner", ".", "train", ".", "remote", "(", ")", "# Local Mode", "if", "isinstance", "(", "remote", ",", "dict", ")", ":", "remote", "=", "_LocalWrapper", "(", "remote", ")", "self", ".", "_running", "[", "remote", "]", "=", "trial" ]
Start one iteration of training and save remote id.
[ "Start", "one", "iteration", "of", "training", "and", "save", "remote", "id", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/ray_trial_executor.py#L107-L117
24,274
ray-project/ray
python/ray/tune/ray_trial_executor.py
RayTrialExecutor._start_trial
def _start_trial(self, trial, checkpoint=None): """Starts trial and restores last result if trial was paused. Raises: ValueError if restoring from checkpoint fails. """ prior_status = trial.status self.set_status(trial, Trial.RUNNING) trial.runner = self._setup_runner( trial, reuse_allowed=checkpoint is not None or trial._checkpoint.value is not None) if not self.restore(trial, checkpoint): if trial.status == Trial.ERROR: raise RuntimeError( "Restore from checkpoint failed for Trial {}.".format( str(trial))) previous_run = self._find_item(self._paused, trial) if (prior_status == Trial.PAUSED and previous_run): # If Trial was in flight when paused, self._paused stores result. self._paused.pop(previous_run[0]) self._running[previous_run[0]] = trial else: self._train(trial)
python
def _start_trial(self, trial, checkpoint=None): """Starts trial and restores last result if trial was paused. Raises: ValueError if restoring from checkpoint fails. """ prior_status = trial.status self.set_status(trial, Trial.RUNNING) trial.runner = self._setup_runner( trial, reuse_allowed=checkpoint is not None or trial._checkpoint.value is not None) if not self.restore(trial, checkpoint): if trial.status == Trial.ERROR: raise RuntimeError( "Restore from checkpoint failed for Trial {}.".format( str(trial))) previous_run = self._find_item(self._paused, trial) if (prior_status == Trial.PAUSED and previous_run): # If Trial was in flight when paused, self._paused stores result. self._paused.pop(previous_run[0]) self._running[previous_run[0]] = trial else: self._train(trial)
[ "def", "_start_trial", "(", "self", ",", "trial", ",", "checkpoint", "=", "None", ")", ":", "prior_status", "=", "trial", ".", "status", "self", ".", "set_status", "(", "trial", ",", "Trial", ".", "RUNNING", ")", "trial", ".", "runner", "=", "self", ".", "_setup_runner", "(", "trial", ",", "reuse_allowed", "=", "checkpoint", "is", "not", "None", "or", "trial", ".", "_checkpoint", ".", "value", "is", "not", "None", ")", "if", "not", "self", ".", "restore", "(", "trial", ",", "checkpoint", ")", ":", "if", "trial", ".", "status", "==", "Trial", ".", "ERROR", ":", "raise", "RuntimeError", "(", "\"Restore from checkpoint failed for Trial {}.\"", ".", "format", "(", "str", "(", "trial", ")", ")", ")", "previous_run", "=", "self", ".", "_find_item", "(", "self", ".", "_paused", ",", "trial", ")", "if", "(", "prior_status", "==", "Trial", ".", "PAUSED", "and", "previous_run", ")", ":", "# If Trial was in flight when paused, self._paused stores result.", "self", ".", "_paused", ".", "pop", "(", "previous_run", "[", "0", "]", ")", "self", ".", "_running", "[", "previous_run", "[", "0", "]", "]", "=", "trial", "else", ":", "self", ".", "_train", "(", "trial", ")" ]
Starts trial and restores last result if trial was paused. Raises: ValueError if restoring from checkpoint fails.
[ "Starts", "trial", "and", "restores", "last", "result", "if", "trial", "was", "paused", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/ray_trial_executor.py#L119-L143
24,275
ray-project/ray
python/ray/tune/ray_trial_executor.py
RayTrialExecutor._stop_trial
def _stop_trial(self, trial, error=False, error_msg=None, stop_logger=True): """Stops this trial. Stops this trial, releasing all allocating resources. If stopping the trial fails, the run will be marked as terminated in error, but no exception will be thrown. Args: error (bool): Whether to mark this trial as terminated in error. error_msg (str): Optional error message. stop_logger (bool): Whether to shut down the trial logger. """ if stop_logger: trial.close_logger() if error: self.set_status(trial, Trial.ERROR) else: self.set_status(trial, Trial.TERMINATED) try: trial.write_error_log(error_msg) if hasattr(trial, "runner") and trial.runner: if (not error and self._reuse_actors and self._cached_actor is None): logger.debug("Reusing actor for {}".format(trial.runner)) self._cached_actor = trial.runner else: logger.info( "Destroying actor for trial {}. If your trainable is " "slow to initialize, consider setting " "reuse_actors=True to reduce actor creation " "overheads.".format(trial)) trial.runner.stop.remote() trial.runner.__ray_terminate__.remote() except Exception: logger.exception("Error stopping runner for Trial %s", str(trial)) self.set_status(trial, Trial.ERROR) finally: trial.runner = None
python
def _stop_trial(self, trial, error=False, error_msg=None, stop_logger=True): """Stops this trial. Stops this trial, releasing all allocating resources. If stopping the trial fails, the run will be marked as terminated in error, but no exception will be thrown. Args: error (bool): Whether to mark this trial as terminated in error. error_msg (str): Optional error message. stop_logger (bool): Whether to shut down the trial logger. """ if stop_logger: trial.close_logger() if error: self.set_status(trial, Trial.ERROR) else: self.set_status(trial, Trial.TERMINATED) try: trial.write_error_log(error_msg) if hasattr(trial, "runner") and trial.runner: if (not error and self._reuse_actors and self._cached_actor is None): logger.debug("Reusing actor for {}".format(trial.runner)) self._cached_actor = trial.runner else: logger.info( "Destroying actor for trial {}. If your trainable is " "slow to initialize, consider setting " "reuse_actors=True to reduce actor creation " "overheads.".format(trial)) trial.runner.stop.remote() trial.runner.__ray_terminate__.remote() except Exception: logger.exception("Error stopping runner for Trial %s", str(trial)) self.set_status(trial, Trial.ERROR) finally: trial.runner = None
[ "def", "_stop_trial", "(", "self", ",", "trial", ",", "error", "=", "False", ",", "error_msg", "=", "None", ",", "stop_logger", "=", "True", ")", ":", "if", "stop_logger", ":", "trial", ".", "close_logger", "(", ")", "if", "error", ":", "self", ".", "set_status", "(", "trial", ",", "Trial", ".", "ERROR", ")", "else", ":", "self", ".", "set_status", "(", "trial", ",", "Trial", ".", "TERMINATED", ")", "try", ":", "trial", ".", "write_error_log", "(", "error_msg", ")", "if", "hasattr", "(", "trial", ",", "\"runner\"", ")", "and", "trial", ".", "runner", ":", "if", "(", "not", "error", "and", "self", ".", "_reuse_actors", "and", "self", ".", "_cached_actor", "is", "None", ")", ":", "logger", ".", "debug", "(", "\"Reusing actor for {}\"", ".", "format", "(", "trial", ".", "runner", ")", ")", "self", ".", "_cached_actor", "=", "trial", ".", "runner", "else", ":", "logger", ".", "info", "(", "\"Destroying actor for trial {}. If your trainable is \"", "\"slow to initialize, consider setting \"", "\"reuse_actors=True to reduce actor creation \"", "\"overheads.\"", ".", "format", "(", "trial", ")", ")", "trial", ".", "runner", ".", "stop", ".", "remote", "(", ")", "trial", ".", "runner", ".", "__ray_terminate__", ".", "remote", "(", ")", "except", "Exception", ":", "logger", ".", "exception", "(", "\"Error stopping runner for Trial %s\"", ",", "str", "(", "trial", ")", ")", "self", ".", "set_status", "(", "trial", ",", "Trial", ".", "ERROR", ")", "finally", ":", "trial", ".", "runner", "=", "None" ]
Stops this trial. Stops this trial, releasing all allocating resources. If stopping the trial fails, the run will be marked as terminated in error, but no exception will be thrown. Args: error (bool): Whether to mark this trial as terminated in error. error_msg (str): Optional error message. stop_logger (bool): Whether to shut down the trial logger.
[ "Stops", "this", "trial", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/ray_trial_executor.py#L145-L186
24,276
ray-project/ray
python/ray/tune/ray_trial_executor.py
RayTrialExecutor.start_trial
def start_trial(self, trial, checkpoint=None): """Starts the trial. Will not return resources if trial repeatedly fails on start. Args: trial (Trial): Trial to be started. checkpoint (Checkpoint): A Python object or path storing the state of trial. """ self._commit_resources(trial.resources) try: self._start_trial(trial, checkpoint) except Exception as e: logger.exception("Error starting runner for Trial %s", str(trial)) error_msg = traceback.format_exc() time.sleep(2) self._stop_trial(trial, error=True, error_msg=error_msg) if isinstance(e, AbortTrialExecution): return # don't retry fatal Tune errors try: # This forces the trial to not start from checkpoint. trial.clear_checkpoint() logger.info( "Trying to start runner for Trial %s without checkpoint.", str(trial)) self._start_trial(trial) except Exception: logger.exception( "Error starting runner for Trial %s, aborting!", str(trial)) error_msg = traceback.format_exc() self._stop_trial(trial, error=True, error_msg=error_msg)
python
def start_trial(self, trial, checkpoint=None): """Starts the trial. Will not return resources if trial repeatedly fails on start. Args: trial (Trial): Trial to be started. checkpoint (Checkpoint): A Python object or path storing the state of trial. """ self._commit_resources(trial.resources) try: self._start_trial(trial, checkpoint) except Exception as e: logger.exception("Error starting runner for Trial %s", str(trial)) error_msg = traceback.format_exc() time.sleep(2) self._stop_trial(trial, error=True, error_msg=error_msg) if isinstance(e, AbortTrialExecution): return # don't retry fatal Tune errors try: # This forces the trial to not start from checkpoint. trial.clear_checkpoint() logger.info( "Trying to start runner for Trial %s without checkpoint.", str(trial)) self._start_trial(trial) except Exception: logger.exception( "Error starting runner for Trial %s, aborting!", str(trial)) error_msg = traceback.format_exc() self._stop_trial(trial, error=True, error_msg=error_msg)
[ "def", "start_trial", "(", "self", ",", "trial", ",", "checkpoint", "=", "None", ")", ":", "self", ".", "_commit_resources", "(", "trial", ".", "resources", ")", "try", ":", "self", ".", "_start_trial", "(", "trial", ",", "checkpoint", ")", "except", "Exception", "as", "e", ":", "logger", ".", "exception", "(", "\"Error starting runner for Trial %s\"", ",", "str", "(", "trial", ")", ")", "error_msg", "=", "traceback", ".", "format_exc", "(", ")", "time", ".", "sleep", "(", "2", ")", "self", ".", "_stop_trial", "(", "trial", ",", "error", "=", "True", ",", "error_msg", "=", "error_msg", ")", "if", "isinstance", "(", "e", ",", "AbortTrialExecution", ")", ":", "return", "# don't retry fatal Tune errors", "try", ":", "# This forces the trial to not start from checkpoint.", "trial", ".", "clear_checkpoint", "(", ")", "logger", ".", "info", "(", "\"Trying to start runner for Trial %s without checkpoint.\"", ",", "str", "(", "trial", ")", ")", "self", ".", "_start_trial", "(", "trial", ")", "except", "Exception", ":", "logger", ".", "exception", "(", "\"Error starting runner for Trial %s, aborting!\"", ",", "str", "(", "trial", ")", ")", "error_msg", "=", "traceback", ".", "format_exc", "(", ")", "self", ".", "_stop_trial", "(", "trial", ",", "error", "=", "True", ",", "error_msg", "=", "error_msg", ")" ]
Starts the trial. Will not return resources if trial repeatedly fails on start. Args: trial (Trial): Trial to be started. checkpoint (Checkpoint): A Python object or path storing the state of trial.
[ "Starts", "the", "trial", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/ray_trial_executor.py#L188-L221
24,277
ray-project/ray
python/ray/tune/ray_trial_executor.py
RayTrialExecutor.stop_trial
def stop_trial(self, trial, error=False, error_msg=None, stop_logger=True): """Only returns resources if resources allocated.""" prior_status = trial.status self._stop_trial( trial, error=error, error_msg=error_msg, stop_logger=stop_logger) if prior_status == Trial.RUNNING: logger.debug("Returning resources for Trial %s.", str(trial)) self._return_resources(trial.resources) out = self._find_item(self._running, trial) for result_id in out: self._running.pop(result_id)
python
def stop_trial(self, trial, error=False, error_msg=None, stop_logger=True): """Only returns resources if resources allocated.""" prior_status = trial.status self._stop_trial( trial, error=error, error_msg=error_msg, stop_logger=stop_logger) if prior_status == Trial.RUNNING: logger.debug("Returning resources for Trial %s.", str(trial)) self._return_resources(trial.resources) out = self._find_item(self._running, trial) for result_id in out: self._running.pop(result_id)
[ "def", "stop_trial", "(", "self", ",", "trial", ",", "error", "=", "False", ",", "error_msg", "=", "None", ",", "stop_logger", "=", "True", ")", ":", "prior_status", "=", "trial", ".", "status", "self", ".", "_stop_trial", "(", "trial", ",", "error", "=", "error", ",", "error_msg", "=", "error_msg", ",", "stop_logger", "=", "stop_logger", ")", "if", "prior_status", "==", "Trial", ".", "RUNNING", ":", "logger", ".", "debug", "(", "\"Returning resources for Trial %s.\"", ",", "str", "(", "trial", ")", ")", "self", ".", "_return_resources", "(", "trial", ".", "resources", ")", "out", "=", "self", ".", "_find_item", "(", "self", ".", "_running", ",", "trial", ")", "for", "result_id", "in", "out", ":", "self", ".", "_running", ".", "pop", "(", "result_id", ")" ]
Only returns resources if resources allocated.
[ "Only", "returns", "resources", "if", "resources", "allocated", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/ray_trial_executor.py#L229-L239
24,278
ray-project/ray
python/ray/tune/ray_trial_executor.py
RayTrialExecutor.fetch_result
def fetch_result(self, trial): """Fetches one result of the running trials. Returns: Result of the most recent trial training run.""" trial_future = self._find_item(self._running, trial) if not trial_future: raise ValueError("Trial was not running.") self._running.pop(trial_future[0]) with warn_if_slow("fetch_result"): result = ray.get(trial_future[0]) # For local mode if isinstance(result, _LocalWrapper): result = result.unwrap() return result
python
def fetch_result(self, trial): """Fetches one result of the running trials. Returns: Result of the most recent trial training run.""" trial_future = self._find_item(self._running, trial) if not trial_future: raise ValueError("Trial was not running.") self._running.pop(trial_future[0]) with warn_if_slow("fetch_result"): result = ray.get(trial_future[0]) # For local mode if isinstance(result, _LocalWrapper): result = result.unwrap() return result
[ "def", "fetch_result", "(", "self", ",", "trial", ")", ":", "trial_future", "=", "self", ".", "_find_item", "(", "self", ".", "_running", ",", "trial", ")", "if", "not", "trial_future", ":", "raise", "ValueError", "(", "\"Trial was not running.\"", ")", "self", ".", "_running", ".", "pop", "(", "trial_future", "[", "0", "]", ")", "with", "warn_if_slow", "(", "\"fetch_result\"", ")", ":", "result", "=", "ray", ".", "get", "(", "trial_future", "[", "0", "]", ")", "# For local mode", "if", "isinstance", "(", "result", ",", "_LocalWrapper", ")", ":", "result", "=", "result", ".", "unwrap", "(", ")", "return", "result" ]
Fetches one result of the running trials. Returns: Result of the most recent trial training run.
[ "Fetches", "one", "result", "of", "the", "running", "trials", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/ray_trial_executor.py#L305-L320
24,279
ray-project/ray
python/ray/tune/ray_trial_executor.py
RayTrialExecutor.has_resources
def has_resources(self, resources): """Returns whether this runner has at least the specified resources. This refreshes the Ray cluster resources if the time since last update has exceeded self._refresh_period. This also assumes that the cluster is not resizing very frequently. """ if time.time() - self._last_resource_refresh > self._refresh_period: self._update_avail_resources() currently_available = Resources.subtract(self._avail_resources, self._committed_resources) have_space = ( resources.cpu_total() <= currently_available.cpu and resources.gpu_total() <= currently_available.gpu and all( resources.get_res_total(res) <= currently_available.get(res) for res in resources.custom_resources)) if have_space: return True can_overcommit = self._queue_trials if (resources.cpu_total() > 0 and currently_available.cpu <= 0) or \ (resources.gpu_total() > 0 and currently_available.gpu <= 0) or \ any((resources.get_res_total(res_name) > 0 and currently_available.get(res_name) <= 0) for res_name in resources.custom_resources): can_overcommit = False # requested resource is already saturated if can_overcommit: logger.warning( "Allowing trial to start even though the " "cluster does not have enough free resources. Trial actors " "may appear to hang until enough resources are added to the " "cluster (e.g., via autoscaling). You can disable this " "behavior by specifying `queue_trials=False` in " "ray.tune.run().") return True return False
python
def has_resources(self, resources): """Returns whether this runner has at least the specified resources. This refreshes the Ray cluster resources if the time since last update has exceeded self._refresh_period. This also assumes that the cluster is not resizing very frequently. """ if time.time() - self._last_resource_refresh > self._refresh_period: self._update_avail_resources() currently_available = Resources.subtract(self._avail_resources, self._committed_resources) have_space = ( resources.cpu_total() <= currently_available.cpu and resources.gpu_total() <= currently_available.gpu and all( resources.get_res_total(res) <= currently_available.get(res) for res in resources.custom_resources)) if have_space: return True can_overcommit = self._queue_trials if (resources.cpu_total() > 0 and currently_available.cpu <= 0) or \ (resources.gpu_total() > 0 and currently_available.gpu <= 0) or \ any((resources.get_res_total(res_name) > 0 and currently_available.get(res_name) <= 0) for res_name in resources.custom_resources): can_overcommit = False # requested resource is already saturated if can_overcommit: logger.warning( "Allowing trial to start even though the " "cluster does not have enough free resources. Trial actors " "may appear to hang until enough resources are added to the " "cluster (e.g., via autoscaling). You can disable this " "behavior by specifying `queue_trials=False` in " "ray.tune.run().") return True return False
[ "def", "has_resources", "(", "self", ",", "resources", ")", ":", "if", "time", ".", "time", "(", ")", "-", "self", ".", "_last_resource_refresh", ">", "self", ".", "_refresh_period", ":", "self", ".", "_update_avail_resources", "(", ")", "currently_available", "=", "Resources", ".", "subtract", "(", "self", ".", "_avail_resources", ",", "self", ".", "_committed_resources", ")", "have_space", "=", "(", "resources", ".", "cpu_total", "(", ")", "<=", "currently_available", ".", "cpu", "and", "resources", ".", "gpu_total", "(", ")", "<=", "currently_available", ".", "gpu", "and", "all", "(", "resources", ".", "get_res_total", "(", "res", ")", "<=", "currently_available", ".", "get", "(", "res", ")", "for", "res", "in", "resources", ".", "custom_resources", ")", ")", "if", "have_space", ":", "return", "True", "can_overcommit", "=", "self", ".", "_queue_trials", "if", "(", "resources", ".", "cpu_total", "(", ")", ">", "0", "and", "currently_available", ".", "cpu", "<=", "0", ")", "or", "(", "resources", ".", "gpu_total", "(", ")", ">", "0", "and", "currently_available", ".", "gpu", "<=", "0", ")", "or", "any", "(", "(", "resources", ".", "get_res_total", "(", "res_name", ")", ">", "0", "and", "currently_available", ".", "get", "(", "res_name", ")", "<=", "0", ")", "for", "res_name", "in", "resources", ".", "custom_resources", ")", ":", "can_overcommit", "=", "False", "# requested resource is already saturated", "if", "can_overcommit", ":", "logger", ".", "warning", "(", "\"Allowing trial to start even though the \"", "\"cluster does not have enough free resources. Trial actors \"", "\"may appear to hang until enough resources are added to the \"", "\"cluster (e.g., via autoscaling). You can disable this \"", "\"behavior by specifying `queue_trials=False` in \"", "\"ray.tune.run().\"", ")", "return", "True", "return", "False" ]
Returns whether this runner has at least the specified resources. This refreshes the Ray cluster resources if the time since last update has exceeded self._refresh_period. This also assumes that the cluster is not resizing very frequently.
[ "Returns", "whether", "this", "runner", "has", "at", "least", "the", "specified", "resources", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/ray_trial_executor.py#L389-L430
24,280
ray-project/ray
python/ray/tune/ray_trial_executor.py
RayTrialExecutor.resource_string
def resource_string(self): """Returns a string describing the total resources available.""" if self._resources_initialized: res_str = "{} CPUs, {} GPUs".format(self._avail_resources.cpu, self._avail_resources.gpu) if self._avail_resources.custom_resources: custom = ", ".join( "{} {}".format( self._avail_resources.get_res_total(name), name) for name in self._avail_resources.custom_resources) res_str += " ({})".format(custom) return res_str else: return "? CPUs, ? GPUs"
python
def resource_string(self): """Returns a string describing the total resources available.""" if self._resources_initialized: res_str = "{} CPUs, {} GPUs".format(self._avail_resources.cpu, self._avail_resources.gpu) if self._avail_resources.custom_resources: custom = ", ".join( "{} {}".format( self._avail_resources.get_res_total(name), name) for name in self._avail_resources.custom_resources) res_str += " ({})".format(custom) return res_str else: return "? CPUs, ? GPUs"
[ "def", "resource_string", "(", "self", ")", ":", "if", "self", ".", "_resources_initialized", ":", "res_str", "=", "\"{} CPUs, {} GPUs\"", ".", "format", "(", "self", ".", "_avail_resources", ".", "cpu", ",", "self", ".", "_avail_resources", ".", "gpu", ")", "if", "self", ".", "_avail_resources", ".", "custom_resources", ":", "custom", "=", "\", \"", ".", "join", "(", "\"{} {}\"", ".", "format", "(", "self", ".", "_avail_resources", ".", "get_res_total", "(", "name", ")", ",", "name", ")", "for", "name", "in", "self", ".", "_avail_resources", ".", "custom_resources", ")", "res_str", "+=", "\" ({})\"", ".", "format", "(", "custom", ")", "return", "res_str", "else", ":", "return", "\"? CPUs, ? GPUs\"" ]
Returns a string describing the total resources available.
[ "Returns", "a", "string", "describing", "the", "total", "resources", "available", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/ray_trial_executor.py#L451-L465
24,281
ray-project/ray
python/ray/tune/ray_trial_executor.py
RayTrialExecutor.save
def save(self, trial, storage=Checkpoint.DISK): """Saves the trial's state to a checkpoint.""" trial._checkpoint.storage = storage trial._checkpoint.last_result = trial.last_result if storage == Checkpoint.MEMORY: trial._checkpoint.value = trial.runner.save_to_object.remote() else: # Keeps only highest performing checkpoints if enabled if trial.keep_checkpoints_num: try: last_attr_val = trial.last_result[ trial.checkpoint_score_attr] if (trial.compare_checkpoints(last_attr_val) and not math.isnan(last_attr_val)): trial.best_checkpoint_attr_value = last_attr_val self._checkpoint_and_erase(trial) except KeyError: logger.warning( "Result dict has no key: {}. keep" "_checkpoints_num flag will not work".format( trial.checkpoint_score_attr)) else: with warn_if_slow("save_to_disk"): trial._checkpoint.value = ray.get( trial.runner.save.remote()) return trial._checkpoint.value
python
def save(self, trial, storage=Checkpoint.DISK): """Saves the trial's state to a checkpoint.""" trial._checkpoint.storage = storage trial._checkpoint.last_result = trial.last_result if storage == Checkpoint.MEMORY: trial._checkpoint.value = trial.runner.save_to_object.remote() else: # Keeps only highest performing checkpoints if enabled if trial.keep_checkpoints_num: try: last_attr_val = trial.last_result[ trial.checkpoint_score_attr] if (trial.compare_checkpoints(last_attr_val) and not math.isnan(last_attr_val)): trial.best_checkpoint_attr_value = last_attr_val self._checkpoint_and_erase(trial) except KeyError: logger.warning( "Result dict has no key: {}. keep" "_checkpoints_num flag will not work".format( trial.checkpoint_score_attr)) else: with warn_if_slow("save_to_disk"): trial._checkpoint.value = ray.get( trial.runner.save.remote()) return trial._checkpoint.value
[ "def", "save", "(", "self", ",", "trial", ",", "storage", "=", "Checkpoint", ".", "DISK", ")", ":", "trial", ".", "_checkpoint", ".", "storage", "=", "storage", "trial", ".", "_checkpoint", ".", "last_result", "=", "trial", ".", "last_result", "if", "storage", "==", "Checkpoint", ".", "MEMORY", ":", "trial", ".", "_checkpoint", ".", "value", "=", "trial", ".", "runner", ".", "save_to_object", ".", "remote", "(", ")", "else", ":", "# Keeps only highest performing checkpoints if enabled", "if", "trial", ".", "keep_checkpoints_num", ":", "try", ":", "last_attr_val", "=", "trial", ".", "last_result", "[", "trial", ".", "checkpoint_score_attr", "]", "if", "(", "trial", ".", "compare_checkpoints", "(", "last_attr_val", ")", "and", "not", "math", ".", "isnan", "(", "last_attr_val", ")", ")", ":", "trial", ".", "best_checkpoint_attr_value", "=", "last_attr_val", "self", ".", "_checkpoint_and_erase", "(", "trial", ")", "except", "KeyError", ":", "logger", ".", "warning", "(", "\"Result dict has no key: {}. keep\"", "\"_checkpoints_num flag will not work\"", ".", "format", "(", "trial", ".", "checkpoint_score_attr", ")", ")", "else", ":", "with", "warn_if_slow", "(", "\"save_to_disk\"", ")", ":", "trial", ".", "_checkpoint", ".", "value", "=", "ray", ".", "get", "(", "trial", ".", "runner", ".", "save", ".", "remote", "(", ")", ")", "return", "trial", ".", "_checkpoint", ".", "value" ]
Saves the trial's state to a checkpoint.
[ "Saves", "the", "trial", "s", "state", "to", "a", "checkpoint", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/ray_trial_executor.py#L471-L497
24,282
ray-project/ray
python/ray/tune/ray_trial_executor.py
RayTrialExecutor.export_trial_if_needed
def export_trial_if_needed(self, trial): """Exports model of this trial based on trial.export_formats. Return: A dict that maps ExportFormats to successfully exported models. """ if trial.export_formats and len(trial.export_formats) > 0: return ray.get( trial.runner.export_model.remote(trial.export_formats)) return {}
python
def export_trial_if_needed(self, trial): """Exports model of this trial based on trial.export_formats. Return: A dict that maps ExportFormats to successfully exported models. """ if trial.export_formats and len(trial.export_formats) > 0: return ray.get( trial.runner.export_model.remote(trial.export_formats)) return {}
[ "def", "export_trial_if_needed", "(", "self", ",", "trial", ")", ":", "if", "trial", ".", "export_formats", "and", "len", "(", "trial", ".", "export_formats", ")", ">", "0", ":", "return", "ray", ".", "get", "(", "trial", ".", "runner", ".", "export_model", ".", "remote", "(", "trial", ".", "export_formats", ")", ")", "return", "{", "}" ]
Exports model of this trial based on trial.export_formats. Return: A dict that maps ExportFormats to successfully exported models.
[ "Exports", "model", "of", "this", "trial", "based", "on", "trial", ".", "export_formats", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/ray_trial_executor.py#L547-L556
24,283
ray-project/ray
python/ray/experimental/streaming/streaming.py
Environment.__generate_actor
def __generate_actor(self, instance_id, operator, input, output): """Generates an actor that will execute a particular instance of the logical operator Attributes: instance_id (UUID): The id of the instance the actor will execute. operator (Operator): The metadata of the logical operator. input (DataInput): The input gate that manages input channels of the instance (see: DataInput in communication.py). input (DataOutput): The output gate that manages output channels of the instance (see: DataOutput in communication.py). """ actor_id = (operator.id, instance_id) # Record the physical dataflow graph (for debugging purposes) self.__add_channel(actor_id, input, output) # Select actor to construct if operator.type == OpType.Source: source = operator_instance.Source.remote(actor_id, operator, input, output) source.register_handle.remote(source) return source.start.remote() elif operator.type == OpType.Map: map = operator_instance.Map.remote(actor_id, operator, input, output) map.register_handle.remote(map) return map.start.remote() elif operator.type == OpType.FlatMap: flatmap = operator_instance.FlatMap.remote(actor_id, operator, input, output) flatmap.register_handle.remote(flatmap) return flatmap.start.remote() elif operator.type == OpType.Filter: filter = operator_instance.Filter.remote(actor_id, operator, input, output) filter.register_handle.remote(filter) return filter.start.remote() elif operator.type == OpType.Reduce: reduce = operator_instance.Reduce.remote(actor_id, operator, input, output) reduce.register_handle.remote(reduce) return reduce.start.remote() elif operator.type == OpType.TimeWindow: pass elif operator.type == OpType.KeyBy: keyby = operator_instance.KeyBy.remote(actor_id, operator, input, output) keyby.register_handle.remote(keyby) return keyby.start.remote() elif operator.type == OpType.Sum: sum = operator_instance.Reduce.remote(actor_id, operator, input, output) # Register target handle at state actor state_actor = operator.state_actor if 
state_actor is not None: state_actor.register_target.remote(sum) # Register own handle sum.register_handle.remote(sum) return sum.start.remote() elif operator.type == OpType.Sink: pass elif operator.type == OpType.Inspect: inspect = operator_instance.Inspect.remote(actor_id, operator, input, output) inspect.register_handle.remote(inspect) return inspect.start.remote() elif operator.type == OpType.ReadTextFile: # TODO (john): Colocate the source with the input file read = operator_instance.ReadTextFile.remote( actor_id, operator, input, output) read.register_handle.remote(read) return read.start.remote() else: # TODO (john): Add support for other types of operators sys.exit("Unrecognized or unsupported {} operator type.".format( operator.type))
python
def __generate_actor(self, instance_id, operator, input, output): """Generates an actor that will execute a particular instance of the logical operator Attributes: instance_id (UUID): The id of the instance the actor will execute. operator (Operator): The metadata of the logical operator. input (DataInput): The input gate that manages input channels of the instance (see: DataInput in communication.py). input (DataOutput): The output gate that manages output channels of the instance (see: DataOutput in communication.py). """ actor_id = (operator.id, instance_id) # Record the physical dataflow graph (for debugging purposes) self.__add_channel(actor_id, input, output) # Select actor to construct if operator.type == OpType.Source: source = operator_instance.Source.remote(actor_id, operator, input, output) source.register_handle.remote(source) return source.start.remote() elif operator.type == OpType.Map: map = operator_instance.Map.remote(actor_id, operator, input, output) map.register_handle.remote(map) return map.start.remote() elif operator.type == OpType.FlatMap: flatmap = operator_instance.FlatMap.remote(actor_id, operator, input, output) flatmap.register_handle.remote(flatmap) return flatmap.start.remote() elif operator.type == OpType.Filter: filter = operator_instance.Filter.remote(actor_id, operator, input, output) filter.register_handle.remote(filter) return filter.start.remote() elif operator.type == OpType.Reduce: reduce = operator_instance.Reduce.remote(actor_id, operator, input, output) reduce.register_handle.remote(reduce) return reduce.start.remote() elif operator.type == OpType.TimeWindow: pass elif operator.type == OpType.KeyBy: keyby = operator_instance.KeyBy.remote(actor_id, operator, input, output) keyby.register_handle.remote(keyby) return keyby.start.remote() elif operator.type == OpType.Sum: sum = operator_instance.Reduce.remote(actor_id, operator, input, output) # Register target handle at state actor state_actor = operator.state_actor if 
state_actor is not None: state_actor.register_target.remote(sum) # Register own handle sum.register_handle.remote(sum) return sum.start.remote() elif operator.type == OpType.Sink: pass elif operator.type == OpType.Inspect: inspect = operator_instance.Inspect.remote(actor_id, operator, input, output) inspect.register_handle.remote(inspect) return inspect.start.remote() elif operator.type == OpType.ReadTextFile: # TODO (john): Colocate the source with the input file read = operator_instance.ReadTextFile.remote( actor_id, operator, input, output) read.register_handle.remote(read) return read.start.remote() else: # TODO (john): Add support for other types of operators sys.exit("Unrecognized or unsupported {} operator type.".format( operator.type))
[ "def", "__generate_actor", "(", "self", ",", "instance_id", ",", "operator", ",", "input", ",", "output", ")", ":", "actor_id", "=", "(", "operator", ".", "id", ",", "instance_id", ")", "# Record the physical dataflow graph (for debugging purposes)", "self", ".", "__add_channel", "(", "actor_id", ",", "input", ",", "output", ")", "# Select actor to construct", "if", "operator", ".", "type", "==", "OpType", ".", "Source", ":", "source", "=", "operator_instance", ".", "Source", ".", "remote", "(", "actor_id", ",", "operator", ",", "input", ",", "output", ")", "source", ".", "register_handle", ".", "remote", "(", "source", ")", "return", "source", ".", "start", ".", "remote", "(", ")", "elif", "operator", ".", "type", "==", "OpType", ".", "Map", ":", "map", "=", "operator_instance", ".", "Map", ".", "remote", "(", "actor_id", ",", "operator", ",", "input", ",", "output", ")", "map", ".", "register_handle", ".", "remote", "(", "map", ")", "return", "map", ".", "start", ".", "remote", "(", ")", "elif", "operator", ".", "type", "==", "OpType", ".", "FlatMap", ":", "flatmap", "=", "operator_instance", ".", "FlatMap", ".", "remote", "(", "actor_id", ",", "operator", ",", "input", ",", "output", ")", "flatmap", ".", "register_handle", ".", "remote", "(", "flatmap", ")", "return", "flatmap", ".", "start", ".", "remote", "(", ")", "elif", "operator", ".", "type", "==", "OpType", ".", "Filter", ":", "filter", "=", "operator_instance", ".", "Filter", ".", "remote", "(", "actor_id", ",", "operator", ",", "input", ",", "output", ")", "filter", ".", "register_handle", ".", "remote", "(", "filter", ")", "return", "filter", ".", "start", ".", "remote", "(", ")", "elif", "operator", ".", "type", "==", "OpType", ".", "Reduce", ":", "reduce", "=", "operator_instance", ".", "Reduce", ".", "remote", "(", "actor_id", ",", "operator", ",", "input", ",", "output", ")", "reduce", ".", "register_handle", ".", "remote", "(", "reduce", ")", "return", "reduce", ".", "start", ".", "remote", 
"(", ")", "elif", "operator", ".", "type", "==", "OpType", ".", "TimeWindow", ":", "pass", "elif", "operator", ".", "type", "==", "OpType", ".", "KeyBy", ":", "keyby", "=", "operator_instance", ".", "KeyBy", ".", "remote", "(", "actor_id", ",", "operator", ",", "input", ",", "output", ")", "keyby", ".", "register_handle", ".", "remote", "(", "keyby", ")", "return", "keyby", ".", "start", ".", "remote", "(", ")", "elif", "operator", ".", "type", "==", "OpType", ".", "Sum", ":", "sum", "=", "operator_instance", ".", "Reduce", ".", "remote", "(", "actor_id", ",", "operator", ",", "input", ",", "output", ")", "# Register target handle at state actor", "state_actor", "=", "operator", ".", "state_actor", "if", "state_actor", "is", "not", "None", ":", "state_actor", ".", "register_target", ".", "remote", "(", "sum", ")", "# Register own handle", "sum", ".", "register_handle", ".", "remote", "(", "sum", ")", "return", "sum", ".", "start", ".", "remote", "(", ")", "elif", "operator", ".", "type", "==", "OpType", ".", "Sink", ":", "pass", "elif", "operator", ".", "type", "==", "OpType", ".", "Inspect", ":", "inspect", "=", "operator_instance", ".", "Inspect", ".", "remote", "(", "actor_id", ",", "operator", ",", "input", ",", "output", ")", "inspect", ".", "register_handle", ".", "remote", "(", "inspect", ")", "return", "inspect", ".", "start", ".", "remote", "(", ")", "elif", "operator", ".", "type", "==", "OpType", ".", "ReadTextFile", ":", "# TODO (john): Colocate the source with the input file", "read", "=", "operator_instance", ".", "ReadTextFile", ".", "remote", "(", "actor_id", ",", "operator", ",", "input", ",", "output", ")", "read", ".", "register_handle", ".", "remote", "(", "read", ")", "return", "read", ".", "start", ".", "remote", "(", ")", "else", ":", "# TODO (john): Add support for other types of operators", "sys", ".", "exit", "(", "\"Unrecognized or unsupported {} operator type.\"", ".", "format", "(", "operator", ".", "type", ")", ")" ]
Generates an actor that will execute a particular instance of the logical operator Attributes: instance_id (UUID): The id of the instance the actor will execute. operator (Operator): The metadata of the logical operator. input (DataInput): The input gate that manages input channels of the instance (see: DataInput in communication.py). input (DataOutput): The output gate that manages output channels of the instance (see: DataOutput in communication.py).
[ "Generates", "an", "actor", "that", "will", "execute", "a", "particular", "instance", "of", "the", "logical", "operator" ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/streaming/streaming.py#L96-L169
24,284
ray-project/ray
python/ray/experimental/streaming/streaming.py
Environment.__generate_actors
def __generate_actors(self, operator, upstream_channels, downstream_channels): """Generates one actor for each instance of the given logical operator. Attributes: operator (Operator): The logical operator metadata. upstream_channels (list): A list of all upstream channels for all instances of the operator. downstream_channels (list): A list of all downstream channels for all instances of the operator. """ num_instances = operator.num_instances logger.info("Generating {} actors of type {}...".format( num_instances, operator.type)) in_channels = upstream_channels.pop( operator.id) if upstream_channels else [] handles = [] for i in range(num_instances): # Collect input and output channels for the particular instance ip = [ channel for channel in in_channels if channel.dst_instance_id == i ] if in_channels else [] op = [ channel for channels_list in downstream_channels.values() for channel in channels_list if channel.src_instance_id == i ] log = "Constructed {} input and {} output channels " log += "for the {}-th instance of the {} operator." logger.debug(log.format(len(ip), len(op), i, operator.type)) input_gate = DataInput(ip) output_gate = DataOutput(op, operator.partitioning_strategies) handle = self.__generate_actor(i, operator, input_gate, output_gate) if handle: handles.append(handle) return handles
python
def __generate_actors(self, operator, upstream_channels, downstream_channels): """Generates one actor for each instance of the given logical operator. Attributes: operator (Operator): The logical operator metadata. upstream_channels (list): A list of all upstream channels for all instances of the operator. downstream_channels (list): A list of all downstream channels for all instances of the operator. """ num_instances = operator.num_instances logger.info("Generating {} actors of type {}...".format( num_instances, operator.type)) in_channels = upstream_channels.pop( operator.id) if upstream_channels else [] handles = [] for i in range(num_instances): # Collect input and output channels for the particular instance ip = [ channel for channel in in_channels if channel.dst_instance_id == i ] if in_channels else [] op = [ channel for channels_list in downstream_channels.values() for channel in channels_list if channel.src_instance_id == i ] log = "Constructed {} input and {} output channels " log += "for the {}-th instance of the {} operator." logger.debug(log.format(len(ip), len(op), i, operator.type)) input_gate = DataInput(ip) output_gate = DataOutput(op, operator.partitioning_strategies) handle = self.__generate_actor(i, operator, input_gate, output_gate) if handle: handles.append(handle) return handles
[ "def", "__generate_actors", "(", "self", ",", "operator", ",", "upstream_channels", ",", "downstream_channels", ")", ":", "num_instances", "=", "operator", ".", "num_instances", "logger", ".", "info", "(", "\"Generating {} actors of type {}...\"", ".", "format", "(", "num_instances", ",", "operator", ".", "type", ")", ")", "in_channels", "=", "upstream_channels", ".", "pop", "(", "operator", ".", "id", ")", "if", "upstream_channels", "else", "[", "]", "handles", "=", "[", "]", "for", "i", "in", "range", "(", "num_instances", ")", ":", "# Collect input and output channels for the particular instance", "ip", "=", "[", "channel", "for", "channel", "in", "in_channels", "if", "channel", ".", "dst_instance_id", "==", "i", "]", "if", "in_channels", "else", "[", "]", "op", "=", "[", "channel", "for", "channels_list", "in", "downstream_channels", ".", "values", "(", ")", "for", "channel", "in", "channels_list", "if", "channel", ".", "src_instance_id", "==", "i", "]", "log", "=", "\"Constructed {} input and {} output channels \"", "log", "+=", "\"for the {}-th instance of the {} operator.\"", "logger", ".", "debug", "(", "log", ".", "format", "(", "len", "(", "ip", ")", ",", "len", "(", "op", ")", ",", "i", ",", "operator", ".", "type", ")", ")", "input_gate", "=", "DataInput", "(", "ip", ")", "output_gate", "=", "DataOutput", "(", "op", ",", "operator", ".", "partitioning_strategies", ")", "handle", "=", "self", ".", "__generate_actor", "(", "i", ",", "operator", ",", "input_gate", ",", "output_gate", ")", "if", "handle", ":", "handles", ".", "append", "(", "handle", ")", "return", "handles" ]
Generates one actor for each instance of the given logical operator. Attributes: operator (Operator): The logical operator metadata. upstream_channels (list): A list of all upstream channels for all instances of the operator. downstream_channels (list): A list of all downstream channels for all instances of the operator.
[ "Generates", "one", "actor", "for", "each", "instance", "of", "the", "given", "logical", "operator", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/streaming/streaming.py#L173-L210
24,285
ray-project/ray
python/ray/experimental/streaming/streaming.py
Environment.execute
def execute(self): """Deploys and executes the physical dataflow.""" self._collect_garbage() # Make sure everything is clean # TODO (john): Check if dataflow has any 'logical inconsistencies' # For example, if there is a forward partitioning strategy but # the number of downstream instances is larger than the number of # upstream instances, some of the downstream instances will not be # used at all # Each operator instance is implemented as a Ray actor # Actors are deployed in topological order, as we traverse the # logical dataflow from sources to sinks. At each step, data # producers wait for acknowledge from consumers before starting # generating data. upstream_channels = {} for node in nx.topological_sort(self.logical_topo): operator = self.operators[node] # Generate downstream data channels downstream_channels = self._generate_channels(operator) # Instantiate Ray actors handles = self.__generate_actors(operator, upstream_channels, downstream_channels) if handles: self.actor_handles.extend(handles) upstream_channels.update(downstream_channels) logger.debug("Running...") return self.actor_handles
python
def execute(self): """Deploys and executes the physical dataflow.""" self._collect_garbage() # Make sure everything is clean # TODO (john): Check if dataflow has any 'logical inconsistencies' # For example, if there is a forward partitioning strategy but # the number of downstream instances is larger than the number of # upstream instances, some of the downstream instances will not be # used at all # Each operator instance is implemented as a Ray actor # Actors are deployed in topological order, as we traverse the # logical dataflow from sources to sinks. At each step, data # producers wait for acknowledge from consumers before starting # generating data. upstream_channels = {} for node in nx.topological_sort(self.logical_topo): operator = self.operators[node] # Generate downstream data channels downstream_channels = self._generate_channels(operator) # Instantiate Ray actors handles = self.__generate_actors(operator, upstream_channels, downstream_channels) if handles: self.actor_handles.extend(handles) upstream_channels.update(downstream_channels) logger.debug("Running...") return self.actor_handles
[ "def", "execute", "(", "self", ")", ":", "self", ".", "_collect_garbage", "(", ")", "# Make sure everything is clean", "# TODO (john): Check if dataflow has any 'logical inconsistencies'", "# For example, if there is a forward partitioning strategy but", "# the number of downstream instances is larger than the number of", "# upstream instances, some of the downstream instances will not be", "# used at all", "# Each operator instance is implemented as a Ray actor", "# Actors are deployed in topological order, as we traverse the", "# logical dataflow from sources to sinks. At each step, data", "# producers wait for acknowledge from consumers before starting", "# generating data.", "upstream_channels", "=", "{", "}", "for", "node", "in", "nx", ".", "topological_sort", "(", "self", ".", "logical_topo", ")", ":", "operator", "=", "self", ".", "operators", "[", "node", "]", "# Generate downstream data channels", "downstream_channels", "=", "self", ".", "_generate_channels", "(", "operator", ")", "# Instantiate Ray actors", "handles", "=", "self", ".", "__generate_actors", "(", "operator", ",", "upstream_channels", ",", "downstream_channels", ")", "if", "handles", ":", "self", ".", "actor_handles", ".", "extend", "(", "handles", ")", "upstream_channels", ".", "update", "(", "downstream_channels", ")", "logger", ".", "debug", "(", "\"Running...\"", ")", "return", "self", ".", "actor_handles" ]
Deploys and executes the physical dataflow.
[ "Deploys", "and", "executes", "the", "physical", "dataflow", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/streaming/streaming.py#L306-L332
24,286
ray-project/ray
python/ray/experimental/streaming/streaming.py
DataStream.set_parallelism
def set_parallelism(self, num_instances): """Sets the number of instances for the source operator of the stream. Attributes: num_instances (int): The level of parallelism for the source operator of the stream. """ assert (num_instances > 0) self.env._set_parallelism(self.src_operator_id, num_instances) return self
python
def set_parallelism(self, num_instances): """Sets the number of instances for the source operator of the stream. Attributes: num_instances (int): The level of parallelism for the source operator of the stream. """ assert (num_instances > 0) self.env._set_parallelism(self.src_operator_id, num_instances) return self
[ "def", "set_parallelism", "(", "self", ",", "num_instances", ")", ":", "assert", "(", "num_instances", ">", "0", ")", "self", ".", "env", ".", "_set_parallelism", "(", "self", ".", "src_operator_id", ",", "num_instances", ")", "return", "self" ]
Sets the number of instances for the source operator of the stream. Attributes: num_instances (int): The level of parallelism for the source operator of the stream.
[ "Sets", "the", "number", "of", "instances", "for", "the", "source", "operator", "of", "the", "stream", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/streaming/streaming.py#L467-L476
24,287
ray-project/ray
python/ray/experimental/streaming/streaming.py
DataStream.map
def map(self, map_fn, name="Map"): """Applies a map operator to the stream. Attributes: map_fn (function): The user-defined logic of the map. """ op = Operator( _generate_uuid(), OpType.Map, name, map_fn, num_instances=self.env.config.parallelism) return self.__register(op)
python
def map(self, map_fn, name="Map"): """Applies a map operator to the stream. Attributes: map_fn (function): The user-defined logic of the map. """ op = Operator( _generate_uuid(), OpType.Map, name, map_fn, num_instances=self.env.config.parallelism) return self.__register(op)
[ "def", "map", "(", "self", ",", "map_fn", ",", "name", "=", "\"Map\"", ")", ":", "op", "=", "Operator", "(", "_generate_uuid", "(", ")", ",", "OpType", ".", "Map", ",", "name", ",", "map_fn", ",", "num_instances", "=", "self", ".", "env", ".", "config", ".", "parallelism", ")", "return", "self", ".", "__register", "(", "op", ")" ]
Applies a map operator to the stream. Attributes: map_fn (function): The user-defined logic of the map.
[ "Applies", "a", "map", "operator", "to", "the", "stream", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/streaming/streaming.py#L521-L533
24,288
ray-project/ray
python/ray/experimental/streaming/streaming.py
DataStream.flat_map
def flat_map(self, flatmap_fn): """Applies a flatmap operator to the stream. Attributes: flatmap_fn (function): The user-defined logic of the flatmap (e.g. split()). """ op = Operator( _generate_uuid(), OpType.FlatMap, "FlatMap", flatmap_fn, num_instances=self.env.config.parallelism) return self.__register(op)
python
def flat_map(self, flatmap_fn): """Applies a flatmap operator to the stream. Attributes: flatmap_fn (function): The user-defined logic of the flatmap (e.g. split()). """ op = Operator( _generate_uuid(), OpType.FlatMap, "FlatMap", flatmap_fn, num_instances=self.env.config.parallelism) return self.__register(op)
[ "def", "flat_map", "(", "self", ",", "flatmap_fn", ")", ":", "op", "=", "Operator", "(", "_generate_uuid", "(", ")", ",", "OpType", ".", "FlatMap", ",", "\"FlatMap\"", ",", "flatmap_fn", ",", "num_instances", "=", "self", ".", "env", ".", "config", ".", "parallelism", ")", "return", "self", ".", "__register", "(", "op", ")" ]
Applies a flatmap operator to the stream. Attributes: flatmap_fn (function): The user-defined logic of the flatmap (e.g. split()).
[ "Applies", "a", "flatmap", "operator", "to", "the", "stream", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/streaming/streaming.py#L536-L549
24,289
ray-project/ray
python/ray/experimental/streaming/streaming.py
DataStream.key_by
def key_by(self, key_selector): """Applies a key_by operator to the stream. Attributes: key_attribute_index (int): The index of the key attributed (assuming tuple records). """ op = Operator( _generate_uuid(), OpType.KeyBy, "KeyBy", other=key_selector, num_instances=self.env.config.parallelism) return self.__register(op)
python
def key_by(self, key_selector): """Applies a key_by operator to the stream. Attributes: key_attribute_index (int): The index of the key attributed (assuming tuple records). """ op = Operator( _generate_uuid(), OpType.KeyBy, "KeyBy", other=key_selector, num_instances=self.env.config.parallelism) return self.__register(op)
[ "def", "key_by", "(", "self", ",", "key_selector", ")", ":", "op", "=", "Operator", "(", "_generate_uuid", "(", ")", ",", "OpType", ".", "KeyBy", ",", "\"KeyBy\"", ",", "other", "=", "key_selector", ",", "num_instances", "=", "self", ".", "env", ".", "config", ".", "parallelism", ")", "return", "self", ".", "__register", "(", "op", ")" ]
Applies a key_by operator to the stream. Attributes: key_attribute_index (int): The index of the key attributed (assuming tuple records).
[ "Applies", "a", "key_by", "operator", "to", "the", "stream", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/streaming/streaming.py#L553-L566
24,290
ray-project/ray
python/ray/experimental/streaming/streaming.py
DataStream.time_window
def time_window(self, window_width_ms): """Applies a system time window to the stream. Attributes: window_width_ms (int): The length of the window in ms. """ op = Operator( _generate_uuid(), OpType.TimeWindow, "TimeWindow", num_instances=self.env.config.parallelism, other=window_width_ms) return self.__register(op)
python
def time_window(self, window_width_ms): """Applies a system time window to the stream. Attributes: window_width_ms (int): The length of the window in ms. """ op = Operator( _generate_uuid(), OpType.TimeWindow, "TimeWindow", num_instances=self.env.config.parallelism, other=window_width_ms) return self.__register(op)
[ "def", "time_window", "(", "self", ",", "window_width_ms", ")", ":", "op", "=", "Operator", "(", "_generate_uuid", "(", ")", ",", "OpType", ".", "TimeWindow", ",", "\"TimeWindow\"", ",", "num_instances", "=", "self", ".", "env", ".", "config", ".", "parallelism", ",", "other", "=", "window_width_ms", ")", "return", "self", ".", "__register", "(", "op", ")" ]
Applies a system time window to the stream. Attributes: window_width_ms (int): The length of the window in ms.
[ "Applies", "a", "system", "time", "window", "to", "the", "stream", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/streaming/streaming.py#L605-L617
24,291
ray-project/ray
python/ray/experimental/streaming/streaming.py
DataStream.filter
def filter(self, filter_fn): """Applies a filter to the stream. Attributes: filter_fn (function): The user-defined filter function. """ op = Operator( _generate_uuid(), OpType.Filter, "Filter", filter_fn, num_instances=self.env.config.parallelism) return self.__register(op)
python
def filter(self, filter_fn): """Applies a filter to the stream. Attributes: filter_fn (function): The user-defined filter function. """ op = Operator( _generate_uuid(), OpType.Filter, "Filter", filter_fn, num_instances=self.env.config.parallelism) return self.__register(op)
[ "def", "filter", "(", "self", ",", "filter_fn", ")", ":", "op", "=", "Operator", "(", "_generate_uuid", "(", ")", ",", "OpType", ".", "Filter", ",", "\"Filter\"", ",", "filter_fn", ",", "num_instances", "=", "self", ".", "env", ".", "config", ".", "parallelism", ")", "return", "self", ".", "__register", "(", "op", ")" ]
Applies a filter to the stream. Attributes: filter_fn (function): The user-defined filter function.
[ "Applies", "a", "filter", "to", "the", "stream", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/streaming/streaming.py#L620-L632
24,292
ray-project/ray
python/ray/experimental/streaming/streaming.py
DataStream.inspect
def inspect(self, inspect_logic): """Inspects the content of the stream. Attributes: inspect_logic (function): The user-defined inspect function. """ op = Operator( _generate_uuid(), OpType.Inspect, "Inspect", inspect_logic, num_instances=self.env.config.parallelism) return self.__register(op)
python
def inspect(self, inspect_logic): """Inspects the content of the stream. Attributes: inspect_logic (function): The user-defined inspect function. """ op = Operator( _generate_uuid(), OpType.Inspect, "Inspect", inspect_logic, num_instances=self.env.config.parallelism) return self.__register(op)
[ "def", "inspect", "(", "self", ",", "inspect_logic", ")", ":", "op", "=", "Operator", "(", "_generate_uuid", "(", ")", ",", "OpType", ".", "Inspect", ",", "\"Inspect\"", ",", "inspect_logic", ",", "num_instances", "=", "self", ".", "env", ".", "config", ".", "parallelism", ")", "return", "self", ".", "__register", "(", "op", ")" ]
Inspects the content of the stream. Attributes: inspect_logic (function): The user-defined inspect function.
[ "Inspects", "the", "content", "of", "the", "stream", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/streaming/streaming.py#L644-L656
24,293
ray-project/ray
python/ray/experimental/streaming/streaming.py
DataStream.sink
def sink(self): """Closes the stream with a sink operator.""" op = Operator( _generate_uuid(), OpType.Sink, "Sink", num_instances=self.env.config.parallelism) return self.__register(op)
python
def sink(self): """Closes the stream with a sink operator.""" op = Operator( _generate_uuid(), OpType.Sink, "Sink", num_instances=self.env.config.parallelism) return self.__register(op)
[ "def", "sink", "(", "self", ")", ":", "op", "=", "Operator", "(", "_generate_uuid", "(", ")", ",", "OpType", ".", "Sink", ",", "\"Sink\"", ",", "num_instances", "=", "self", ".", "env", ".", "config", ".", "parallelism", ")", "return", "self", ".", "__register", "(", "op", ")" ]
Closes the stream with a sink operator.
[ "Closes", "the", "stream", "with", "a", "sink", "operator", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/streaming/streaming.py#L661-L668
24,294
ray-project/ray
python/ray/log_monitor.py
LogMonitor.update_log_filenames
def update_log_filenames(self): """Update the list of log files to monitor.""" log_filenames = os.listdir(self.logs_dir) for log_filename in log_filenames: full_path = os.path.join(self.logs_dir, log_filename) if full_path not in self.log_filenames: self.log_filenames.add(full_path) self.closed_file_infos.append( LogFileInfo( filename=full_path, size_when_last_opened=0, file_position=0, file_handle=None)) logger.info("Beginning to track file {}".format(log_filename))
python
def update_log_filenames(self): """Update the list of log files to monitor.""" log_filenames = os.listdir(self.logs_dir) for log_filename in log_filenames: full_path = os.path.join(self.logs_dir, log_filename) if full_path not in self.log_filenames: self.log_filenames.add(full_path) self.closed_file_infos.append( LogFileInfo( filename=full_path, size_when_last_opened=0, file_position=0, file_handle=None)) logger.info("Beginning to track file {}".format(log_filename))
[ "def", "update_log_filenames", "(", "self", ")", ":", "log_filenames", "=", "os", ".", "listdir", "(", "self", ".", "logs_dir", ")", "for", "log_filename", "in", "log_filenames", ":", "full_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "logs_dir", ",", "log_filename", ")", "if", "full_path", "not", "in", "self", ".", "log_filenames", ":", "self", ".", "log_filenames", ".", "add", "(", "full_path", ")", "self", ".", "closed_file_infos", ".", "append", "(", "LogFileInfo", "(", "filename", "=", "full_path", ",", "size_when_last_opened", "=", "0", ",", "file_position", "=", "0", ",", "file_handle", "=", "None", ")", ")", "logger", ".", "info", "(", "\"Beginning to track file {}\"", ".", "format", "(", "log_filename", ")", ")" ]
Update the list of log files to monitor.
[ "Update", "the", "list", "of", "log", "files", "to", "monitor", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/log_monitor.py#L90-L104
24,295
ray-project/ray
python/ray/log_monitor.py
LogMonitor.open_closed_files
def open_closed_files(self): """Open some closed files if they may have new lines. Opening more files may require us to close some of the already open files. """ if not self.can_open_more_files: # If we can't open any more files. Close all of the files. self.close_all_files() files_with_no_updates = [] while len(self.closed_file_infos) > 0: if (len(self.open_file_infos) >= ray_constants.LOG_MONITOR_MAX_OPEN_FILES): self.can_open_more_files = False break file_info = self.closed_file_infos.pop(0) assert file_info.file_handle is None # Get the file size to see if it has gotten bigger since we last # opened it. try: file_size = os.path.getsize(file_info.filename) except (IOError, OSError) as e: # Catch "file not found" errors. if e.errno == errno.ENOENT: logger.warning("Warning: The file {} was not " "found.".format(file_info.filename)) self.log_filenames.remove(file_info.filename) continue raise e # If some new lines have been added to this file, try to reopen the # file. if file_size > file_info.size_when_last_opened: try: f = open(file_info.filename, "r") except (IOError, OSError) as e: if e.errno == errno.ENOENT: logger.warning("Warning: The file {} was not " "found.".format(file_info.filename)) self.log_filenames.remove(file_info.filename) continue else: raise e f.seek(file_info.file_position) file_info.filesize_when_last_opened = file_size file_info.file_handle = f self.open_file_infos.append(file_info) else: files_with_no_updates.append(file_info) # Add the files with no changes back to the list of closed files. self.closed_file_infos += files_with_no_updates
python
def open_closed_files(self): """Open some closed files if they may have new lines. Opening more files may require us to close some of the already open files. """ if not self.can_open_more_files: # If we can't open any more files. Close all of the files. self.close_all_files() files_with_no_updates = [] while len(self.closed_file_infos) > 0: if (len(self.open_file_infos) >= ray_constants.LOG_MONITOR_MAX_OPEN_FILES): self.can_open_more_files = False break file_info = self.closed_file_infos.pop(0) assert file_info.file_handle is None # Get the file size to see if it has gotten bigger since we last # opened it. try: file_size = os.path.getsize(file_info.filename) except (IOError, OSError) as e: # Catch "file not found" errors. if e.errno == errno.ENOENT: logger.warning("Warning: The file {} was not " "found.".format(file_info.filename)) self.log_filenames.remove(file_info.filename) continue raise e # If some new lines have been added to this file, try to reopen the # file. if file_size > file_info.size_when_last_opened: try: f = open(file_info.filename, "r") except (IOError, OSError) as e: if e.errno == errno.ENOENT: logger.warning("Warning: The file {} was not " "found.".format(file_info.filename)) self.log_filenames.remove(file_info.filename) continue else: raise e f.seek(file_info.file_position) file_info.filesize_when_last_opened = file_size file_info.file_handle = f self.open_file_infos.append(file_info) else: files_with_no_updates.append(file_info) # Add the files with no changes back to the list of closed files. self.closed_file_infos += files_with_no_updates
[ "def", "open_closed_files", "(", "self", ")", ":", "if", "not", "self", ".", "can_open_more_files", ":", "# If we can't open any more files. Close all of the files.", "self", ".", "close_all_files", "(", ")", "files_with_no_updates", "=", "[", "]", "while", "len", "(", "self", ".", "closed_file_infos", ")", ">", "0", ":", "if", "(", "len", "(", "self", ".", "open_file_infos", ")", ">=", "ray_constants", ".", "LOG_MONITOR_MAX_OPEN_FILES", ")", ":", "self", ".", "can_open_more_files", "=", "False", "break", "file_info", "=", "self", ".", "closed_file_infos", ".", "pop", "(", "0", ")", "assert", "file_info", ".", "file_handle", "is", "None", "# Get the file size to see if it has gotten bigger since we last", "# opened it.", "try", ":", "file_size", "=", "os", ".", "path", ".", "getsize", "(", "file_info", ".", "filename", ")", "except", "(", "IOError", ",", "OSError", ")", "as", "e", ":", "# Catch \"file not found\" errors.", "if", "e", ".", "errno", "==", "errno", ".", "ENOENT", ":", "logger", ".", "warning", "(", "\"Warning: The file {} was not \"", "\"found.\"", ".", "format", "(", "file_info", ".", "filename", ")", ")", "self", ".", "log_filenames", ".", "remove", "(", "file_info", ".", "filename", ")", "continue", "raise", "e", "# If some new lines have been added to this file, try to reopen the", "# file.", "if", "file_size", ">", "file_info", ".", "size_when_last_opened", ":", "try", ":", "f", "=", "open", "(", "file_info", ".", "filename", ",", "\"r\"", ")", "except", "(", "IOError", ",", "OSError", ")", "as", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "ENOENT", ":", "logger", ".", "warning", "(", "\"Warning: The file {} was not \"", "\"found.\"", ".", "format", "(", "file_info", ".", "filename", ")", ")", "self", ".", "log_filenames", ".", "remove", "(", "file_info", ".", "filename", ")", "continue", "else", ":", "raise", "e", "f", ".", "seek", "(", "file_info", ".", "file_position", ")", "file_info", ".", "filesize_when_last_opened", "=", 
"file_size", "file_info", ".", "file_handle", "=", "f", "self", ".", "open_file_infos", ".", "append", "(", "file_info", ")", "else", ":", "files_with_no_updates", ".", "append", "(", "file_info", ")", "# Add the files with no changes back to the list of closed files.", "self", ".", "closed_file_infos", "+=", "files_with_no_updates" ]
Open some closed files if they may have new lines. Opening more files may require us to close some of the already open files.
[ "Open", "some", "closed", "files", "if", "they", "may", "have", "new", "lines", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/log_monitor.py#L106-L160
24,296
ray-project/ray
python/ray/log_monitor.py
LogMonitor.run
def run(self): """Run the log monitor. This will query Redis once every second to check if there are new log files to monitor. It will also store those log files in Redis. """ while True: self.update_log_filenames() self.open_closed_files() anything_published = self.check_log_files_and_publish_updates() # If nothing was published, then wait a little bit before checking # for logs to avoid using too much CPU. if not anything_published: time.sleep(0.05)
python
def run(self): """Run the log monitor. This will query Redis once every second to check if there are new log files to monitor. It will also store those log files in Redis. """ while True: self.update_log_filenames() self.open_closed_files() anything_published = self.check_log_files_and_publish_updates() # If nothing was published, then wait a little bit before checking # for logs to avoid using too much CPU. if not anything_published: time.sleep(0.05)
[ "def", "run", "(", "self", ")", ":", "while", "True", ":", "self", ".", "update_log_filenames", "(", ")", "self", ".", "open_closed_files", "(", ")", "anything_published", "=", "self", ".", "check_log_files_and_publish_updates", "(", ")", "# If nothing was published, then wait a little bit before checking", "# for logs to avoid using too much CPU.", "if", "not", "anything_published", ":", "time", ".", "sleep", "(", "0.05", ")" ]
Run the log monitor. This will query Redis once every second to check if there are new log files to monitor. It will also store those log files in Redis.
[ "Run", "the", "log", "monitor", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/log_monitor.py#L210-L223
24,297
ray-project/ray
python/ray/tune/suggest/suggestion.py
SuggestionAlgorithm.add_configurations
def add_configurations(self, experiments): """Chains generator given experiment specifications. Arguments: experiments (Experiment | list | dict): Experiments to run. """ experiment_list = convert_to_experiment_list(experiments) for experiment in experiment_list: self._trial_generator = itertools.chain( self._trial_generator, self._generate_trials(experiment.spec, experiment.name))
python
def add_configurations(self, experiments): """Chains generator given experiment specifications. Arguments: experiments (Experiment | list | dict): Experiments to run. """ experiment_list = convert_to_experiment_list(experiments) for experiment in experiment_list: self._trial_generator = itertools.chain( self._trial_generator, self._generate_trials(experiment.spec, experiment.name))
[ "def", "add_configurations", "(", "self", ",", "experiments", ")", ":", "experiment_list", "=", "convert_to_experiment_list", "(", "experiments", ")", "for", "experiment", "in", "experiment_list", ":", "self", ".", "_trial_generator", "=", "itertools", ".", "chain", "(", "self", ".", "_trial_generator", ",", "self", ".", "_generate_trials", "(", "experiment", ".", "spec", ",", "experiment", ".", "name", ")", ")" ]
Chains generator given experiment specifications. Arguments: experiments (Experiment | list | dict): Experiments to run.
[ "Chains", "generator", "given", "experiment", "specifications", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/suggest/suggestion.py#L43-L53
24,298
ray-project/ray
python/ray/tune/suggest/suggestion.py
SuggestionAlgorithm.next_trials
def next_trials(self): """Provides a batch of Trial objects to be queued into the TrialRunner. A batch ends when self._trial_generator returns None. Returns: trials (list): Returns a list of trials. """ trials = [] for trial in self._trial_generator: if trial is None: return trials trials += [trial] self._finished = True return trials
python
def next_trials(self): """Provides a batch of Trial objects to be queued into the TrialRunner. A batch ends when self._trial_generator returns None. Returns: trials (list): Returns a list of trials. """ trials = [] for trial in self._trial_generator: if trial is None: return trials trials += [trial] self._finished = True return trials
[ "def", "next_trials", "(", "self", ")", ":", "trials", "=", "[", "]", "for", "trial", "in", "self", ".", "_trial_generator", ":", "if", "trial", "is", "None", ":", "return", "trials", "trials", "+=", "[", "trial", "]", "self", ".", "_finished", "=", "True", "return", "trials" ]
Provides a batch of Trial objects to be queued into the TrialRunner. A batch ends when self._trial_generator returns None. Returns: trials (list): Returns a list of trials.
[ "Provides", "a", "batch", "of", "Trial", "objects", "to", "be", "queued", "into", "the", "TrialRunner", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/suggest/suggestion.py#L55-L71
24,299
ray-project/ray
python/ray/tune/suggest/suggestion.py
SuggestionAlgorithm._generate_trials
def _generate_trials(self, experiment_spec, output_path=""): """Generates trials with configurations from `_suggest`. Creates a trial_id that is passed into `_suggest`. Yields: Trial objects constructed according to `spec` """ if "run" not in experiment_spec: raise TuneError("Must specify `run` in {}".format(experiment_spec)) for _ in range(experiment_spec.get("num_samples", 1)): trial_id = Trial.generate_id() while True: suggested_config = self._suggest(trial_id) if suggested_config is None: yield None else: break spec = copy.deepcopy(experiment_spec) spec["config"] = merge_dicts(spec["config"], suggested_config) flattened_config = resolve_nested_dict(spec["config"]) self._counter += 1 tag = "{0}_{1}".format( str(self._counter), format_vars(flattened_config)) yield create_trial_from_spec( spec, output_path, self._parser, experiment_tag=tag, trial_id=trial_id)
python
def _generate_trials(self, experiment_spec, output_path=""): """Generates trials with configurations from `_suggest`. Creates a trial_id that is passed into `_suggest`. Yields: Trial objects constructed according to `spec` """ if "run" not in experiment_spec: raise TuneError("Must specify `run` in {}".format(experiment_spec)) for _ in range(experiment_spec.get("num_samples", 1)): trial_id = Trial.generate_id() while True: suggested_config = self._suggest(trial_id) if suggested_config is None: yield None else: break spec = copy.deepcopy(experiment_spec) spec["config"] = merge_dicts(spec["config"], suggested_config) flattened_config = resolve_nested_dict(spec["config"]) self._counter += 1 tag = "{0}_{1}".format( str(self._counter), format_vars(flattened_config)) yield create_trial_from_spec( spec, output_path, self._parser, experiment_tag=tag, trial_id=trial_id)
[ "def", "_generate_trials", "(", "self", ",", "experiment_spec", ",", "output_path", "=", "\"\"", ")", ":", "if", "\"run\"", "not", "in", "experiment_spec", ":", "raise", "TuneError", "(", "\"Must specify `run` in {}\"", ".", "format", "(", "experiment_spec", ")", ")", "for", "_", "in", "range", "(", "experiment_spec", ".", "get", "(", "\"num_samples\"", ",", "1", ")", ")", ":", "trial_id", "=", "Trial", ".", "generate_id", "(", ")", "while", "True", ":", "suggested_config", "=", "self", ".", "_suggest", "(", "trial_id", ")", "if", "suggested_config", "is", "None", ":", "yield", "None", "else", ":", "break", "spec", "=", "copy", ".", "deepcopy", "(", "experiment_spec", ")", "spec", "[", "\"config\"", "]", "=", "merge_dicts", "(", "spec", "[", "\"config\"", "]", ",", "suggested_config", ")", "flattened_config", "=", "resolve_nested_dict", "(", "spec", "[", "\"config\"", "]", ")", "self", ".", "_counter", "+=", "1", "tag", "=", "\"{0}_{1}\"", ".", "format", "(", "str", "(", "self", ".", "_counter", ")", ",", "format_vars", "(", "flattened_config", ")", ")", "yield", "create_trial_from_spec", "(", "spec", ",", "output_path", ",", "self", ".", "_parser", ",", "experiment_tag", "=", "tag", ",", "trial_id", "=", "trial_id", ")" ]
Generates trials with configurations from `_suggest`. Creates a trial_id that is passed into `_suggest`. Yields: Trial objects constructed according to `spec`
[ "Generates", "trials", "with", "configurations", "from", "_suggest", "." ]
4eade036a0505e244c976f36aaa2d64386b5129b
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/suggest/suggestion.py#L73-L102