python_code | repo_name | file_path
|---|---|---|
"""Functions for the optimisation (including evolution) and evaluation.
Arnheim 3 - Collage
Piotr Mirowski, Dylan Banarse, Mateusz Malinowski, Yotam Doron, Oriol Vinyals,
Simon Osindero, Chrisantha Fernando
DeepMind, 2021-2022
Copyright 2021 DeepMind Technologies Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import clip
from matplotlib import pyplot as plt
import numpy as np
import time
import torch
import torchvision.transforms as transforms
from .video_utils import show_and_save
# Show each image being evaluated for debugging purposes.
VISUALISE_BATCH_IMAGES = False
def augmentation_transforms(canvas_width,
use_normalized_clip=False,
use_augmentation=False):
"""Image transforms to produce distorted crops to augment the evaluation.
Args:
canvas_width: width of the drawing canvas
use_normalized_clip: Normalisation to better suit CLIP's training data
    use_augmentation: Image augmentation by random perspective and resized crops
Returns:
transforms
"""
if use_normalized_clip and use_augmentation:
augment_trans = transforms.Compose(
[transforms.RandomPerspective(fill=1, p=1, distortion_scale=0.6),
transforms.RandomResizedCrop(canvas_width, scale=(0.7, 0.9)),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073),
(0.26862954, 0.26130258, 0.27577711))])
elif use_augmentation:
augment_trans = transforms.Compose([
transforms.RandomPerspective(fill=1, p=1, distortion_scale=0.6),
transforms.RandomResizedCrop(canvas_width, scale=(0.7, 0.9)),
])
elif use_normalized_clip:
augment_trans = transforms.Normalize(
(0.48145466, 0.4578275, 0.40821073),
(0.26862954, 0.26130258, 0.27577711))
else:
augment_trans = transforms.RandomPerspective(
fill=1, p=0, distortion_scale=0)
return augment_trans
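# A minimal usage sketch (not part of the original module): apply the
# combined transform to a random stand-in batch in [N, C, H, W] format.
# The canvas width of 224 and the batch size are illustrative assumptions.
def _example_augmentation_transforms():
  augment = augmentation_transforms(
      canvas_width=224, use_normalized_clip=True, use_augmentation=True)
  batch = torch.rand(4, 3, 224, 224)  # hypothetical generator output
  augmented = augment(batch)
  print(augmented.shape)  # torch.Size([4, 3, 224, 224])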
def moving_average(a, n=3):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
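# Worked example (illustrative): the two length-3 windows of [1, 2, 3, 4]
# are [1, 2, 3] and [2, 3, 4], so moving_average([1, 2, 3, 4], n=3)
# returns array([2., 3.]).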
def plot_and_save_losses(
loss_history, title="Losses", filename=None, show=True):
"""Plot losses and save to file."""
losses = np.array(loss_history)
if filename:
np.save(filename + ".npy", losses, allow_pickle=True)
if show:
plt.figure(figsize=(10, 10))
plt.xlabel("Training steps")
plt.ylabel("Loss")
plt.title(title)
    plt.plot(moving_average(losses, n=3))
    if filename:
      plt.savefig(filename + ".png")
def make_optimizer(generator, learning_rate):
"""Make optimizer for generator's parameters.
Args:
generator: generator model
learning_rate: learning rate
Returns:
optimizer
"""
  my_list = ["positions_top"]
  params = [p for name, p in generator.named_parameters() if name in my_list]
  base_params = [
      p for name, p in generator.named_parameters() if name not in my_list]
  optimizer = torch.optim.SGD([{"params": base_params},
                               {"params": params, "lr": learning_rate}],
                              lr=learning_rate)
  return optimizer
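# A quick sketch of the resulting parameter groups, using a toy generator
# (hypothetical, for illustration) with one parameter named "positions_top",
# the name make_optimizer singles out. Both groups share the same rate here,
# but the split lets the position parameters be scheduled separately.
def _example_make_optimizer():
  class _ToyGenerator(torch.nn.Module):
    def __init__(self):
      super().__init__()
      self.positions_top = torch.nn.Parameter(torch.zeros(4))
      self.colours = torch.nn.Parameter(torch.zeros(4))
  opt = make_optimizer(_ToyGenerator(), learning_rate=0.1)
  print([len(g["params"]) for g in opt.param_groups])  # [1, 1]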
def compute_text_features(prompts, clip_model, device):
"""Compute CLIP features for all prompts."""
text_inputs = []
for prompt in prompts:
text_inputs.append(clip.tokenize(prompt).to(device))
features = []
with torch.no_grad():
for text_input in text_inputs:
features.append(clip_model.encode_text(text_input))
return features
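# Usage sketch, assuming the `clip` package is available; "ViT-B/32" is one
# standard CLIP backbone (an assumption here, not mandated by this module).
def _example_compute_text_features():
  device = "cuda" if torch.cuda.is_available() else "cpu"
  clip_model, _ = clip.load("ViT-B/32", device=device)
  features = compute_text_features(
      ["a red bird", "a blue boat"], clip_model, device)
  print(len(features), features[0].shape)  # 2 torch.Size([1, 512])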
def create_augmented_batch(images, augment_trans, text_features, config):
"""Create batch of images to be evaluated.
Args:
images: batch of images to be augmented [N, C, H, W]
augment_trans: transformations for augmentations
text_features: text feature per image
config: dictionary with config
Returns:
img_batch: Augmented versions of the original images [N*num_augs, C, H, W]
num_augs: number of images per original image
expanded_text_features: a text feature for each augmentation
loss_weights: weights for the losses corresponding to each augmentation
"""
images = images.permute(0, 3, 1, 2) # NHWC -> NCHW
expanded_text_features = []
if config["use_image_augmentations"]:
num_augs = config["num_augs"]
img_augs = []
for _ in range(num_augs):
img_n = augment_trans(images)
img_augs.append(img_n)
expanded_text_features.append(text_features[0])
img_batch = torch.cat(img_augs)
# Given images [P0, P1] and augmentations [a0(), a1()], output format:
# [a0(P0), a0(P1), a1(P0), a1(P1)]
else:
num_augs = 1
img_batch = augment_trans(images)
expanded_text_features.append(text_features[0])
  return img_batch, num_augs, expanded_text_features, [1] * num_augs
def create_compositional_batch(images, augment_trans, text_features):
"""Create 10 sub-images per image by augmenting each with 3x3 crops.
Args:
images: population of N images, format [N, C, H, W]
augment_trans: transformations for augmentations
text_features: text feature per image
Returns:
Tensor of all compositional sub-images + originals; [N*10, C, H, W] format:
[x0_y0(P0) ... x0_y0(PN), ..., x2_y2(P0) ... x2_y2(PN), P0, ..., PN]
10: Number of sub-images + whole, per original image.
expanded_text_features: list of text features, 1 for each composition image
loss_weights: weights for the losses corresponding to each composition image
"""
if len(text_features) != 10:
# text_features should already be 10 in size.
raise ValueError(
"10 text prompts required for compositional image creation")
resize_for_clip = transforms.Compose([transforms.Resize((224, 224))])
img_swap = torch.swapaxes(images, 3, 1)
ims = []
for x in range(3):
for y in range(3):
for k in range(images.shape[0]):
ims.append(resize_for_clip(
img_swap[k][:, y * 112 : y * 112 + 224, x * 112 : x * 112 + 224]))
# Top-level (whole) images
for k in range(images.shape[0]):
ims.append(resize_for_clip(img_swap[k]))
all_img = torch.stack(ims)
all_img = torch.swapaxes(all_img, 1, 3)
all_img = all_img.permute(0, 3, 1, 2) # NHWC -> NCHW
all_img = augment_trans(all_img)
# Last image gets 9 times as much weight
common_weight = 1 / 5
loss_weights = [common_weight] * 9
loss_weights.append(9 * common_weight)
return all_img, 10, text_features, loss_weights
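# Geometry note (inferred from the arithmetic above): the nine crops assume
# a 448x448 canvas, taking overlapping 224x224 windows at offsets
# {0, 112, 224} along each axis (a stride of half a window), with the whole
# canvas resized to 224x224 as the tenth image.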
def evaluation(t, clip_enc, generator, augment_trans, text_features,
prompts, config, device):
"""Do a step of evaluation, returning images and losses.
Args:
t: step count
clip_enc: model for CLIP encoding
generator: drawing generator to optimise
augment_trans: transforms for image augmentation
    text_features: list of text features (1 normally, 10 for compositional)
prompts: for debugging/visualisation - the list of text prompts
config: dictionary with hyperparameters
device: torch device
Returns:
    loss: torch.Tensor with the single combined loss
losses_separate_np: numpy array of loss for each image
losses_individuals_np: numpy array with loss for each population individual
img_np: numpy array of images from the generator
"""
# Annealing parameters.
params = {"gamma": t / config["optim_steps"]}
# Rebuild the generator.
img = generator(params)
img_np = img.detach().cpu().numpy()
# Create images for different regions
pop_size = img.shape[0]
if config["compositional_image"]:
(img_batch, num_augs, text_features, loss_weights
) = create_compositional_batch(img, augment_trans, text_features)
else:
(img_batch, num_augs, text_features, loss_weights
) = create_augmented_batch(img, augment_trans, text_features, config)
losses = torch.zeros(pop_size, num_augs).to(device)
# Compute and add losses after augmenting the image with transforms.
img_batch = torch.clip(img_batch, 0, 1) # clip the images.
image_features = clip_enc.encode_image(img_batch)
count = 0
for n in range(num_augs): # number of augmentations or composition images
for p in range(pop_size):
loss = torch.cosine_similarity(
text_features[n], image_features[count:count+1], dim=1
)[0] * loss_weights[n]
losses[p, n] -= loss
if VISUALISE_BATCH_IMAGES and t % 500 == 0:
# Show all the images in the batch along with their losses.
if config["compositional_image"]:
print(f"Loss {loss} for image region with prompt {prompts[n]}:")
else:
print(f"Loss {loss} for image augmentation with prompt {prompts[0]}:")
show_and_save(img_batch[count].unsqueeze(0), config,
img_format="SCHW", show=config["gui"])
count += 1
loss = torch.sum(losses) / pop_size
losses_separate_np = losses.detach().cpu().numpy()
  # Sum losses for each population individual.
losses_individuals_np = losses_separate_np.sum(axis=1)
return loss, losses_separate_np, losses_individuals_np, img_np
def step_optimization(t, clip_enc, lr_scheduler, generator, augment_trans,
text_features, prompts, config, device, final_step=False):
"""Do a step of optimization.
Args:
t: step count
clip_enc: model for CLIP encoding
lr_scheduler: optimizer
generator: drawing generator to optimise
augment_trans: transforms for image augmentation
    text_features: list of 1 (normal) or 10 (compositional) text features
prompts: for debugging/visualisation - the list of text prompts
config: dictionary with hyperparameters
device: CUDA device
final_step: if True does extras such as saving the model
Returns:
losses_np: numpy array with loss for each population individual
losses_separate_np: numpy array of loss for each image
"""
# Anneal learning rate and other parameters.
t0 = time.time()
if t == int(config["optim_steps"] / 3):
for g in lr_scheduler.param_groups:
g["lr"] = g["lr"] / 2.0
if t == int(config["optim_steps"] * (2/3)):
for g in lr_scheduler.param_groups:
g["lr"] = g["lr"] / 2.0
# Forward pass.
lr_scheduler.zero_grad()
loss, losses_separate_np, losses_np, img_np = evaluation(
t=t, clip_enc=clip_enc, generator=generator, augment_trans=augment_trans,
text_features=text_features, prompts=prompts, config=config,
device=device)
# Backpropagate the gradients.
loss.backward()
  torch.nn.utils.clip_grad_norm_(generator.parameters(),
                                 config["gradient_clipping"])
# Decay the learning rate.
lr_scheduler.step()
# Render the big version.
if final_step:
show_and_save(
img_np, config, t=t, img_format="SHWC", show=config["gui"])
output_dir = config["output_dir"]
print(f"Saving model to {output_dir}...")
torch.save(generator.state_dict(), f"{output_dir}/generator.pt")
if t % config["trace_every"] == 0:
output_dir = config["output_dir"]
filename = f"{output_dir}/optim_{t}"
show_and_save(img_np, config,
max_display=config["max_multiple_visualizations"],
stitch=True, img_format="SHWC",
show=config["gui"],
filename=filename)
t1 = time.time()
print("Iteration {:3d}, rendering loss {:.6f}, {:.3f}s/iter".format(
t, loss.item(), t1-t0))
return losses_np, losses_separate_np, img_np
def population_evolution_step(generator, config, losses):
"""GA for the population."""
if config["ga_method"] == "Microbial":
# Competition between 2 random individuals; mutated winner replaces loser.
indices = list(range(len(losses)))
np.random.shuffle(indices)
select_1, select_2 = indices[0], indices[1]
if losses[select_1] < losses[select_2]:
generator.copy_and_mutate_s(select_1, select_2)
else:
generator.copy_and_mutate_s(select_2, select_1)
elif config["ga_method"] == "Evolutionary Strategies":
# Replace rest of population with mutants of the best.
winner = np.argmin(losses)
for other in range(len(losses)):
if other == winner:
continue
generator.copy_and_mutate_s(winner, other)
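# A minimal sketch of the "Evolutionary Strategies" branch, using a stub
# generator (hypothetical, for illustration) that records which individuals
# are overwritten by mutated copies of the winner.
def _example_population_evolution_step():
  class _StubGenerator:
    def copy_and_mutate_s(self, winner, loser):
      print(f"individual {loser} <- mutated copy of {winner}")
  losses = np.array([0.3, 0.1, 0.7])  # individual 1 has the lowest loss
  population_evolution_step(
      _StubGenerator(), {"ga_method": "Evolutionary Strategies"}, losses)
  # individual 0 <- mutated copy of 1
  # individual 2 <- mutated copy of 1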
|
arnheim-main
|
arnheim_3/src/training.py
|
# coding=utf8
# Copyright 2019 the wasserstein_fairness Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Algorithms relating to optimal transport."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl import logging
import numpy as np
import scipy.sparse as sparse
# The inner calculation of the sorted Wasserstein coupling in
# wasserstein_coupling_on_the_real_line depends only on the lengths of the
# vector arguments to that function. We can cache those couplings and
# avoid a costly loop.
_WCOTRL_CACHE = {}
def sinkhorn(cost, lambda_, distn_a, distn_b, tolerance=0.0000001):
"""Use the Sinkhorn algorithm to find an optimum coupling.
An implementation of the algorithm described in
https://papers.nips.cc/paper/4927-sinkhorn-distances-lightspeed-computation-of-optimal-transport.pdf
with variable names referring to notation found there.
Args:
cost: I am pretty sure that this matrix should be square (NxN) and strictly
positive.
lambda_: Regulariser.
distn_a: Input distribution a. A vector of length N.
distn_b: Input distribution b. A vector of length N.
tolerance: Stopping tolerance for the iteration.
Returns:
A 3-tuple with the following members:
[0]: the left matrix scaling factor u (size: [N]),
[1]: the right matrix scaling factor v (size: [N]),
[2]: the optimum coupling between distn_a and distn_b.
"""
big_k = np.exp(-lambda_ * cost)
big_k_tilde = np.dot(np.diag(1.0 / distn_a), big_k) # was matmul
u = np.ones_like(distn_a) / len(distn_a)
old_u = u + tolerance
iteration = itertools.count()
while np.linalg.norm(old_u - u) > tolerance:
old_u = u
u = 1.0 / np.dot(big_k_tilde, (distn_b / np.dot(big_k.T, u))) # was matmul
logging.debug('Sinkhorn iteration: %d', next(iteration))
v = distn_b / np.dot(big_k.T, u)
coupling = np.dot(np.dot(np.diag(u), big_k), np.diag(v)) # was matmul
return u, v, coupling
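# A small self-contained check (illustrative, not from the original module):
# couple two uniform distributions over 3 points; the coupling's marginals
# should recover distn_a and distn_b up to the stopping tolerance.
def _example_sinkhorn():
  rng = np.random.RandomState(0)
  cost = rng.uniform(0.1, 1.0, size=(3, 3))  # square and strictly positive
  distn_a = np.ones(3) / 3
  distn_b = np.ones(3) / 3
  _, _, coupling = sinkhorn(cost, lambda_=10.0, distn_a=distn_a,
                            distn_b=distn_b)
  print(np.allclose(coupling.sum(axis=1), distn_a, atol=1e-4))  # True
  print(np.allclose(coupling.sum(axis=0), distn_b, atol=1e-4))  # True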
def wasserstein_coupling_on_the_real_line(x1, x2):
"""Compute the Wasserstein coupling between two sets of real numbers.
Args:
x1: A vector of real numbers.
x2: A vector of real numbers. Need not be the same length as x1.
Returns:
The Wasserstein coupling.
"""
# If x1 and x2 were sorted, then the coupling matrix would only depend on
  # their lengths. The sort orders of x1 and x2 only require us to permute the
# coupling matrix to match. So, we first compute the coupling matrix for a
# sorted x1, x2:
l1 = len(x1)
l2 = len(x2)
# Shortcut 1: the coupling matrix for sorted vectors of the same length is the
# identity matrix, scaled.
if l1 == l2:
return sparse.spdiags([np.ones_like(x1) / l1], [0], l1, l1)
# Shortcut 2: we may have cached the sorted coupling matrix that we're about
# to compute.
elif (l1, l2) in _WCOTRL_CACHE:
coupling = _WCOTRL_CACHE[(l1, l2)]
# Alas, we need to compute the coupling matrix after all.
else:
# coupling = np.zeros((l1, l2))
data = []
rows = []
cols = []
i, j = 0, 0
while i < l1 and j < l2:
logging.debug('WCOTRL: %d,%d', i, j)
il2 = i * l2
jl1 = j * l1
il2_limit = il2 + l2 - 1
jl1_limit = jl1 + l1 - 1
intersection_size = max(
0,
1 + min(il2_limit, jl1_limit) - max(il2, jl1)
)
# coupling[i, j] = intersection_size
data.append(intersection_size)
rows.append(i)
cols.append(j)
if il2_limit <= jl1_limit: i += 1
if il2_limit >= jl1_limit: j += 1
coupling = sparse.csr_matrix((data, (rows, cols)), shape=(l1, l2))
_WCOTRL_CACHE[(l1, l2)] = coupling # Cache for later.
# Now we permute the rows and columns of the coupling matrix to match the sort
# orders of x1 and x2.
row_inds = np.argsort(np.argsort(x1)) # ⍋⍋x1
col_inds = np.argsort(np.argsort(x2))
coupling = coupling[row_inds, :]
coupling = coupling[:, col_inds]
return coupling / (l1 * l2)
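# Illustration with made-up inputs: for vectors of lengths 2 and 3, every
# row of the returned coupling sums to 1/2 and every column to 1/3, with
# mass routed between matching quantiles of the two samples.
def _example_wasserstein_coupling():
  coupling = wasserstein_coupling_on_the_real_line(
      np.array([5.0, 1.0]), np.array([2.0, 0.0, 4.0]))
  dense = np.asarray(coupling.todense())
  print(np.allclose(dense.sum(axis=1), 1 / 2))  # True
  print(np.allclose(dense.sum(axis=0), 1 / 3))  # True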
|
wasserstein_fairness-master
|
wasserstein_fairness/optimal_transport.py
|
# Copyright 2019 the wasserstein_fairness Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic components of the costs that combine in Wasserstein Fairness.
The functions in this file are centred around the calculation of individual
costs/losses. Functions that combine these costs (and gradients thereof) appear
in `combined_costs.py`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.sparse as sparse
import scipy.special as scisp
import sklearn.metrics as skmet
# Use scipy.special's expit for the logistic; it's numerically stable.
sigmoid = scisp.expit
def regression_loss(y_pred, y_true):
"""Logistic regression loss."""
# Aldo's logistic regression loss is the same loss that scikit-learn expresses
# but with the arguments reversed (and some numerical precautions absent).
return skmet.log_loss(y_true, y_pred)
def regression_loss_gradient(x, y, theta):
"""Gradient of the logistic loss with respect to theta.
The regression parameters `theta` may have one more entry than there are
features (columns) in `x`, in which case an intercept term will be added to
`x` prior to computing the gradient.
Args:
x: Feature inputs to the regression (each row a data point). An MxN matrix.
y: Target outputs of the regression; a vector of length M.
theta: Regression parameters; a vector of length N or N+1.
Returns:
The gradient as described.
"""
x = _maybe_add_intercept(x, theta)
h = sigmoid(np.dot(x, theta)) # was matmul
return np.dot(x.T, (h - y)) / y.size # was matmul
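# A quick numerical sanity check (illustrative): the analytic gradient should
# match a central finite difference of the mean log loss it differentiates
# (predict_prob is defined later in this module).
def _example_regression_loss_gradient():
  rng = np.random.RandomState(0)
  x = rng.normal(size=(20, 3))
  y = np.tile([0.0, 1.0], 10)  # both classes present, as log_loss expects
  theta = rng.normal(size=3)
  grad = regression_loss_gradient(x, y, theta)
  finite_diff = np.zeros_like(theta)
  eps = 1e-6
  for i in range(len(theta)):
    step = np.zeros_like(theta)
    step[i] = eps
    finite_diff[i] = (regression_loss(predict_prob(x, theta + step), y) -
                      regression_loss(predict_prob(x, theta - step), y)
                     ) / (2 * eps)
  print(np.allclose(grad, finite_diff, atol=1e-4))  # True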
def wasserstein_two_loss_gradient(x1, x2, coupling, theta):
"""Gradient of the Wasserstein-2 distance loss with respect to theta.
The regression parameters `theta` may have one more entry than there are
features (columns) in `x1` or `x2`, in which case an intercept term will be
added to both prior to computing the gradient.
Args:
x1: A subset of feature inputs to the regression: a JxN matrix.
x2: Another subset of feature inputs to the regression: a KxN matrix.
coupling: A JxK Wasserstein distance coupling matrix of the kind computed by
`optimal_transport.wasserstein_coupling_on_the_real_line`. Can be a
scipy.sparse sparse matrix.
theta: Regression parameters; a vector of length N or N+1.
Returns:
The gradient as described.
"""
x1 = _maybe_add_intercept(x1, theta)
x2 = _maybe_add_intercept(x2, theta)
weights_1 = np.asarray(np.sum(coupling, axis=1)).ravel()
weights_2 = np.asarray(np.sum(coupling, axis=0)).ravel()
# The prior two lines used to be as follows, but are revised to the above to
# allow use of sparse matrices.
# weights_1 = np.sum(coupling, axis=1)
# weights_2 = np.sum(coupling, axis=0)
h1 = sigmoid(np.dot(x1, theta)) # was matmul
h2 = sigmoid(np.dot(x2, theta)) # was matmul
nh1p1 = 1 - h1 # The name means "negative h1 plus 1".
nh2p1 = 1 - h2
term_1 = 2 * np.dot(weights_1 * nh1p1 * h1**2, x1) # was matmul
term_2 = 2 * np.dot(weights_2 * nh2p1 * h2**2, x2) # was matmul
term_3 = -2 * (np.dot(coupling.dot(h2) * h1 * nh1p1, x1) + # was matmul
np.dot(coupling.T.dot(h1) * h2 * nh2p1, x2)) # was matmul
# The last line of term_3 used to be as follows, but is revised to the
# above to allow use of sparse matrices.
# np.dot(np.dot(h1, coupling) * h2 * nh2p1, x2)) # was matmul
return term_1 + term_2 + term_3
def wass_one_barycenter_loss_gradient(b, x2, coupling, theta, delta):
"""Gradient of the Wass1 barycenter loss with respect to theta.
Args:
    b: barycenter distribution, a length-L vector of scores.
x2: A subset of feature inputs to the regression: a KxN matrix.
coupling: A LxK Wasserstein distance coupling matrix of the kind computed by
`optimal_transport.wasserstein_coupling_on_the_real_line`. Can be a
scipy.sparse sparse matrix.
theta: Regression parameters; a vector of length N or N+1.
delta: pseudo-Huber loss parameter.
Returns:
The gradient as described.
"""
x2 = _maybe_add_intercept(x2, theta)
h2 = sigmoid(np.dot(x2, theta)) # was matmul
mask = (coupling != 0.0)
if sparse.issparse(coupling):
diff = (mask.dot(sparse.spdiags(h2, 0, len(h2), len(h2))) -
sparse.spdiags(b, 0, len(b), len(b)).dot(mask))
else:
diff = h2.reshape((1, -1)) - b.reshape((-1, 1))
  # Entries in denom and d_huber_d_diff that correspond to zero entries in
  # mask will be nonsense and should be ignored; the elementwise
  # multiplication with coupling below will zero out those entries.
  if sparse.issparse(diff):
    denom = (diff / delta).power(2.0)
  else:
    denom = np.power(diff / delta, 2.0)  # ndarrays have no .power method
  denom[mask] += 1.0
  denom = np.sqrt(denom)
  d_huber_d_diff = diff.copy()
  d_huber_d_diff[mask] /= denom[mask]
if sparse.issparse(coupling):
coupling_d_huber_d_diff = coupling.multiply(d_huber_d_diff)
else:
coupling_d_huber_d_diff = np.multiply(coupling, d_huber_d_diff)
d_h2_d_logits2 = h2 * (1 - h2)
# Now we create weight vectors that measure how much the sigmoid and pseudo-
# Huber derivatives weight the data points (which are the derivatives of the
  # logits with respect to theta).
weights2 = (d_h2_d_logits2 *
np.asarray(np.sum(coupling_d_huber_d_diff, axis=0)).ravel())
# Finally, the gradient of theta itself.
return np.dot(weights2, x2)
def wasserstein_one_loss_gradient_method_one(x1, x2, coupling, theta, delta):
"""Gradient of the Wasserstein-1 distance loss with respect to theta.
To ensure differentiability, we substitute a pseudo-Huber loss term for the
absolute value difference used by the true Wasserstein-1 gradient. The delta
  parameter controls how curvy the minimum is---but also controls how steep
the gradient is far away from 0. See Wikipedia for more details:
https://en.wikipedia.org/wiki/Huber_loss#Pseudo-Huber_loss_function.
The regression parameters `theta` may have one more entry than there are
features (columns) in `x1` or `x2`, in which case an intercept term will be
added to both prior to computing the gradient.
The `wasserstein_one_loss_gradient_method_two` function is an alternative
implementation of this method.
Args:
x1: A subset of feature inputs to the regression: a JxN matrix.
x2: Another subset of feature inputs to the regression: a KxN matrix.
coupling: A JxK Wasserstein distance coupling matrix of the kind computed by
`optimal_transport.wasserstein_coupling_on_the_real_line`. Must be a
scipy.sparse sparse matrix.
theta: Regression parameters; a vector of length N or N+1.
delta: The pseudo-Huber loss delta parameter.
Returns:
The gradient as described.
Raises:
TypeError: coupling is not a scipy.sparse matrix.
"""
if not sparse.issparse(coupling): raise TypeError(
'The `coupling` argument to `wasserstein_one_loss_gradient` must be '
'a scipy.sparse sparse matrix.')
x1 = _maybe_add_intercept(x1, theta)
x2 = _maybe_add_intercept(x2, theta)
# The predictions for the inputs:
logits1 = np.dot(x1, theta)
logits2 = np.dot(x2, theta)
h1 = sigmoid(logits1)
h2 = sigmoid(logits2)
# First, we compute a matrix where element j,k expresses:
# coupling[j,k] * diff[j,k] / sqrt(1 + (diff[j,k] / delta)^2)
# where
# diff[j,k] = h1[j] - h2[k].
# If coupling is sparse, we only compute diff entries where coupling != 0.
mask = (coupling != 0.0)
if sparse.issparse(coupling):
diff = (sparse.spdiags(h1, 0, len(h1), len(h1)).dot(mask) -
mask.dot(sparse.spdiags(h2, 0, len(h2), len(h2))))
else:
diff = h1.reshape((-1, 1)) - h2.reshape((1, -1))
denom = (diff / delta).power(2.0) # For dense matrices, entries in denom
denom[mask] += 1.0 # and d_huber_d_diff that correspond to
denom = np.sqrt(denom) # zero entries in mask will be nonsense
# and should be ignored. The elementwise
d_huber_d_diff = diff.copy() # multiplication with coupling below will
d_huber_d_diff[mask] /= denom[mask] # zero out those entries.
if sparse.issparse(coupling):
coupling_d_huber_d_diff = coupling.multiply(d_huber_d_diff)
else:
coupling_d_huber_d_diff = np.multiply(coupling, d_huber_d_diff)
# Differentiate the sigmoids w.r.t. their arguments. This is required for
# differentiation of diff, which is required by the chain rule.
d_h1_d_logits1 = h1 * (1 - h1)
d_h2_d_logits2 = h2 * (1 - h2)
# Now we create weight vectors that measure how much the sigmoid and pseudo-
# Huber derivatives weight the data points (which are the derivatives of the
  # logits with respect to theta).
weights1 = (d_h1_d_logits1 *
np.asarray(np.sum(coupling_d_huber_d_diff, axis=1)).ravel())
weights2 = (d_h2_d_logits2 *
np.asarray(np.sum(coupling_d_huber_d_diff, axis=0)).ravel())
# Finally, the gradient of theta itself.
term1 = np.dot(weights1, x1)
term2 = np.dot(weights2, x2)
return term1 - term2
def wasserstein_one_loss_gradient_method_two(x1, x2, coupling, theta, delta):
"""Compute Wasserstein-1 loss gradient using the Wass-2 gradient.
To ensure differentiability, we substitute a pseudo-Huber loss term for the
absolute value difference used by the true Wasserstein-1 gradient. The delta
  parameter controls how curvy the minimum is---but also controls how steep
the gradient is far away from 0. See Wikipedia for more details:
https://en.wikipedia.org/wiki/Huber_loss#Pseudo-Huber_loss_function.
The regression parameters `theta` may have one more entry than there are
features (columns) in `x1` or `x2`, in which case an intercept term will be
added to both prior to computing the gradient.
The `wasserstein_one_loss_gradient_method_one` function is an alternative
implementation of this method.
Args:
x1: A subset of feature inputs to the regression: a JxN matrix.
x2: Another subset of feature inputs to the regression: a KxN matrix.
coupling: A JxK Wasserstein distance coupling matrix of the kind computed by
`optimal_transport.wasserstein_coupling_on_the_real_line`. Must be a
scipy.sparse sparse matrix.
theta: Regression parameters; a vector of length N or N+1.
delta: The pseudo-Huber loss delta parameter.
Returns:
The gradient as described.
Raises:
TypeError: coupling is not a scipy.sparse matrix.
"""
  if not sparse.issparse(coupling): raise TypeError(
      'The `coupling` argument to `wasserstein_one_loss_gradient_method_two` '
      'must be a scipy.sparse sparse matrix.')
  xx1 = _maybe_add_intercept(x1, theta)
xx2 = _maybe_add_intercept(x2, theta)
h1 = sigmoid(np.dot(xx1, theta))
h2 = sigmoid(np.dot(xx2, theta))
mask = (coupling != 0.0)
if sparse.issparse(coupling):
diff = (sparse.spdiags(h1, 0, len(h1), len(h1)).dot(mask) -
mask.dot(sparse.spdiags(h2, 0, len(h2), len(h2))))
else:
diff = h1.reshape((-1, 1)) - h2.reshape((1, -1))
denom = (diff / delta).power(2.0)
denom[mask] += 1.0
denom = np.sqrt(denom)
multiplier = (0.5 * denom.power(-1))
return wasserstein_two_loss_gradient(
x1, x2, coupling.multiply(multiplier), theta)
def predict_prob(x, theta):
"""Predict probabilities for input datapoints x."""
x = _maybe_add_intercept(x, theta)
return sigmoid(np.dot(x, theta)) # was matmul
def predict(x, theta, threshold):
"""Predict outcomes for input datapoints x via thresholding."""
return predict_prob(x, theta) > threshold
### Private helpers ###
def _maybe_add_intercept(x, theta):
"""Append intercept column to feature data `x` if `theta` needs one.
Also converts x to a numpy array if needed.
Args:
x: Feature matrix; MxN. Each row is a data point.
theta: Regression parameters; a vector of length N or N+1.
Returns:
`x` if `x` is as wide as `theta` is long; otherwise, `x` with an
additional intercept column of ones.
Raises:
ValueError: `len(theta)` isn't in `[len(x[0]), len(x[0]) + 1]`.
"""
x = np.asarray(x)
theta = np.asarray(theta)
if x.shape[1] == theta.shape[-1] - 1:
return _add_intercept(x)
elif x.shape[1] == theta.shape[-1]:
return x
else:
raise ValueError(
'Shape mismatch when deciding whether to add an intercept column to '
'the data: x.shape={}, theta.shape={}'.format(x.shape, theta.shape))
def _add_intercept(x):
"""Append an intercept column to the feature data `x`."""
return np.pad(x, ((0, 0), (0, 1)), mode='constant',
constant_values=((0., 0.), (0., 1.)))
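# For example, _add_intercept(np.array([[1., 2.], [3., 4.]])) returns
#   [[1., 2., 1.],
#    [3., 4., 1.]]
# i.e. a constant column of ones appended for the bias term.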
|
wasserstein_fairness-master
|
wasserstein_fairness/basic_costs.py
|
# Copyright 2019 the wasserstein_fairness Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
wasserstein_fairness-master
|
wasserstein_fairness/__init__.py
|
# Copyright 2019 the wasserstein_fairness Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gradient functions etc. combining multiple Wasserstein Fairness objectives.
A companion module to the `basic_costs.py` module, where individual component
losses that are combined in this module's methods are defined.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import numpy as np
import scipy.sparse as sparse
from wasserstein_fairness import basic_costs
from wasserstein_fairness import optimal_transport
def gradient_smoothed_logistic(dataframes_all_data, dataframes_protected,
theta, lambda_, beta, alpha,
distance='wasserstein-2', delta=0.1,
baryscore=None):
"""Calculate parameter gradient for Wasserstein-fair logistic regression.
This function uses the Sinkhorn algorithm to compute a smoothed coupling
between predictions for separate datasets.
The regression parameters `theta` may have one more entry than there are
features (columns) in the input data, in which case an intercept term will be
added to that data prior to calculating the gradient.
Args:
dataframes_all_data: a 2-tuple whose members are [0] feature data inputs to
the regression as a Pandas dataframe and [1] {0, 1} target outputs of
the regression as a Pandas dataframe.
dataframes_protected: a collection of feature data inputs (like the first
element of `dataframes_all_data`) whose rows are limited to only those
entries concerning members of protected categories, etc.
theta: Regression parameters; a vector of length N or N+1.
lambda_: A regulariser for computing the Wasserstein coupling.
beta: Penalisation weight for the Wasserstein fairness loss.
alpha: Tradeoff penalty weight between regression loss(/gradient) and
the Wasserstein loss(/gradient):
`loss = alpha * logistic_loss + (1-alpha) * beta * Wasserstein_loss`.
distance: selects the distribution distance to use for computing fairness
gradients and costs. Valid values are 'wasserstein-1' and
'wasserstein-2'.
delta: delta parameter for keeping the 'wasserstein-1' difference
differentiable. (We replace the absolute value function with the
pseudo-Huber loss.) (The actual Wasserstein cost still uses the
absolute value.)
    baryscore: (d,) array of scores distributed as the barycenter of the
      groups' prediction distributions.
Returns:
A 3-tuple with the following members:
[0]: Wasserstein-fair logistic regression gradient.
[1]: Regression objective cost.
[2]: Wasserstein objective cost, unscaled (i.e. not multiplied by beta.)
"""
def get_coupling(outputs_all_data, op):
"""Compute smoothed Wasserstein coupling."""
cost_matrix = (
(np.reshape(outputs_all_data**2, (-1, 1)) +
np.reshape(op**2, (1, -1))) -
2.0 * np.outer(outputs_all_data, op))
p = np.ones_like(outputs_all_data) / len(outputs_all_data)
q = np.ones_like(op) / len(op)
_, _, coupling = optimal_transport.sinkhorn(cost_matrix, lambda_, p, q)
return coupling
return _gradient_function_core(
dataframes_all_data, dataframes_protected, theta, get_coupling,
beta, alpha, distance, delta, baryscore)
def gradient_line_logistic(dataframes_all_data, dataframes_protected,
theta, beta, alpha,
distance='wasserstein-2', delta=0.1, baryscore=None):
"""Calculate parameter gradient for Wasserstein-fair logistic regression.
This function uses the "hard" Wasserstein coupling between predictions for
separate datasets.
The regression parameters `theta` may have one more entry than there are
features (columns) in the input data, in which case an intercept term will be
added to that data prior to calculating the gradient.
Args:
dataframes_all_data: a 2-tuple whose members are [0] feature data inputs to
the regression as a Pandas dataframe and [1] {0, 1} target outputs of
the regression as a Pandas dataframe.
dataframes_protected: a collection of feature data inputs (like the first
element of `dataframes_all_data`) whose rows are limited to only those
entries concerning members of protected categories, etc.
theta: Regression parameters; a vector of length N or N+1.
beta: Penalisation weight for the Wasserstein fairness loss.
alpha: Tradeoff penalty weight between regression loss(/gradient) and
the Wasserstein loss(/gradient):
`loss = alpha * logistic_loss + (1-alpha) * beta * Wasserstein_loss`.
distance: selects the distribution distance to use for computing fairness
gradients and costs. Valid values are 'wasserstein-1' and
'wasserstein-2'.
delta: delta parameter for keeping the 'wasserstein-1' difference
differentiable. (We replace the absolute value function with the
pseudo-Huber loss.) (The actual Wasserstein cost still uses the
absolute value.)
    baryscore: (d,) array of scores distributed as the barycenter of the
      groups' prediction distributions.
Returns:
A 3-tuple with the following members:
[0]: Wasserstein-fair logistic regression gradient.
[1]: Regression objective cost.
[2]: Wasserstein objective cost, unscaled (i.e. not multiplied by beta.)
"""
def get_coupling(outputs_all_data, op):
return optimal_transport.wasserstein_coupling_on_the_real_line(
outputs_all_data, op)
return _gradient_function_core(
dataframes_all_data, dataframes_protected, theta, get_coupling,
beta, alpha, distance, delta, baryscore)
def _gradient_function_core(dataframes_all_data, dataframes_protected,
theta, get_coupling, beta, alpha, distance, delta,
baryscore):
"""Common implementation for both `gradient_*` functions.
Args:
dataframes_all_data: a 2-tuple whose members are [0] feature data inputs to
the regression as a Pandas dataframe and [1] {0, 1} target outputs of
the regression as a Pandas dataframe.
dataframes_protected: a collection of feature data inputs (like the first
element of `dataframes_all_data`) whose rows are limited to only those
entries concerning members of protected categories, etc.
theta: Regression parameters; a vector of length N or N+1.
get_coupling: a function taking two arguments:
outputs_all_data, N regression predictions for the entire dataset.
op, M predictions for a subset expressing some sensitive attribute.
This function should return an NxM coupling matrix, which can be a
scipy.sparse matrix if desired.
beta: Penalisation weight for the Wasserstein fairness loss.
alpha: Tradeoff penalty weight between regression loss(/gradient) and
the Wasserstein loss(/gradient):
`loss = alpha * logistic_loss + (1-alpha) * beta * Wasserstein_loss`.
distance: selects the distribution distance to use for computing fairness
gradients and costs. Valid values are 'wasserstein-1' and
'wasserstein-2'.
delta: delta parameter for keeping the 'wasserstein-1' difference
differentiable. (We replace the absolute value function with the
pseudo-Huber loss.) (The actual Wasserstein cost still uses the
absolute value.)
    baryscore: (d,) array of scores distributed as the barycenter of the
      groups' prediction distributions.
Returns:
A 3-tuple with the following members:
[0]: Wasserstein-fair logistic regression gradient.
[1]: Regression objective cost.
[2]: Wasserstein objective cost, unscaled (i.e. not multiplied by beta.)
Raises:
ValueError: unrecognised distance type specified for `distance`.
"""
# Gather input data.
x_all_data = np.array(dataframes_all_data[0])
y_all_data = np.array(dataframes_all_data[1])
x_protected = [np.array(dp) for dp in dataframes_protected]
# Run model on all and on protected inputs.
outputs_all_data = basic_costs.predict_prob(x_all_data, theta)
outputs_protected = [
basic_costs.predict_prob(xp, theta) for xp in x_protected]
# Compute the gradient and cost of the logistic regression for all data.
grad_logistic = basic_costs.regression_loss_gradient(
x_all_data, y_all_data, theta)
cost_logistic = basic_costs.regression_loss(
outputs_all_data, y_all_data)
# Compute the gradient and cost of the Wasserstein loss for each protected
# group. Wasserstein comparisons are between the predictions for the protected
# group and the predictions for all data.
grad_wasserstein = np.zeros_like(theta)
cost_wasserstein = 0.0
if beta > 0.0:
for xp, op in zip(x_protected, outputs_protected):
# Compute fairness costs and gradients.
if distance == 'wasserstein-2':
assert baryscore is None
# Compute coupling matrix. This may be sparse.
coupling = get_coupling(outputs_all_data, op)
# Compute the Wasserstein-2 cost. The naive way to implement this
# calculation is:
# np.trace(np.dot(cost_matrix, coupling.T))
# or
# np.trace(np.dot(coupling, cost_matrix.T))
# where
# cost_matrix = (
# (np.reshape(outputs_all_data**2, (-1, 1)) +
# np.reshape(op**2, (1, -1))) -
# 2.0 * np.outer(outputs_all_data, op))
# but the resulting cost_matrix is huge. Instead, we can compute costs
# by summing the trace of all three terms in cost_matrix, which reduces
# memory use considerably and avoids costly matrix multiplications.
cost_wasserstein += np.array(
coupling.sum(axis=1)).ravel().dot(outputs_all_data**2)
cost_wasserstein += coupling.dot(op**2).sum()
cost_wasserstein -= 2.0 * coupling.dot(op).dot(outputs_all_data)
# Compute the Wasserstein gradient.
grad_wasserstein += basic_costs.wasserstein_two_loss_gradient(
x_all_data, xp, coupling, theta)
elif distance == 'wasserstein-1':
        # Compute the Wasserstein-1 cost. This calculation is much like the
        # Wasserstein-2 cost above, where a naive calculation of the cost
        # matrix would be:
        #   cost_matrix = abs(
        #       np.reshape(outputs_all_data, (-1, 1)) -
        #       np.reshape(op, (1, -1)))
if baryscore is not None:
od = baryscore
else:
od = outputs_all_data
# Compute coupling matrix. This may be sparse.
coupling = get_coupling(od, op)
if sparse.issparse(coupling):
cost_wasserstein += abs(
sparse.spdiags(od, 0, len(od), len(od)).dot(coupling) -
coupling.dot(sparse.spdiags(op, 0, len(op), len(op)))).sum()
else:
          cost_wasserstein += abs(
              od.reshape((-1, 1)) * coupling -
              coupling * op.reshape((1, -1))).sum()
# Compute the Wasserstein gradient.
if baryscore is not None:
          grad_wasserstein += basic_costs.wass_one_barycenter_loss_gradient(
baryscore, xp, coupling, theta, delta)
else:
          # Assumes the sparse "hard" coupling produced by
          # wasserstein_coupling_on_the_real_line, as method_one requires.
          grad_wasserstein += (
              basic_costs.wasserstein_one_loss_gradient_method_one(
                  x_all_data, xp, coupling, theta, delta))
# All done. Totalise and return.
logging.debug(' logistic cost: %f', cost_logistic)
logging.debug('wasserstein cost: %f', cost_wasserstein)
return (alpha * grad_logistic + (1 - alpha) * beta * grad_wasserstein,
cost_logistic, cost_wasserstein)
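# A minimal gradient-descent sketch over these combined costs (illustrative;
# the step size, iteration count and hyperparameters are assumptions, not
# values from the original experiments).
def _example_training_loop(dataframes_all_data, dataframes_protected,
                           num_steps=100, step_size=0.1):
  num_features = np.asarray(dataframes_all_data[0]).shape[1]
  theta = np.zeros(num_features + 1)  # extra entry for the intercept term
  for _ in range(num_steps):
    grad, cost_logistic, cost_wasserstein = gradient_line_logistic(
        dataframes_all_data, dataframes_protected, theta,
        beta=1.0, alpha=0.5, distance='wasserstein-2')
    logging.debug('costs: %f %f', cost_logistic, cost_wasserstein)
    theta -= step_size * grad
  return theta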
|
wasserstein_fairness-master
|
wasserstein_fairness/combined_costs.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
streetlearn-master
|
streetlearn/__init__.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main experiment file for the StreetLearn agent, based on an implementation of
Importance Weighted Actor-Learner Architectures.
For details and theory see:
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
See https://arxiv.org/abs/1802.01561 for the full paper.
Note that this derives from code previously published by Lasse Espeholt
under an Apache license at:
https://github.com/deepmind/scalable_agent
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import functools
import os
import sys
import time
import numpy as np
from six.moves import range
import sonnet as snt
import tensorflow.compat.v1 as tf
from streetlearn.python.agents import goal_nav_agent
from streetlearn.python.agents import city_nav_agent
from streetlearn.python.scalable_agent import py_process
from streetlearn.python.scalable_agent import vtrace
from streetlearn.python.environment import default_config
from streetlearn.python.environment import streetlearn
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import staging as contrib_staging
nest = contrib_framework.nest
flags = tf.app.flags
FLAGS = tf.app.flags.FLAGS
flags.DEFINE_string('logdir', '/tmp/agent', 'TensorFlow log directory.')
flags.DEFINE_enum('mode', 'train', ['train', 'test'], 'Training or test mode.')
# Flags used for testing.
flags.DEFINE_integer('test_num_episodes', 10, 'Number of episodes per level.')
# Flags used for distributed training.
flags.DEFINE_integer('task', -1, 'Task id. Use -1 for local training.')
flags.DEFINE_enum('job_name', 'learner', ['learner', 'actor'],
'Job name. Ignored when task is set to -1.')
flags.DEFINE_string('master', '', 'Session master.')
# Training.
flags.DEFINE_integer('total_environment_frames', int(1e9),
'Total environment frames to train for.')
flags.DEFINE_integer('num_actors', 1, 'Number of actors.')
flags.DEFINE_integer('batch_size', 1, 'Batch size for training.')
flags.DEFINE_integer('unroll_length', 50, 'Unroll length in agent steps.')
flags.DEFINE_integer('seed', 1, 'Random seed.')
# Loss settings.
flags.DEFINE_float('entropy_cost', 0.00025, 'Entropy cost/multiplier.')
flags.DEFINE_float('baseline_cost', .5, 'Baseline cost/multiplier.')
flags.DEFINE_float('discounting', .99, 'Discounting factor.')
flags.DEFINE_enum('reward_clipping', 'abs_one', ['abs_one', 'soft_asymmetric'],
'Reward clipping.')
flags.DEFINE_float('heading_prediction_cost', 1.0,
'Auxiliary cost/multiplier for heading prediction.')
flags.DEFINE_float('xy_prediction_cost', 1.0,
'Auxiliary cost/multiplier for XY position prediction.')
flags.DEFINE_float('target_xy_prediction_cost', 1.0,
'Auxiliary cost/multiplier for XY target prediction.')
# Environment settings.
flags.DEFINE_string('game_name', 'curriculum_courier_game',
'Game name for the StreetLearn agent.')
flags.DEFINE_string('level_names', 'manhattan_lowres',
                    'Level name for the StreetLearn agent.')
flags.DEFINE_string('dataset_paths', None, 'Path where the levels are stored.')
flags.DEFINE_integer('width', 84, 'Width of observation.')
flags.DEFINE_integer('height', 84, 'Height of observation.')
flags.DEFINE_integer('graph_width', 84, 'Width of graph visualisation.')
flags.DEFINE_integer('graph_height', 84, 'Height of graph visualisation.')
flags.DEFINE_integer('graph_zoom', 1, 'Zoom in graph visualisation.')
flags.DEFINE_string('start_pano', '',
'Pano at root of partial graph (default: full graph).')
flags.DEFINE_integer('graph_depth', 200, 'Depth of the pano graph.')
flags.DEFINE_integer('frame_cap', 1000, 'Number of frames / episode.')
flags.DEFINE_string('action_set', 'streetlearn_fast_rotate',
'Set of actions used by the agent.')
flags.DEFINE_float('rotation_speed', 22.5,
'Rotation speed of the actor.')
flags.DEFINE_string('observations',
'view_image;graph_image;latlng;target_latlng;yaw;yaw_label;'
'latlng_label;target_latlng_label',
'Observations used by the agent.')
flags.DEFINE_float('timestamp_start_curriculum', time.time(),
'Timestamp at the start of the curriculum.')
flags.DEFINE_float('hours_curriculum_part_1', 0.0,
'Number of hours for 1st part of curriculum.')
flags.DEFINE_float('hours_curriculum_part_2', 24.0,
'Number of hours for 2nd part of curriculum.')
flags.DEFINE_float('min_goal_distance_curriculum', 500.0,
'Maximum distance to goal at beginning of curriculum.')
flags.DEFINE_float('max_goal_distance_curriculum', 3500.0,
'Maximum distance to goal at end of curriculum.')
flags.DEFINE_float('bbox_lat_min', 0, 'Minimum latitude.')
flags.DEFINE_float('bbox_lat_max', 100, 'Maximum latitude.')
flags.DEFINE_float('bbox_lng_min', 0, 'Minimum longitude.')
flags.DEFINE_float('bbox_lng_max', 100, 'Maximum longitude.')
flags.DEFINE_float('min_radius_meters', 100.0, 'Radius of goal area.')
flags.DEFINE_float('max_radius_meters', 200.0, 'Radius of early rewards.')
flags.DEFINE_float('proportion_of_panos_with_coins', 0, 'Proportion of coins.')
# Agent settings.
flags.DEFINE_string('agent', 'city_nav_agent', 'Agent name.')
# Optimizer settings.
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate.')
flags.DEFINE_float('decay', .99, 'RMSProp optimizer decay.')
flags.DEFINE_float('momentum', 0., 'RMSProp momentum.')
flags.DEFINE_float('epsilon', .1, 'RMSProp epsilon.')
# Structure to be sent from actors to learner.
ActorOutput = collections.namedtuple(
'ActorOutput', 'level_name agent_state env_outputs agent_outputs')
AgentOutput = collections.namedtuple('AgentOutput',
'action policy_logits baseline heading')
def is_single_machine():
return FLAGS.task == -1
StepOutputInfo = collections.namedtuple('StepOutputInfo',
'episode_return episode_step')
StepOutput = collections.namedtuple('StepOutput',
'reward info done observation')
class FlowEnvironment(object):
"""An environment that returns a new state for every modifying method.
The environment returns a new environment state for every modifying action and
forces previous actions to be completed first. Similar to `flow` for
`TensorArray`.
Note that this is a copy of the code previously published by Lasse Espeholt
under an Apache license at:
https://github.com/deepmind/scalable_agent
"""
def __init__(self, env):
"""Initializes the environment.
Args:
env: An environment with `initial()` and `step(action)` methods where
`initial` returns the initial observations and `step` takes an action
and returns a tuple of (reward, done, observation). `observation`
should be the observation after the step is taken. If `done` is
True, the observation should be the first observation in the next
episode.
"""
self._env = env
def initial(self):
"""Returns the initial output and initial state.
Returns:
A tuple of (`StepOutput`, environment state). The environment state should
be passed in to the next invocation of `step` and should not be used in
any other way. The reward and transition type in the `StepOutput` is the
      reward/transition type that led to the observation in `StepOutput`.
"""
with tf.name_scope('flow_environment_initial'):
initial_reward = tf.constant(0.)
initial_info = StepOutputInfo(tf.constant(0.), tf.constant(0))
initial_done = tf.constant(True)
initial_observation = self._env.initial()
initial_output = StepOutput(initial_reward, initial_info, initial_done,
initial_observation)
# Control dependency to make sure the next step can't be taken before the
# initial output has been read from the environment.
with tf.control_dependencies(nest.flatten(initial_output)):
initial_flow = tf.constant(0, dtype=tf.int64)
initial_state = (initial_flow, initial_info)
return initial_output, initial_state
def step(self, action, state):
"""Takes a step in the environment.
Args:
action: An action tensor suitable for the underlying environment.
state: The environment state from the last step or initial state.
Returns:
A tuple of (`StepOutput`, environment state). The environment state should
be passed in to the next invocation of `step` and should not be used in
any other way. On episode end (i.e. `done` is True), the returned reward
should be included in the sum of rewards for the ending episode and not
part of the next episode.
"""
with tf.name_scope('flow_environment_step'):
flow, info = nest.map_structure(tf.convert_to_tensor, state)
# Make sure the previous step has been executed before running the next
# step.
with tf.control_dependencies([flow]):
reward, done, observation = self._env.step(action)
with tf.control_dependencies(nest.flatten(observation)):
new_flow = tf.add(flow, 1)
# When done, include the reward in the output info but not in the
# state for the next step.
new_info = StepOutputInfo(info.episode_return + reward,
info.episode_step + 1)
new_state = new_flow, nest.map_structure(
lambda a, b: tf.where(done, a, b),
StepOutputInfo(tf.constant(0.), tf.constant(0)), new_info)
output = StepOutput(reward, new_info, done, observation)
return output, new_state
class StreetLearnImpalaAdapter(streetlearn.StreetLearn):
def __init__(self, dataset_path, config, game):
super(StreetLearnImpalaAdapter, self).__init__(dataset_path, config, game)
self.reset()
def initial(self):
"""Returns the original observation."""
super(StreetLearnImpalaAdapter, self).step([0.0, 0.0, 0.0, 0.0])
observation = self._reshape_observation(self.observation())
return observation
def step(self, action):
"""Takes a step in the environment.
Args:
action: a 1d array containing a combination of actions.
Returns:
reward: float value.
done: boolean indicator.
observation: observation at the last step.
"""
(observation, reward, done, _) = super(
StreetLearnImpalaAdapter, self).step(action)
reward = np.array(reward, dtype=np.float32)
observation = self._reshape_observation(observation)
return reward, done, observation
def _reshape_observation(self, observation):
return [
np.transpose(np.reshape(observation['view_image'],
[3, FLAGS.height, FLAGS.width]),
axes=(1, 2, 0)),
np.transpose(np.reshape(observation['graph_image'],
[3, FLAGS.graph_height, FLAGS.graph_width]),
axes=(1, 2, 0)),
observation['latlng'],
observation['target_latlng'],
observation['yaw'],
observation['yaw_label'],
observation['latlng_label'],
observation['target_latlng_label'],
]
@staticmethod
def _tensor_specs(method_name, unused_kwargs, constructor_kwargs):
"""Returns a nest of `TensorSpec` with the method's output specification."""
observation_spec = [
contrib_framework.TensorSpec([FLAGS.height, FLAGS.width, 3], tf.uint8),
contrib_framework.TensorSpec([FLAGS.graph_height, FLAGS.graph_width, 3],
tf.uint8),
contrib_framework.TensorSpec([
2,
], tf.float64),
contrib_framework.TensorSpec([
2,
], tf.float64),
contrib_framework.TensorSpec([], tf.float64),
contrib_framework.TensorSpec([], tf.uint8),
contrib_framework.TensorSpec([], tf.int32),
contrib_framework.TensorSpec([], tf.int32),
]
if method_name == 'initial':
return observation_spec
elif method_name == 'step':
return (
contrib_framework.TensorSpec([], tf.float32),
contrib_framework.TensorSpec([], tf.bool),
observation_spec,
)
def build_actor(agent, env, level_name, action_set):
"""Builds the actor loop."""
# Initial values.
initial_env_output, initial_env_state = env.initial()
initial_agent_state = agent.initial_state(1)
initial_action = tf.zeros([1], dtype=tf.int32)
dummy_agent_output, _ = agent(
(initial_action,
nest.map_structure(lambda t: tf.expand_dims(t, 0), initial_env_output)),
initial_agent_state)
initial_agent_output = nest.map_structure(
lambda t: tf.zeros(t.shape, t.dtype), dummy_agent_output)
# All state that needs to persist across training iterations. This includes
# the last environment output, agent state and last agent output. These
# variables should never go on the parameter servers.
def create_state(t):
# Creates a unique variable scope to ensure the variable name is unique.
with tf.variable_scope(None, default_name='state'):
return tf.get_local_variable(t.op.name, initializer=t, use_resource=True)
persistent_state = nest.map_structure(
create_state, (initial_env_state, initial_env_output, initial_agent_state,
initial_agent_output))
def step(input_, unused_i):
"""Steps through the agent and the environment."""
env_state, env_output, agent_state, agent_output = input_
# Run agent.
action = agent_output[0]
batched_env_output = nest.map_structure(lambda t: tf.expand_dims(t, 0),
env_output)
agent_output, agent_state = agent((action, batched_env_output), agent_state)
# Convert action index to the native action.
action = agent_output[0][0]
raw_action = tf.gather(action_set, action)
env_output, env_state = env.step(raw_action, env_state)
return env_state, env_output, agent_state, agent_output
# Run the unroll. `read_value()` is needed to make sure later usage will
# return the first values and not a new snapshot of the variables.
first_values = nest.map_structure(lambda v: v.read_value(), persistent_state)
_, first_env_output, first_agent_state, first_agent_output = first_values
# Use scan to apply `step` multiple times, therefore unrolling the agent
# and environment interaction for `FLAGS.unroll_length`. `tf.scan` forwards
# the output of each call of `step` as input of the subsequent call of `step`.
# The unroll sequence is initialized with the agent and environment states
# and outputs as stored at the end of the previous unroll.
# `output` stores lists of all states and outputs stacked along the entire
# unroll. Note that the initial states and outputs (fed through `initializer`)
# are not in `output` and will need to be added manually later.
output = tf.scan(step, tf.range(FLAGS.unroll_length), first_values)
_, env_outputs, _, agent_outputs = output
# Update persistent state with the last output from the loop.
assign_ops = nest.map_structure(lambda v, t: v.assign(t[-1]),
persistent_state, output)
# The control dependency ensures that the final agent and environment states
# and outputs are stored in `persistent_state` (to initialize next unroll).
with tf.control_dependencies(nest.flatten(assign_ops)):
# Remove the batch dimension from the agent state/output.
first_agent_state = nest.map_structure(lambda t: t[0], first_agent_state)
first_agent_output = nest.map_structure(lambda t: t[0], first_agent_output)
agent_outputs = nest.map_structure(lambda t: t[:, 0], agent_outputs)
# Concatenate first output and the unroll along the time dimension.
full_agent_outputs, full_env_outputs = nest.map_structure(
lambda first, rest: tf.concat([[first], rest], 0),
(first_agent_output, first_env_output), (agent_outputs, env_outputs))
output = ActorOutput(
level_name=level_name, agent_state=first_agent_state,
env_outputs=full_env_outputs, agent_outputs=full_agent_outputs)
# No backpropagation should be done here.
return nest.map_structure(tf.stop_gradient, output)
def compute_baseline_loss(advantages):
# Loss for the baseline, summed over the time dimension.
# Multiply by 0.5 to match the standard update rule:
# d(loss) / d(baseline) = advantage
return .5 * tf.reduce_sum(tf.square(advantages))
def compute_entropy_loss(logits):
policy = tf.nn.softmax(logits)
log_policy = tf.nn.log_softmax(logits)
entropy_per_timestep = tf.reduce_sum(-policy * log_policy, axis=-1)
return -tf.reduce_sum(entropy_per_timestep)
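# Note: this is the negative entropy summed over timesteps, so adding it to
# the total loss (scaled by `entropy_cost`) rewards higher-entropy policies,
# encouraging exploration.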
def compute_classification_loss(logits, labels):
classification_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
return tf.reduce_sum(classification_loss)
def compute_policy_gradient_loss(logits, actions, advantages):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=actions, logits=logits)
advantages = tf.stop_gradient(advantages)
policy_gradient_loss_per_timestep = cross_entropy * advantages
return tf.reduce_sum(policy_gradient_loss_per_timestep)
def plot_logits_2d(logits, num_x, num_y):
"""Plot logits as 2D images."""
logits_2d = tf.reshape(logits, shape=[-1, num_y, num_x])
logits_2d = tf.expand_dims(tf.expand_dims(logits_2d[:, ::-1, :], 1), -1)
return logits_2d
def build_learner(agent, agent_state, env_outputs, agent_outputs):
"""Builds the learner loop.
Args:
agent: A snt.RNNCore module outputting `AgentOutput` named tuples, with an
`unroll` call for computing the outputs for a whole trajectory.
agent_state: The initial agent state for each sequence in the batch.
env_outputs: A `StepOutput` namedtuple where each field is of shape
[T+1, ...].
agent_outputs: An `AgentOutput` namedtuple where each field is of shape
[T+1, ...].
Returns:
    A tuple of (done, infos, num_env_frames_and_train), where evaluating the
    last tensor applies a gradient update as a side effect via its control
    dependency on the train op.
"""
learner_outputs, _ = agent.unroll(agent_outputs.action, env_outputs,
agent_state)
# Use last baseline value (from the value function) to bootstrap.
bootstrap_value = learner_outputs.baseline[-1]
  # At this point, the environment outputs at time step `t` are the inputs
  # that lead to the learner_outputs at time step `t`. After the following
  # shifting, the actions in agent_outputs and learner_outputs at time step
  # `t` are what lead to the environment outputs at time step `t`.
agent_outputs = nest.map_structure(lambda t: t[1:], agent_outputs)
rewards, infos, done, observations = nest.map_structure(
lambda t: t[1:], env_outputs)
learner_outputs = nest.map_structure(lambda t: t[:-1], learner_outputs)
observation_names = FLAGS.observations.split(';')
  if FLAGS.reward_clipping == 'abs_one':
    clipped_rewards = tf.clip_by_value(rewards, -1, 1)
  elif FLAGS.reward_clipping == 'soft_asymmetric':
    squeezed = tf.tanh(rewards / 5.0)
    # Negative rewards are given less weight than positive rewards.
    clipped_rewards = tf.where(rewards < 0, .3 * squeezed, squeezed) * 5.
  else:
    # Fail fast instead of leaving `clipped_rewards` undefined below.
    raise ValueError('Unknown reward_clipping: %s' % FLAGS.reward_clipping)
discounts = tf.to_float(~done) * FLAGS.discounting
# Compute V-trace returns and weights.
# Note, this is put on the CPU because it's faster than on GPU. It can be
# improved further with XLA-compilation or with a custom TensorFlow operation.
with tf.device('/cpu'):
vtrace_returns = vtrace.from_logits(
behaviour_policy_logits=agent_outputs.policy_logits,
target_policy_logits=learner_outputs.policy_logits,
actions=agent_outputs.action,
discounts=discounts,
rewards=clipped_rewards,
values=learner_outputs.baseline,
bootstrap_value=bootstrap_value)
# Compute loss as a weighted sum of the baseline loss, the policy gradient
# loss and an entropy regularization term.
rl_loss_policy_gradient = compute_policy_gradient_loss(
learner_outputs.policy_logits, agent_outputs.action,
vtrace_returns.pg_advantages)
rl_loss_baseline = FLAGS.baseline_cost * compute_baseline_loss(
vtrace_returns.vs - learner_outputs.baseline)
rl_loss_entropy = FLAGS.entropy_cost * compute_entropy_loss(
learner_outputs.policy_logits)
total_loss = rl_loss_policy_gradient + rl_loss_baseline + rl_loss_entropy
# Add auxiliary loss for heading prediction.
if 'yaw_label' in observation_names:
idx_yaw_label = observation_names.index('yaw_label')
yaw_logits = learner_outputs.heading
yaw_labels = tf.cast(observations[idx_yaw_label], dtype=tf.int32)
heading_loss = FLAGS.heading_prediction_cost * compute_classification_loss(
yaw_logits, yaw_labels)
total_loss += heading_loss
# Add auxiliary loss for XY position and XY target position prediction.
if 'latlng_label' in observation_names:
idx_latlng_label = observation_names.index('latlng_label')
xy_logits = learner_outputs.xy
xy_labels = tf.cast(observations[idx_latlng_label], dtype=tf.int32)
xy_loss = FLAGS.xy_prediction_cost * compute_classification_loss(
xy_logits, xy_labels)
total_loss += xy_loss
if 'target_latlng_label' in observation_names:
idx_target_latlng_label = observation_names.index('target_latlng_label')
target_xy_logits = learner_outputs.target_xy
target_xy_labels = tf.cast(observations[idx_target_latlng_label],
dtype=tf.int32)
target_xy_loss = (
FLAGS.target_xy_prediction_cost * compute_classification_loss(
target_xy_logits, target_xy_labels))
total_loss += target_xy_loss
  # Optimization.
num_env_frames = tf.train.get_global_step()
learning_rate = tf.train.polynomial_decay(FLAGS.learning_rate, num_env_frames,
FLAGS.total_environment_frames, 0)
optimizer = tf.train.RMSPropOptimizer(learning_rate, FLAGS.decay,
FLAGS.momentum, FLAGS.epsilon)
train_op = optimizer.minimize(total_loss)
# Merge updating the network and environment frames into a single tensor.
with tf.control_dependencies([train_op]):
num_env_frames_and_train = num_env_frames.assign_add(
FLAGS.batch_size * FLAGS.unroll_length)
# Adding a few summaries: RL losses and actions.
tf.summary.scalar('learning_rate', learning_rate)
tf.summary.scalar('rl_loss_policy_gradient',
rl_loss_policy_gradient)
tf.summary.scalar('rl_loss_baseline', rl_loss_baseline)
tf.summary.scalar('rl_loss_entropy', rl_loss_entropy)
if 'yaw_label' in observation_names:
tf.summary.scalar('heading_loss', heading_loss)
if 'latlng_label' in observation_names:
tf.summary.scalar('xy_loss', xy_loss)
if 'target_latlng_label' in observation_names:
tf.summary.scalar('target_xy_loss', target_xy_loss)
tf.summary.scalar('total_loss', total_loss)
tf.summary.histogram('action', agent_outputs.action)
# Adding a few summaries: agent's view and graph.
idx_frame = observation_names.index('view_image')
frame = observations[idx_frame]
tf.summary.image('frame', frame[:3, 0, :, :, :])
idx_graph = observation_names.index('graph_image')
street_graph = observations[idx_graph]
tf.summary.image('street_graph', street_graph[:3, 0, :, :, :])
# Adding a few summaries: current and target lat/lng.
idx_latlng = observation_names.index('latlng')
latlng = observations[idx_latlng]
tf.summary.histogram('current_lat', latlng[:, 0, 0])
tf.summary.histogram('current_lng', latlng[:, 0, 1])
idx_target_latlng = observation_names.index('target_latlng')
target_latlng = observations[idx_target_latlng]
target_latlng = tf.Print(target_latlng, [target_latlng])
tf.summary.histogram('target_lat', target_latlng[:, 0, 0])
tf.summary.histogram('target_lng', target_latlng[:, 0, 1])
# Adding a few summaries: yaw.
if 'yaw' in observation_names:
idx_yaw = observation_names.index('yaw')
yaw = observations[idx_yaw]
tf.summary.histogram('yaw', yaw[:, 0])
# Adding a few summaries: heading prediction.
if 'yaw_label' in observation_names:
img_yaw_labels = tf.expand_dims(
tf.expand_dims(tf.one_hot(tf.cast(yaw_labels, tf.int32), 16), 1), -1)
img_yaw_logits = tf.expand_dims(
tf.expand_dims(tf.nn.softmax(tf.cast(yaw_logits, tf.float32)), 1), -1)
tf.summary.image("yaw_labels", img_yaw_labels[:, :, 0, :, :])
tf.summary.image("yaw_logits", img_yaw_logits[:, :, 0, :, :])
# Adding a few summaries: XY position prediction.
if 'latlng_label' in observation_names:
img_xy_labels = plot_logits_2d(
tf.one_hot(tf.cast(xy_labels[:, 0], tf.int32), 32*32), 32, 32)
img_xy_logits = plot_logits_2d(
tf.nn.softmax(tf.cast(xy_logits[:, 0, :], tf.float32)), 32, 32)
tf.summary.image("xy_labels", img_xy_labels[:, 0, :, :, :])
tf.summary.image("xy_logits", img_xy_logits[:, 0, :, :, :])
  # Adding a few summaries: target XY position prediction.
if 'target_latlng_label' in observation_names:
img_target_xy_labels = plot_logits_2d(
tf.one_hot(tf.cast(target_xy_labels[:, 0], tf.int32), 32*32), 32, 32)
img_target_xy_logits = plot_logits_2d(
tf.nn.softmax(tf.cast(target_xy_logits, tf.float32)), 32, 32)
tf.summary.image("target_xy_labels", img_target_xy_labels[:, 0, :, :, :])
tf.summary.image("target_xy_logits", img_target_xy_logits[:, 0, :, :, :])
return done, infos, num_env_frames_and_train
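# Editor's note: a hedged sketch (hypothetical values, not used above) of the
# schedule built in `build_learner`. With its default power=1.0,
# `tf.train.polynomial_decay` anneals the learning rate linearly from the
# initial value down to `end_learning_rate` over `decay_steps` steps.
def _demo_learning_rate_schedule(global_step):
  """Illustrative only: linear decay from 1e-3 to 0 over 1M steps."""
  return tf.train.polynomial_decay(
      learning_rate=1e-3, global_step=global_step,
      decay_steps=int(1e6), end_learning_rate=0.)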
def create_environment(level_name, seed, is_test=False):
"""Creates an environment wrapped in a `FlowEnvironment`."""
observations = FLAGS.observations.split(';')
tf.logging.info('Observations requested:')
tf.logging.info(observations)
config = {
'status_height': 0,
'width': FLAGS.width,
'height': FLAGS.height,
'graph_width': FLAGS.graph_width,
'graph_height': FLAGS.graph_height,
'graph_zoom': FLAGS.graph_zoom,
'game_name': FLAGS.game_name,
'goal_timeout': FLAGS.frame_cap,
'frame_cap': FLAGS.frame_cap,
'full_graph': (FLAGS.start_pano == ''),
'start_pano': FLAGS.start_pano,
'min_graph_depth': FLAGS.graph_depth,
'max_graph_depth': FLAGS.graph_depth,
'proportion_of_panos_with_coins':
FLAGS.proportion_of_panos_with_coins,
'timestamp_start_curriculum': FLAGS.timestamp_start_curriculum,
'hours_curriculum_part_1': FLAGS.hours_curriculum_part_1,
'hours_curriculum_part_2': FLAGS.hours_curriculum_part_2,
'min_goal_distance_curriculum': FLAGS.min_goal_distance_curriculum,
'max_goal_distance_curriculum': FLAGS.max_goal_distance_curriculum,
'observations': observations,
'bbox_lat_min': FLAGS.bbox_lat_min,
'bbox_lat_max': FLAGS.bbox_lat_max,
'bbox_lng_min': FLAGS.bbox_lng_min,
'bbox_lng_max': FLAGS.bbox_lng_max,
'min_radius_meters': FLAGS.min_radius_meters,
'max_radius_meters': FLAGS.max_radius_meters,
}
config = default_config.ApplyDefaults(config)
tf.logging.info(config)
game = default_config.CreateGame(config['game_name'], config)
dataset_path = FLAGS.dataset_paths + '/' + level_name
tf.logging.info(dataset_path)
p = py_process.PyProcess(
StreetLearnImpalaAdapter, dataset_path, config, game)
return FlowEnvironment(p.proxy)
@contextlib.contextmanager
def pin_global_variables(device):
"""Pins global variables to the specified device."""
def getter(getter, *args, **kwargs):
var_collections = kwargs.get('collections', None)
if var_collections is None:
var_collections = [tf.GraphKeys.GLOBAL_VARIABLES]
if tf.GraphKeys.GLOBAL_VARIABLES in var_collections:
with tf.device(device):
return getter(*args, **kwargs)
else:
return getter(*args, **kwargs)
with tf.variable_scope('', custom_getter=getter) as vs:
yield vs
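# Example usage (editor's note), mirroring `train` below: build the graph
# with every global variable pinned to the learner's parameter device while
# other ops stay on the local device:
#
#   with tf.Graph().as_default(), \
#       pin_global_variables('/job:learner/task:0/cpu'):
#     agent = create_agent(num_actions)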
def create_agent(num_actions):
"""Create the agent."""
assert FLAGS.agent in ['goal_nav_agent', 'city_nav_agent']
if FLAGS.agent == 'city_nav_agent':
agent = city_nav_agent.CityNavAgent(
num_actions, observation_names=FLAGS.observations)
else:
agent = goal_nav_agent.GoalNavAgent(
num_actions, observation_names=FLAGS.observations)
return agent
def train(action_set, level_names):
"""Train."""
if is_single_machine():
local_job_device = ''
shared_job_device = ''
is_actor_fn = lambda i: True
is_learner = True
global_variable_device = '/gpu'
server = tf.train.Server.create_local_server()
server_target = FLAGS.master
filters = []
else:
local_job_device = '/job:%s/task:%d' % (FLAGS.job_name, FLAGS.task)
shared_job_device = '/job:learner/task:0'
is_actor_fn = lambda i: FLAGS.job_name == 'actor' and i == FLAGS.task
is_learner = FLAGS.job_name == 'learner'
    # Placing the variables on the CPU makes them cheaper to send to all the
    # actors; continually copying them from the GPU would be slow.
global_variable_device = shared_job_device + '/cpu'
cluster = tf.train.ClusterSpec({
'actor': ['localhost:%d' % (8001 + i) for i in range(FLAGS.num_actors)],
'learner': ['localhost:8000']
})
server = tf.train.Server(cluster, job_name=FLAGS.job_name,
task_index=FLAGS.task)
server_target = server.target
filters = [shared_job_device, local_job_device]
# Only used to find the actor output structure.
with tf.Graph().as_default():
agent = create_agent(len(action_set))
env = create_environment(level_names[0], seed=1)
structure = build_actor(agent, env, level_names[0], action_set)
flattened_structure = nest.flatten(structure)
dtypes = [t.dtype for t in flattened_structure]
shapes = [t.shape.as_list() for t in flattened_structure]
with tf.Graph().as_default(), \
tf.device(local_job_device + '/cpu'), \
pin_global_variables(global_variable_device):
tf.set_random_seed(FLAGS.seed) # Makes initialization deterministic.
# Create Queue and Agent on the learner.
with tf.device(shared_job_device):
queue = tf.FIFOQueue(1, dtypes, shapes, shared_name='buffer')
agent = create_agent(len(action_set))
# Build actors and ops to enqueue their output.
enqueue_ops = []
for i in range(FLAGS.num_actors):
if is_actor_fn(i):
level_name = level_names[i % len(level_names)]
tf.logging.info('Creating actor %d with level %s', i, level_name)
env = create_environment(level_name, seed=i + 1)
actor_output = build_actor(agent, env, level_name, action_set)
with tf.device(shared_job_device):
enqueue_ops.append(queue.enqueue(nest.flatten(actor_output)))
# If running in a single machine setup, run actors with QueueRunners
# (separate threads).
if is_learner and enqueue_ops:
tf.train.add_queue_runner(tf.train.QueueRunner(queue, enqueue_ops))
# Build learner.
if is_learner:
# Create global step, which is the number of environment frames processed.
tf.get_variable(
'num_environment_frames',
initializer=tf.zeros_initializer(),
shape=[],
dtype=tf.int64,
trainable=False,
collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
# Create batch (time major) and recreate structure.
dequeued = queue.dequeue_many(FLAGS.batch_size)
dequeued = nest.pack_sequence_as(structure, dequeued)
def make_time_major(s):
return nest.map_structure(
lambda t: tf.transpose(t, [1, 0] + list(range(t.shape.ndims))[2:]),
s)
dequeued = dequeued._replace(
env_outputs=make_time_major(dequeued.env_outputs),
agent_outputs=make_time_major(dequeued.agent_outputs))
with tf.device('/gpu'):
        # Using StagingArea allows us to prepare the next batch and send it to
        # the GPU while we're performing a training step. This adds up to one
        # step of policy lag.
flattened_output = nest.flatten(dequeued)
area = contrib_staging.StagingArea([t.dtype for t in flattened_output],
[t.shape for t in flattened_output])
stage_op = area.put(flattened_output)
data_from_actors = nest.pack_sequence_as(structure, area.get())
# Unroll agent on sequence, create losses and update ops.
output = build_learner(agent, data_from_actors.agent_state,
data_from_actors.env_outputs,
data_from_actors.agent_outputs)
# Create MonitoredSession (to run the graph, checkpoint and log).
tf.logging.info('Creating MonitoredSession, is_chief %s', is_learner)
config = tf.ConfigProto(allow_soft_placement=True, device_filters=filters)
with tf.train.MonitoredTrainingSession(
server_target,
is_chief=is_learner,
checkpoint_dir=FLAGS.logdir,
save_checkpoint_secs=600,
save_summaries_secs=30,
log_step_count_steps=50000,
config=config,
hooks=[py_process.PyProcessHook()]) as session:
if is_learner:
tf.logging.info('is_learner')
# Logging.
level_returns = {level_name: [] for level_name in level_names}
summary_writer = tf.summary.FileWriterCache.get(FLAGS.logdir)
# Prepare data for first run.
session.run_step_fn(
lambda step_context: step_context.session.run(stage_op))
# Execute learning and track performance.
num_env_frames_v = 0
while num_env_frames_v < FLAGS.total_environment_frames:
tf.logging.info(num_env_frames_v)
level_names_v, done_v, infos_v, num_env_frames_v, _ = session.run(
(data_from_actors.level_name,) + output + (stage_op,))
level_names_v = np.repeat([level_names_v], done_v.shape[0], 0)
for level_name, episode_return, episode_step in zip(
level_names_v[done_v],
infos_v.episode_return[done_v],
infos_v.episode_step[done_v]):
episode_frames = episode_step
tf.logging.info('Level: %s Episode return: %f',
level_name, episode_return)
summary = tf.summary.Summary()
summary.value.add(tag=level_name + '/episode_return',
simple_value=episode_return)
summary.value.add(tag=level_name + '/episode_frames',
simple_value=episode_frames)
summary_writer.add_summary(summary, num_env_frames_v)
else:
tf.logging.info('actor')
# Execute actors (they just need to enqueue their output).
while True:
session.run(enqueue_ops)
def test(action_set, level_names):
"""Test."""
level_returns = {level_name: [] for level_name in level_names}
with tf.Graph().as_default():
agent = create_agent(len(action_set))
outputs = {}
for level_name in level_names:
env = create_environment(level_name, seed=1, is_test=True)
outputs[level_name] = build_actor(agent, env, level_name, action_set)
with tf.train.SingularMonitoredSession(
checkpoint_dir=FLAGS.logdir,
hooks=[py_process.PyProcessHook()]) as session:
for level_name in level_names:
tf.logging.info('Testing level: %s', level_name)
while True:
done_v, infos_v = session.run((
outputs[level_name].env_outputs.done,
outputs[level_name].env_outputs.info
))
returns = level_returns[level_name]
returns.extend(infos_v.episode_return[1:][done_v[1:]])
if len(returns) >= FLAGS.test_num_episodes:
tf.logging.info('Mean episode return: %f', np.mean(returns))
break
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
action_set = streetlearn.get_action_set(FLAGS.action_set,
FLAGS.rotation_speed)
tf.logging.info(action_set)
level_names = FLAGS.level_names.split(',')
tf.logging.info(level_names)
if FLAGS.mode == 'train':
train(action_set, level_names)
else:
test(action_set, level_names)
if __name__ == '__main__':
tf.app.run()
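# Editor's note (hypothetical invocation): with the localhost cluster spec
# built in `train`, a multi-process run would start one learner plus
# FLAGS.num_actors actor processes, along the lines of:
#   python experiment.py --job_name=learner --task=0 --num_actors=4
#   python experiment.py --job_name=actor --task=0 --num_actors=4
#   ...one actor process per task index in 0..num_actors-1.
# The exact entry point and flag values may differ in the actual repository.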
|
streetlearn-master
|
streetlearn/python/experiment.py
|
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic oracle agent for StreetLearn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import app
from absl import flags
from absl import logging
import numpy as np
import pygame
from six.moves import range
from streetlearn.python.environment import default_config
from streetlearn.python.environment import goal_instruction_game
from streetlearn.python.environment import incremental_instruction_game
from streetlearn.python.environment import step_by_step_instruction_game
from streetlearn.python.environment import streetlearn
FLAGS = flags.FLAGS
flags.DEFINE_integer('width', 400, 'Observation and map width.')
flags.DEFINE_integer('height', 400, 'Observation and map height.')
flags.DEFINE_integer('field_of_view', 60, 'Field of view.')
flags.DEFINE_integer('graph_zoom', 1, 'Zoom level.')
flags.DEFINE_boolean('graph_black_on_white', False,
'Show graph as black on white. False by default.')
flags.DEFINE_integer('width_text', 300, 'Text width.')
flags.DEFINE_integer('font_size', 16, 'Font size.')
flags.DEFINE_float('horizontal_rot', 10, 'Horizontal rotation step (deg).')
flags.DEFINE_string('dataset_path', None, 'Dataset path.')
flags.DEFINE_string('instruction_file', None, 'Instruction path.')
flags.DEFINE_string(
'game',
'incremental_instruction_game',
'Game name [goal_instruction_game|'
'incremental_instruction_game|step_by_step_instruction_game]')
flags.DEFINE_float('reward_at_waypoint', 0.5, 'Reward at waypoint.')
flags.DEFINE_float('reward_at_goal', 1.0, 'Reward at goal.')
flags.DEFINE_integer('num_instructions', 5, 'Number of instructions.')
flags.DEFINE_integer('max_instructions', 5, 'Maximum number of instructions.')
flags.DEFINE_string('start_pano', '',
'Pano at root of partial graph (default: full graph).')
flags.DEFINE_integer('graph_depth', 200, 'Depth of the pano graph.')
flags.DEFINE_boolean('show_shortest_path', True,
'Whether to highlight the shortest path in the UI.')
flags.DEFINE_integer('frame_cap', 1000, 'Number of frames / episode.')
flags.DEFINE_string('stats_path', None, 'Statistics path.')
flags.mark_flag_as_required('dataset_path')
flags.mark_flag_as_required('instruction_file')
COLOR_WAYPOINT = (0, 178, 178)
COLOR_GOAL = (255, 0, 0)
COLOR_INSTRUCTION = (255, 255, 255)
def blit_instruction(screen, instruction, font, color, x_min, y, x_max):
"""Render and blit a multiline instruction onto the PyGame screen."""
words = instruction.split()
space_width = font.size(' ')[0]
x = x_min
for word in words:
word_surface = font.render(word, True, color)
word_width, word_height = word_surface.get_size()
if x + word_width >= x_max:
x = x_min
y += word_height
screen.blit(word_surface, (x, y))
x += word_width + space_width
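# Example (editor's sketch), assuming an initialised pygame `screen` and
# `font` and illustrative coordinates: word-wrap a sentence into the text
# column between x=500 and x=800:
#
#   blit_instruction(screen, 'turn left at the church', font,
#                    COLOR_INSTRUCTION, x_min=500, y=10, x_max=800)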
def loop(env, screen, x_max, y_max, subsampling, font):
"""Main loop of the oracle agent."""
screen_buffer = np.zeros((x_max, y_max, 3), np.uint8)
action = np.array([0, 0, 0, 0])
action_spec = env.action_spec()
sum_rewards = 0
sum_rewards_at_goal = 0
previous_goal_id = None
while True:
# Take a step given the previous action and record the reward.
observation, reward, done, info = env.step(action)
sum_rewards += reward
    if (reward > 0) and (info['current_goal_id'] != previous_goal_id):
sum_rewards_at_goal += reward
previous_goal_id = info['current_goal_id']
if done:
print('Episode reward: {}'.format(sum_rewards))
if FLAGS.stats_path:
with open(FLAGS.stats_path, 'a') as f:
f.write(str(sum_rewards) + '\t' + str(sum_rewards_at_goal) + '\n')
sum_rewards = 0
sum_rewards_at_goal = 0
# Determine the next pano and bearing to that pano.
current_pano_id = info['current_pano_id']
next_pano_id = info['next_pano_id']
bearing_info = info['bearing_to_next_pano']
bearing = observation['ground_truth_direction']
logging.info('Current pano: %s, next pano %s at %f (%f)',
current_pano_id, next_pano_id, bearing, bearing_info)
current_step = info.get('current_step', -1)
# Bearing-based navigation.
if bearing > FLAGS.horizontal_rot:
if bearing > FLAGS.horizontal_rot + 2 * FLAGS.horizontal_rot:
action = 3 * FLAGS.horizontal_rot * action_spec['horizontal_rotation']
else:
action = FLAGS.horizontal_rot * action_spec['horizontal_rotation']
elif bearing < -FLAGS.horizontal_rot:
if bearing < -FLAGS.horizontal_rot - 2 * FLAGS.horizontal_rot:
action = -3 * FLAGS.horizontal_rot * action_spec['horizontal_rotation']
else:
action = -FLAGS.horizontal_rot * action_spec['horizontal_rotation']
else:
action = action_spec['move_forward']
# Draw the observations (view, graph, thumbnails, instructions).
view_image = observation['view_image_hwc']
graph_image = observation['graph_image_hwc']
screen_buffer[:FLAGS.width, :FLAGS.height, :] = view_image.swapaxes(0, 1)
screen_buffer[:FLAGS.width, FLAGS.height:(FLAGS.height*2), :] = (
graph_image.swapaxes(0, 1))
thumb_image = np.copy(observation['thumbnails'])
for k in range(FLAGS.max_instructions+1):
if k != current_step:
thumb_image[k, :, :, :] = thumb_image[k, :, :, :] / 2
thumb_image = thumb_image.reshape(
FLAGS.height * (FLAGS.max_instructions + 1), FLAGS.width, 3)
thumb_image = thumb_image.swapaxes(0, 1)
thumb_image = thumb_image[::subsampling, ::subsampling, :]
screen_buffer[FLAGS.width:(FLAGS.width+thumb_image.shape[0]),
0:thumb_image.shape[1],
:] = thumb_image
pygame.surfarray.blit_array(screen, screen_buffer)
instructions = observation['instructions'].decode('utf-8')
instructions = instructions.split('|')
instructions.append('[goal]')
x_min = x_max - FLAGS.width_text + 10
y = 10
for k in range(len(instructions)):
instruction = instructions[k]
if k == current_step:
color = COLOR_WAYPOINT
elif k == len(instructions) - 1:
color = COLOR_GOAL
else:
color = COLOR_INSTRUCTION
blit_instruction(screen, instruction, font, color, x_min, y, x_max)
y += int(FLAGS.height / subsampling)
pygame.display.update()
for event in pygame.event.get():
if (event.type == pygame.QUIT or
(event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE)):
return
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_p:
filename = time.strftime('/tmp/oracle_agent_%Y%m%d_%H%M%S.bmp')
pygame.image.save(screen, filename)
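# Editor's sketch (plain Python, not wired into the agent): the bearing-based
# control rule used in `loop` above, extracted for clarity.
def _demo_bearing_to_rotation(bearing, rot_step):
  """Illustrative only: returns a signed rotation in degrees, or None.

  None means the agent is close enough to the target bearing to move forward.
  """
  if bearing > rot_step:
    # Far off-heading: take a triple-size turn, otherwise a single step.
    return 3 * rot_step if bearing > 3 * rot_step else rot_step
  if bearing < -rot_step:
    return -3 * rot_step if bearing < -3 * rot_step else -rot_step
  return None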
def main(argv):
config = {'width': FLAGS.width,
'height': FLAGS.height,
'field_of_view': FLAGS.field_of_view,
'status_height': 0,
'graph_width': FLAGS.width,
'graph_height': FLAGS.height,
'graph_zoom': FLAGS.graph_zoom,
'graph_black_on_white': FLAGS.graph_black_on_white,
'show_shortest_path': FLAGS.show_shortest_path,
'calculate_ground_truth': True,
'goal_timeout': FLAGS.frame_cap,
'frame_cap': FLAGS.frame_cap,
'full_graph': (FLAGS.start_pano == ''),
'start_pano': FLAGS.start_pano,
'min_graph_depth': FLAGS.graph_depth,
'max_graph_depth': FLAGS.graph_depth,
'reward_at_waypoint': FLAGS.reward_at_waypoint,
'reward_at_goal': FLAGS.reward_at_goal,
'instruction_file': FLAGS.instruction_file,
'num_instructions': FLAGS.num_instructions,
'max_instructions': FLAGS.max_instructions,
'proportion_of_panos_with_coins': 0.0,
'action_spec': 'streetlearn_fast_rotate',
'observations': ['view_image_hwc', 'graph_image_hwc', 'yaw',
'thumbnails', 'instructions',
'ground_truth_direction']}
# Configure game and environment.
config = default_config.ApplyDefaults(config)
if FLAGS.game == 'goal_instruction_game':
game = goal_instruction_game.GoalInstructionGame(config)
elif FLAGS.game == 'incremental_instruction_game':
game = incremental_instruction_game.IncrementalInstructionGame(config)
elif FLAGS.game == 'step_by_step_instruction_game':
game = step_by_step_instruction_game.StepByStepInstructionGame(config)
else:
print('Unknown game: [{}]'.format(FLAGS.game))
print('Run instruction_following_oracle_agent --help.')
return
env = streetlearn.StreetLearn(FLAGS.dataset_path, config, game)
env.reset()
# Configure pygame.
pygame.init()
pygame.font.init()
subsampling = int(np.ceil((FLAGS.max_instructions + 1) / 2))
x_max = FLAGS.width + int(FLAGS.width / subsampling) + FLAGS.width_text
y_max = FLAGS.height * 2
logging.info('Rendering images at %dx%d, thumbnails subsampled by %d',
x_max, y_max, subsampling)
screen = pygame.display.set_mode((x_max, y_max))
font = pygame.font.SysFont('arial', FLAGS.font_size)
loop(env, screen, x_max, y_max, subsampling, font)
if __name__ == '__main__':
app.run(main)
|
streetlearn-master
|
streetlearn/python/ui/instruction_following_oracle_agent.py
|
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic oracle agent for StreetLearn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import copy
import time
import numpy as np
import pygame
from streetlearn.python.environment import courier_game
from streetlearn.python.environment import default_config
from streetlearn.python.environment import batched_streetlearn
FLAGS = flags.FLAGS
flags.DEFINE_integer('batch_size', 4, 'Batch size.')
flags.DEFINE_integer('num_env_per_shared_cache', 4, 'Num env per shared cache.')
flags.DEFINE_integer('width', 168, 'Observation and map width.')
flags.DEFINE_integer('height', 168, 'Observation and map height.')
flags.DEFINE_integer('field_of_view', 60, 'Field of view.')
flags.DEFINE_integer('graph_zoom', 1, 'Zoom level.')
flags.DEFINE_boolean('graph_black_on_white', False,
'Show graph as black on white. False by default.')
flags.DEFINE_float('horizontal_rot', 22.5, 'Horizontal rotation step (deg).')
flags.DEFINE_string('dataset_path', None, 'Dataset path.')
flags.DEFINE_integer('max_cache_size', 30000, 'Max cache size.')
flags.DEFINE_string('start_pano', '',
'Pano at root of partial graph (default: full graph).')
flags.DEFINE_integer('graph_depth', 200, 'Depth of the pano graph.')
flags.DEFINE_integer('frame_cap', 1000, 'Number of frames / episode.')
flags.DEFINE_string('stats_path', None, 'Statistics path.')
flags.DEFINE_float('proportion_of_panos_with_coins', 0, 'Proportion of coins.')
flags.mark_flag_as_required('dataset_path')
TOL_BEARING = 30
def loop(env, screen):
"""Main loop of the oracle agent."""
action = []
sum_rewards = []
sum_rewards_at_goal = []
previous_goal_id = []
seen_pano_ids = []
for _ in range(FLAGS.batch_size):
action.append(np.array([0, 0, 0, 0]))
sum_rewards.append(0)
sum_rewards_at_goal.append(0)
previous_goal_id.append(None)
seen_pano_ids.append({})
action_spec = env.action_spec()
save_video = False
frame = 0
horizontal_rotation = action_spec['horizontal_rotation']
move_forward = action_spec['move_forward']
while True:
# Read the keyboard.
for event in pygame.event.get():
if (event.type == pygame.QUIT or
(event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE)):
return
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_p:
filename = time.strftime('/tmp/oracle_agent_%Y%m%d_%H%M%S.bmp')
pygame.image.save(screen, filename)
if event.key == pygame.K_v:
save_video = True
if event.key == pygame.K_c:
save_video = False
elif event.key == pygame.K_i:
for k in range(FLAGS.batch_size):
action[k] += action_spec['map_zoom']
print('zoom in')
elif event.key == pygame.K_o:
for k in range(FLAGS.batch_size):
action[k] -= action_spec['map_zoom']
print('zoom out')
# Take a step given the previous action.
observations, reward, done, info = env.step(action)
# Visualise the observations.
images = []
for k in range(FLAGS.batch_size):
view_image = observations['view_image_hwc'][k, ...]
graph_image = observations['graph_image_hwc'][k, ...]
image_k = np.concatenate((view_image, graph_image), axis=0)
images.append(image_k)
screen_buffer = np.concatenate(images, axis=1)
pygame.surfarray.blit_array(screen, screen_buffer.swapaxes(0, 1))
pygame.display.update()
# Save a video?
if save_video:
filename = time.strftime('/tmp/oracle_agent_video_%Y%m%d_%H%M%S')
filename += '_' + str(frame) + '.bmp'
pygame.image.save(screen, filename)
frame += 1
# Record the reward.
for k in range(FLAGS.batch_size):
sum_rewards[k] += reward[k]
      if ((reward[k] > 0) and
          (info[k]['current_goal_id'] != previous_goal_id[k])):
sum_rewards_at_goal[k] += reward[k]
seen_pano_ids[k] = {}
previous_goal_id[k] = info[k]['current_goal_id']
if done[k]:
num_successes = info[k]['num_successes']
spl = info[k]['spl']
spl_without_last_goal = info[k]['spl_without_last_goal']
print('Episode [{}] reward: {}, goals: {}, SPL: {}/{}'.format(
k, sum_rewards[k], num_successes, spl, spl_without_last_goal))
if FLAGS.stats_path:
with open(FLAGS.stats_path, 'a') as f:
f.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format(
k, sum_rewards[k], sum_rewards_at_goal[k], num_successes, spl,
spl_without_last_goal))
sum_rewards[k] = 0
sum_rewards_at_goal[k] = 0
# logging.info('Cache size: %d', env.cache_size())
# Determine the next pano and bearing to that pano.
for k in range(FLAGS.batch_size):
current_pano_id = info[k]['current_pano_id']
next_pano_id = info[k]['next_pano_id']
bearing = info[k]['bearing_to_next_pano']
logging.info('Current pano %d: %s, next pano %s at %f, cache size %d',
k, current_pano_id, next_pano_id, bearing, env.cache_size[k])
# Maintain the count of pano visits, in case the agent gets stuck.
if current_pano_id in seen_pano_ids[k]:
seen_pano_ids[k][current_pano_id] += 1
else:
seen_pano_ids[k][current_pano_id] = 1
# Bearing-based navigation.
if bearing > TOL_BEARING:
if bearing > TOL_BEARING + 2 * FLAGS.horizontal_rot:
action[k] = copy.copy(3 * FLAGS.horizontal_rot * horizontal_rotation)
else:
action[k] = copy.copy(FLAGS.horizontal_rot * horizontal_rotation)
elif bearing < -TOL_BEARING:
if bearing < -TOL_BEARING - 2 * FLAGS.horizontal_rot:
action[k] = copy.copy(-3 * FLAGS.horizontal_rot * horizontal_rotation)
else:
action[k] = copy.copy(-FLAGS.horizontal_rot * horizontal_rotation)
else:
action[k] = copy.copy(move_forward)
# Sometimes, two panos B and C are close to each other, which causes
      # cyclic loops: A -> C -> A -> C -> A... whereas the agent wants to go
# A -> B. There is a simple strategy to get out of that A - C loop:
# detect that A has been visited a large number of times in the current
# trajectory, then instead of moving forward A -> B and ending up in C,
# directly jump to B. First, we check if the agent has spent more time
# in a pano than required to make a full U-turn...
if seen_pano_ids[k][current_pano_id] > (180.0 / FLAGS.horizontal_rot):
# ... then we teleport to the desired location and turn randomly.
logging.info('Teleporting from %s to %s', current_pano_id,
next_pano_id)
_ = env.goto(k, next_pano_id, np.random.randint(359))
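# Editor's note: the stuck-detection threshold above, 180 / horizontal_rot,
# is the number of rotation steps needed for a 180-degree U-turn; more
# visits to a single pano than that indicates a loop.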
def main(argv):
config = {'width': FLAGS.width,
'height': FLAGS.height,
'field_of_view': FLAGS.field_of_view,
'graph_width': FLAGS.width,
'graph_height': FLAGS.height,
'graph_zoom': FLAGS.graph_zoom,
'graph_black_on_white': FLAGS.graph_black_on_white,
'goal_timeout': FLAGS.frame_cap,
'frame_cap': FLAGS.frame_cap,
'full_graph': (FLAGS.start_pano == ''),
'start_pano': FLAGS.start_pano,
'min_graph_depth': FLAGS.graph_depth,
'max_graph_depth': FLAGS.graph_depth,
'max_cache_size': FLAGS.max_cache_size,
'proportion_of_panos_with_coins':
FLAGS.proportion_of_panos_with_coins,
'action_spec': 'streetlearn_fast_rotate',
'observations': ['view_image_hwc', 'graph_image_hwc', 'yaw',
'pitch']}
config = default_config.ApplyDefaults(config)
# Create as many configs and games as the batch size.
games = []
configs = []
for k in range(FLAGS.batch_size):
this_config = copy.copy(config)
this_config['seed'] = k
configs.append(this_config)
games.append(courier_game.CourierGame(this_config))
env = batched_streetlearn.BatchedStreetLearn(
FLAGS.dataset_path, configs, games,
num_env_per_shared_cache=FLAGS.num_env_per_shared_cache)
env.reset()
pygame.init()
screen = pygame.display.set_mode(
(FLAGS.width * FLAGS.batch_size, FLAGS.height * 2))
loop(env, screen)
if __name__ == '__main__':
app.run(main)
|
streetlearn-master
|
streetlearn/python/ui/batched_oracle_agent.py
|
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic human agent for StreetLearn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import app
from absl import flags
from absl import logging
import numpy as np
import pygame
from six.moves import range
from streetlearn.engine.python import color
from streetlearn.python.environment import coin_game
from streetlearn.python.environment import courier_game
from streetlearn.python.environment import default_config
from streetlearn.python.environment import goal_instruction_game
from streetlearn.python.environment import incremental_instruction_game
from streetlearn.python.environment import step_by_step_instruction_game
from streetlearn.python.environment import streetlearn
FLAGS = flags.FLAGS
flags.DEFINE_integer('width', 400, 'Observation and map width.')
flags.DEFINE_integer('height', 400, 'Observation and map height.')
flags.DEFINE_integer('field_of_view', 60, 'Field of view.')
flags.DEFINE_integer('graph_zoom', 1, 'Zoom level.')
flags.DEFINE_integer('width_text', 300, 'Text width.')
flags.DEFINE_integer('font_size', 16, 'Font size.')
flags.DEFINE_float('horizontal_rot', 10, 'Horizontal rotation step (deg).')
flags.DEFINE_float('vertical_rot', 10, 'Vertical rotation step (deg).')
flags.DEFINE_string('dataset_path', None, 'Dataset path.')
flags.DEFINE_string('instruction_file', None, 'Instruction path.')
flags.DEFINE_string(
'game',
'coin_game',
'Game name [coin_game|courier_game|goal_instruction_game|'
'incremental_instruction_game|step_by_step_instruction_game]')
flags.DEFINE_float('reward_at_waypoint', 0.5, 'Reward at waypoint.')
flags.DEFINE_float('reward_at_goal', 1.0, 'Reward at goal.')
flags.DEFINE_integer('num_instructions', 5, 'Number of instructions.')
flags.DEFINE_integer('max_instructions', 5, 'Maximum number of instructions.')
flags.DEFINE_string('start_pano', '',
'Pano at root of partial graph (default: full graph).')
flags.DEFINE_integer('graph_depth', 200, 'Depth of the pano graph.')
flags.DEFINE_boolean('graph_black_on_white', False,
'Show graph as black on white. False by default.')
flags.DEFINE_boolean('hide_goal', False,
'Whether to hide the goal location on the graph.')
flags.DEFINE_boolean('show_shortest_path', True,
'Whether to highlight the shortest path in the UI.')
flags.DEFINE_integer('frame_cap', 1000, 'Number of frames / episode.')
flags.DEFINE_float('proportion_of_panos_with_coins', 0.0, 'Proportion of coins.')
flags.mark_flag_as_required('dataset_path')
COLOR_WAYPOINT = (0, 178, 178)
COLOR_GOAL = (255, 0, 0)
COLOR_INSTRUCTION = (255, 255, 255)
def blit_instruction(screen, instruction, font, color, x_min, y, x_max):
"""Render and blit a multiline instruction onto the PyGame screen."""
words = instruction.split()
space_width = font.size(' ')[0]
x = x_min
for word in words:
word_surface = font.render(word, True, color)
word_width, word_height = word_surface.get_size()
if x + word_width >= x_max:
x = x_min
y += word_height
screen.blit(word_surface, (x, y))
x += word_width + space_width
def loop(env, screen, x_max, y_max, subsampling=None, font=None):
"""Main loop of the human agent."""
screen_buffer = np.zeros((x_max, y_max, 3), np.uint8)
action = np.array([0, 0, 0, 0])
action_spec = env.action_spec()
sum_rewards = 0
sum_rewards_at_goal = 0
previous_goal_id = None
while True:
# Take a step through the environment and record the reward.
observation, reward, done, info = env.step(action)
sum_rewards += reward
pano_id = env.current_pano_id
if reward > 0:
print('Collected reward of {} at {}'.format(reward, pano_id))
if done:
print('Episode reward: {}'.format(sum_rewards))
sum_rewards = 0
sum_rewards_at_goal = 0
# Draw the observations (view, graph).
observation = env.observation()
view_image = observation['view_image_hwc']
graph_image = observation['graph_image_hwc']
if FLAGS.game == 'coin_game' or FLAGS.game == 'courier_game':
screen_buffer = np.concatenate((view_image, graph_image), axis=0)
pygame.surfarray.blit_array(screen, screen_buffer.swapaxes(0, 1))
else:
# Draw extra observations (thumbnails, instructions).
screen_buffer[:FLAGS.width, :FLAGS.height, :] = view_image.swapaxes(0, 1)
screen_buffer[:FLAGS.width, FLAGS.height:(FLAGS.height*2), :] = (
graph_image.swapaxes(0, 1))
thumb_image = np.copy(observation['thumbnails'])
current_step = info.get('current_step', -1)
for k in range(FLAGS.max_instructions+1):
if k != current_step:
thumb_image[k, :, :, :] = thumb_image[k, :, :, :] / 2
thumb_image = thumb_image.reshape(
FLAGS.height * (FLAGS.max_instructions + 1), FLAGS.width, 3)
thumb_image = thumb_image.swapaxes(0, 1)
thumb_image = thumb_image[::subsampling, ::subsampling, :]
screen_buffer[FLAGS.width:(FLAGS.width+thumb_image.shape[0]),
0:thumb_image.shape[1],
:] = thumb_image
pygame.surfarray.blit_array(screen, screen_buffer)
instructions = observation['instructions'].decode('utf-8')
instructions = instructions.split('|')
instructions.append('[goal]')
x_min = x_max - FLAGS.width_text + 10
y = 10
for k in range(len(instructions)):
instruction = instructions[k]
if k == current_step:
color = COLOR_WAYPOINT
elif k == len(instructions) - 1:
color = COLOR_GOAL
else:
color = COLOR_INSTRUCTION
blit_instruction(screen, instruction, font, color, x_min, y, x_max)
y += int(FLAGS.height / subsampling)
pygame.display.update()
action_spec = env.action_spec()
action = np.array([0, 0, 0, 0])
while True:
event = pygame.event.wait()
if event.type == pygame.QUIT:
return
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
print(pano_id + ': exit')
return
if event.key == pygame.K_SPACE:
action = action_spec['move_forward']
print(pano_id + ': move')
elif event.key == pygame.K_p:
filename = time.strftime('/tmp/human_agent_%Y%m%d_%H%M%S.bmp')
pygame.image.save(screen, filename)
elif event.key == pygame.K_i:
action = action_spec['map_zoom']
print(pano_id + ': zoom in')
elif event.key == pygame.K_o:
action = -1 * action_spec['map_zoom']
print(pano_id + ': zoom out')
elif event.key == pygame.K_a:
action = -FLAGS.horizontal_rot * action_spec['horizontal_rotation']
print(pano_id + ': rotate left')
elif event.key == pygame.K_d:
action = FLAGS.horizontal_rot * action_spec['horizontal_rotation']
print(pano_id + ': rotate right')
elif event.key == pygame.K_w:
action = -FLAGS.vertical_rot * action_spec['vertical_rotation']
print(pano_id + ': look up')
elif event.key == pygame.K_s:
action = FLAGS.vertical_rot * action_spec['vertical_rotation']
print(pano_id + ': look down')
elif event.type == pygame.KEYUP:
break
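# Editor's note, key bindings implemented above: space = move forward,
# a/d = rotate left/right, w/s = look up/down, i/o = map zoom in/out,
# p = save a screenshot, Escape = quit.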
def main(argv):
config = {'width': FLAGS.width,
'height': FLAGS.height,
'field_of_view': FLAGS.field_of_view,
'graph_width': FLAGS.width,
'graph_height': FLAGS.height,
'graph_zoom': FLAGS.graph_zoom,
'graph_black_on_white': FLAGS.graph_black_on_white,
'show_shortest_path': FLAGS.show_shortest_path,
'goal_timeout': FLAGS.frame_cap,
'frame_cap': FLAGS.frame_cap,
'full_graph': (FLAGS.start_pano == ''),
'start_pano': FLAGS.start_pano,
'min_graph_depth': FLAGS.graph_depth,
'max_graph_depth': FLAGS.graph_depth,
'reward_at_waypoint': FLAGS.reward_at_waypoint,
'reward_at_goal': FLAGS.reward_at_goal,
'instruction_file': FLAGS.instruction_file,
'num_instructions': FLAGS.num_instructions,
'max_instructions': FLAGS.max_instructions,
'proportion_of_panos_with_coins':
FLAGS.proportion_of_panos_with_coins,
'observations': ['view_image_hwc', 'graph_image_hwc', 'yaw',
'thumbnails', 'pitch', 'instructions', 'latlng',
'target_latlng']}
if FLAGS.hide_goal:
config['color_for_goal'] = color.Color(1.0, 1.0, 1.0)
config = default_config.ApplyDefaults(config)
if FLAGS.game == 'coin_game':
game = coin_game.CoinGame(config)
elif FLAGS.game == 'courier_game':
game = courier_game.CourierGame(config)
elif FLAGS.game == 'goal_instruction_game':
game = goal_instruction_game.GoalInstructionGame(config)
elif FLAGS.game == 'incremental_instruction_game':
game = incremental_instruction_game.IncrementalInstructionGame(config)
elif FLAGS.game == 'step_by_step_instruction_game':
game = step_by_step_instruction_game.StepByStepInstructionGame(config)
else:
print('Unknown game: [{}]'.format(FLAGS.game))
print('Run with --help for available options.')
return
env = streetlearn.StreetLearn(FLAGS.dataset_path, config, game)
env.reset()
# Configure pygame.
pygame.init()
pygame.font.init()
if FLAGS.game == 'coin_game' or FLAGS.game == 'courier_game':
subsampling = 1
x_max = FLAGS.width
y_max = FLAGS.height * 2
logging.info('Rendering images at %dx%d', x_max, y_max)
else:
subsampling = int(np.ceil((FLAGS.max_instructions + 1) / 2))
x_max = FLAGS.width + int(FLAGS.width / subsampling) + FLAGS.width_text
y_max = FLAGS.height * 2
logging.info('Rendering images at %dx%d, thumbnails subsampled by %d',
x_max, y_max, subsampling)
screen = pygame.display.set_mode((x_max, y_max))
font = pygame.font.SysFont('arial', FLAGS.font_size)
loop(env, screen, x_max, y_max, subsampling, font)
if __name__ == '__main__':
app.run(main)
|
streetlearn-master
|
streetlearn/python/ui/human_agent.py
|
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic panorama scanning agent for StreetLearn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import time
import numpy as np
import pygame
from streetlearn.python.environment import coin_game
from streetlearn.python.environment import default_config
from streetlearn.python.environment import streetlearn
FLAGS = flags.FLAGS
flags.DEFINE_integer("width", 400, "Observation and map width.")
flags.DEFINE_integer("height", 400, "Observation and map height.")
flags.DEFINE_integer('field_of_view', 60, 'Field of view.')
flags.DEFINE_string("dataset_path", None, "Dataset path.")
flags.DEFINE_string("list_pano_ids_yaws", None, "List of pano IDs and yaws.")
flags.DEFINE_bool("save_images", False, "Save the images?")
flags.mark_flag_as_required("dataset_path")
flags.mark_flag_as_required("list_pano_ids_yaws")
def loop(env, screen, pano_ids_yaws):
"""Main loop of the scan agent."""
for (pano_id, yaw) in pano_ids_yaws:
# Retrieve the observation at a specified pano ID and heading.
logging.info('Retrieving view at pano ID %s and yaw %f', pano_id, yaw)
observation = env.goto(pano_id, yaw)
current_yaw = observation["yaw"]
view_image = observation["view_image_hwc"]
graph_image = observation["graph_image_hwc"]
screen_buffer = np.concatenate((view_image, graph_image), axis=0)
pygame.surfarray.blit_array(screen, screen_buffer.swapaxes(0, 1))
pygame.display.update()
for event in pygame.event.get():
if (event.type == pygame.QUIT or
(event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE)):
return
if FLAGS.save_images:
filename = 'scan_agent_{}_{}.bmp'.format(pano_id, yaw)
pygame.image.save(screen, filename)
def main(argv):
config = {'width': FLAGS.width,
'height': FLAGS.height,
'field_of_view': FLAGS.field_of_view,
'graph_width': FLAGS.width,
'graph_height': FLAGS.height,
'graph_zoom': 1,
'full_graph': True,
'proportion_of_panos_with_coins': 0.0,
'action_spec': 'streetlearn_fast_rotate',
'observations': ['view_image_hwc', 'graph_image_hwc', 'yaw']}
with open(FLAGS.list_pano_ids_yaws, 'r') as f:
lines = f.readlines()
pano_ids_yaws = [(line.split('\t')[0], float(line.split('\t')[1]))
for line in lines]
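  # Editor's note: `list_pano_ids_yaws` is expected to hold one tab-separated
  # "<pano_id>\t<yaw_degrees>" pair per line, matching the parsing above.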
config = default_config.ApplyDefaults(config)
game = coin_game.CoinGame(config)
env = streetlearn.StreetLearn(FLAGS.dataset_path, config, game)
env.reset()
pygame.init()
screen = pygame.display.set_mode((FLAGS.width, FLAGS.height * 2))
loop(env, screen, pano_ids_yaws)
if __name__ == '__main__':
app.run(main)
|
streetlearn-master
|
streetlearn/python/ui/scan_agent.py
|
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic oracle agent for StreetLearn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import copy
import time
import numpy as np
import pygame
from streetlearn.python.environment import courier_game
from streetlearn.python.environment import default_config
from streetlearn.python.environment import streetlearn
FLAGS = flags.FLAGS
flags.DEFINE_integer('width', 400, 'Observation and map width.')
flags.DEFINE_integer('height', 400, 'Observation and map height.')
flags.DEFINE_integer('field_of_view', 60, 'Field of view.')
flags.DEFINE_integer('graph_zoom', 1, 'Zoom level.')
flags.DEFINE_boolean('graph_black_on_white', False,
'Show graph as black on white. False by default.')
flags.DEFINE_float('horizontal_rot', 22.5, 'Horizontal rotation step (deg).')
flags.DEFINE_string('dataset_path', None, 'Dataset path.')
flags.DEFINE_integer('max_cache_size', 30000, 'Max cache size.')
flags.DEFINE_string('start_pano', '',
'Pano at root of partial graph (default: full graph).')
flags.DEFINE_integer('graph_depth', 200, 'Depth of the pano graph.')
flags.DEFINE_integer('frame_cap', 1000, 'Number of frames / episode.')
flags.DEFINE_string('stats_path', None, 'Statistics path.')
flags.DEFINE_float('proportion_of_panos_with_coins', 0, 'Proportion of coins.')
flags.mark_flag_as_required('dataset_path')
TOL_BEARING = 30
def loop(env, screen):
"""Main loop of the oracle agent."""
action = np.array([0, 0, 0, 0])
action_spec = env.action_spec()
sum_rewards = 0
sum_rewards_at_goal = 0
previous_goal_id = None
seen_pano_ids = {}
save_video = False
frame = 0
while True:
# Read the keyboard.
for event in pygame.event.get():
if (event.type == pygame.QUIT or
(event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE)):
return
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_p:
filename = time.strftime('/tmp/oracle_agent_%Y%m%d_%H%M%S.bmp')
pygame.image.save(screen, filename)
if event.key == pygame.K_v:
save_video = True
if event.key == pygame.K_c:
save_video = False
elif event.key == pygame.K_i:
action += action_spec['map_zoom']
print('zoom in')
elif event.key == pygame.K_o:
action -= action_spec['map_zoom']
print('zoom out')
# Take a step given the previous action.
observation, reward, done, info = env.step(action)
# Visualise the observations.
view_image = observation["view_image_hwc"]
graph_image = observation['graph_image_hwc']
screen_buffer = np.concatenate((view_image, graph_image), axis=0)
pygame.surfarray.blit_array(screen, screen_buffer.swapaxes(0, 1))
pygame.display.update()
# Save a video?
if save_video:
filename = time.strftime('/tmp/oracle_agent_video_%Y%m%d_%H%M%S')
filename += '_' + str(frame) + '.bmp'
pygame.image.save(screen, filename)
frame += 1
# Record the reward.
logging.info('Rewards: current %f, previous %f, target %s', reward,
observation['prev_reward'], observation['target_latlng'])
sum_rewards += reward
    if (reward > 0) and (info['current_goal_id'] != previous_goal_id):
sum_rewards_at_goal += reward
seen_pano_ids = {}
previous_goal_id = info['current_goal_id']
if done:
num_successes = info['num_successes']
spl = info['spl']
spl_without_last_goal = info['spl_without_last_goal']
print('Episode reward: {}, goals: {}, SPL: {}/{}'.format(
sum_rewards, num_successes, spl, spl_without_last_goal))
if FLAGS.stats_path:
with open(FLAGS.stats_path, 'a') as f:
f.write(str(sum_rewards) + '\t' + str(sum_rewards_at_goal) + '\t')
f.write(str(num_successes) + '\t' + str(spl) + '\t')
f.write(str(spl_without_last_goal) + '\n')
sum_rewards = 0
sum_rewards_at_goal = 0
# Determine the next pano and bearing to that pano.
current_pano_id = info['current_pano_id']
next_pano_id = info['next_pano_id']
bearing = info['bearing_to_next_pano']
logging.info('Current pano: %s, next pano %s at %f',
current_pano_id, next_pano_id, bearing)
# Maintain the count of pano visits, in case the agent gets stuck.
if current_pano_id in seen_pano_ids:
seen_pano_ids[current_pano_id] += 1
else:
seen_pano_ids[current_pano_id] = 1
# Bearing-based navigation.
if bearing > TOL_BEARING:
if bearing > TOL_BEARING + 2 * FLAGS.horizontal_rot:
action = copy.copy(
3 * FLAGS.horizontal_rot * action_spec['horizontal_rotation'])
else:
action = copy.copy(
FLAGS.horizontal_rot * action_spec['horizontal_rotation'])
elif bearing < -TOL_BEARING:
if bearing < -TOL_BEARING - 2 * FLAGS.horizontal_rot:
action = copy.copy(
-3 * FLAGS.horizontal_rot * action_spec['horizontal_rotation'])
else:
action = copy.copy(
-FLAGS.horizontal_rot * action_spec['horizontal_rotation'])
else:
action = copy.copy(action_spec['move_forward'])
# Sometimes, two panos B and C are close to each other, which causes
    # cyclic loops: A -> C -> A -> C -> A... whereas the agent wants to go
    # A -> B.
# There is a simple strategy to get out of that A - C loop: detect that A
# has been visited a large number of times in the current trajectory, then
# instead of moving forward A -> B and ending up in C, directly jump to B.
# First, we check if the agent has spent more time in a pano than required
# to make a full U-turn...
if seen_pano_ids[current_pano_id] > (180.0 / FLAGS.horizontal_rot):
# ... then we teleport to the desired location and turn randomly.
logging.info('Teleporting from %s to %s', current_pano_id, next_pano_id)
_ = env.goto(next_pano_id, np.random.randint(359))
def main(argv):
config = {'width': FLAGS.width,
'height': FLAGS.height,
'field_of_view': FLAGS.field_of_view,
'graph_width': FLAGS.width,
'graph_height': FLAGS.height,
'graph_zoom': FLAGS.graph_zoom,
'graph_black_on_white': FLAGS.graph_black_on_white,
'goal_timeout': FLAGS.frame_cap,
'frame_cap': FLAGS.frame_cap,
'full_graph': (FLAGS.start_pano == ''),
'start_pano': FLAGS.start_pano,
'min_graph_depth': FLAGS.graph_depth,
'max_graph_depth': FLAGS.graph_depth,
'max_cache_size': FLAGS.max_cache_size,
'proportion_of_panos_with_coins':
FLAGS.proportion_of_panos_with_coins,
'action_spec': 'streetlearn_fast_rotate',
'observations': ['view_image_hwc', 'graph_image_hwc',
'target_latlng', 'prev_reward', 'prev_action']}
config = default_config.ApplyDefaults(config)
game = courier_game.CourierGame(config)
env = streetlearn.StreetLearn(FLAGS.dataset_path, config, game)
env.reset()
pygame.init()
screen = pygame.display.set_mode((FLAGS.width, FLAGS.height * 2))
loop(env, screen)
if __name__ == '__main__':
app.run(main)
|
streetlearn-master
|
streetlearn/python/ui/oracle_agent.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for Importance Weighted Actor-Learner Architectures.
For details and theory see:
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
See https://arxiv.org/abs/1802.01561 for the full paper.
Note that this is a copy of the code previously published by Lasse Espeholt
under an Apache license at:
https://github.com/deepmind/scalable_agent
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from streetlearn.python.agents.city_nav_agent import CityNavAgent
from streetlearn.python.agents.goal_nav_agent import GoalNavAgent
from streetlearn.python.agents.locale_pathway import LocalePathway
from streetlearn.python.agents.plain_agent import PlainAgent
|
streetlearn-master
|
streetlearn/python/agents/__init__.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Importance Weighted Actor-Learner Architecture goalless navigation agent.
Note that this is a modification of code previously published by Lasse Espeholt
under an Apache license at:
https://github.com/deepmind/scalable_agent
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from six.moves import range
from six.moves import zip
import sonnet as snt
import tensorflow.compat.v1 as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import rnn as contrib_rnn
nest = contrib_framework.nest
AgentOutput = collections.namedtuple('AgentOutput',
'action policy_logits baseline')
class PlainAgent(snt.RNNCore):
"""Agent with a simple residual convnet and LSTM."""
def __init__(self,
num_actions,
observation_names,
lstm_num_hiddens=256,
feed_action_and_reward=True,
max_reward=1.0,
name="streetlearn_core"):
"""Initializes an agent core designed to be used with A3C/IMPALA.
Supports a single visual observation tensor and outputs a single, scalar
discrete action with policy logits and a baseline value.
Args:
num_actions: Number of actions available.
observation_names: String with observation types separated by semi-colon.
lstm_num_hiddens: Number of hiddens in the LSTM core.
      feed_action_and_reward: If True, the last action (one-hot) and the last
        reward (scalar) will be concatenated to the torso output.
max_reward: If `feed_action_and_reward` is True, the last reward will
be clipped to `[-max_reward, max_reward]`. If `max_reward`
is None, no clipping will be applied. N.B., this is different from
reward clipping during gradient descent, or reward clipping by the
environment.
name: Optional name for the module.
"""
    super(PlainAgent, self).__init__(name=name)
# Policy config
self._num_actions = num_actions
tf.logging.info('Agent trained on %d-action policy', self._num_actions)
# Append last reward (clipped) and last action?
self._feed_action_and_reward = feed_action_and_reward
self._max_reward = max_reward
# Policy LSTM core config
self._lstm_num_hiddens = lstm_num_hiddens
# Extract the observation names
observation_names = observation_names.split(';')
self._idx_frame = observation_names.index('view_image')
with self._enter_variable_scope():
tf.logging.info('LSTM core with %d hiddens', self._lstm_num_hiddens)
self._core = contrib_rnn.LSTMBlockCell(self._lstm_num_hiddens)
def initial_state(self, batch_size):
"""Return initial state with zeros, for a given batch size and data type."""
tf.logging.info("Initial state consists of the LSTM core initial state.")
return self._core.zero_state(batch_size, tf.float32)
def _torso(self, input_):
"""Processing of all the visual and language inputs to the LSTM core."""
# Extract the inputs
last_action, env_output = input_
last_reward, _, _, observation = env_output
    if isinstance(observation, list):
frame = observation[self._idx_frame]
else:
frame = observation
    # Convert the image to floats and normalise.
frame = tf.to_float(frame)
frame /= 255
# Feed image through convnet.
with tf.variable_scope('convnet'):
conv_out = frame
for i, (num_ch, num_blocks) in enumerate([(16, 2), (32, 2), (32, 2)]):
# Downscale.
conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
conv_out = tf.nn.pool(
conv_out,
window_shape=[3, 3],
pooling_type='MAX',
padding='SAME',
strides=[2, 2])
# Residual block(s).
for j in range(num_blocks):
with tf.variable_scope('residual_%d_%d' % (i, j)):
block_input = conv_out
conv_out = tf.nn.relu(conv_out)
conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
conv_out = tf.nn.relu(conv_out)
conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
conv_out += block_input
# Fully connected layer.
conv_out = tf.nn.relu(conv_out)
conv_out = snt.BatchFlatten()(conv_out)
conv_out = snt.Linear(256)(conv_out)
conv_out = tf.nn.relu(conv_out)
# Concatenate outputs of the visual and instruction pathways.
if self._feed_action_and_reward:
# Append clipped last reward and one hot last action.
tf.logging.info('Append last reward clipped to: %f', self._max_reward)
clipped_last_reward = tf.expand_dims(
tf.clip_by_value(last_reward, -self._max_reward, self._max_reward),
-1)
tf.logging.info('Append last action (one-hot of %d)', self._num_actions)
one_hot_last_action = tf.one_hot(last_action, self._num_actions)
core_input = tf.concat(
[conv_out, clipped_last_reward, one_hot_last_action],
axis=1)
else:
core_input = conv_out
return core_input
def _head(self, core_output):
"""Build the head of the agent: linear policy and value function."""
policy_logits = snt.Linear(
self._num_actions, name='policy_logits')(
core_output)
baseline = tf.squeeze(snt.Linear(1, name='baseline')(core_output), axis=-1)
# Sample an action from the policy.
new_action = tf.multinomial(
policy_logits, num_samples=1, output_dtype=tf.int32)
new_action = tf.squeeze(new_action, 1, name='new_action')
return AgentOutput(new_action, policy_logits, baseline)
def _build(self, input_, core_state):
"""Assemble the network components."""
action, env_output = input_
actions, env_outputs = nest.map_structure(lambda t: tf.expand_dims(t, 0),
(action, env_output))
outputs, core_state = self.unroll(actions, env_outputs, core_state)
return nest.map_structure(lambda t: tf.squeeze(t, 0), outputs), core_state
@snt.reuse_variables
def unroll(self, actions, env_outputs, core_state):
"""Manual implementation of the network unroll."""
_, _, done, _ = env_outputs
torso_outputs = snt.BatchApply(self._torso)((actions, env_outputs))
# Note, in this implementation we can't use CuDNN RNN to speed things up due
# to the state reset. This can be XLA-compiled (LSTMBlockCell needs to be
# changed to implement snt.LSTMCell).
initial_core_state = self._core.zero_state(tf.shape(actions)[1], tf.float32)
core_output_list = []
for input_, d in zip(tf.unstack(torso_outputs), tf.unstack(done)):
# If the episode ended, the core state should be reset before the next.
core_state = nest.map_structure(
functools.partial(tf.where, d), initial_core_state, core_state)
core_output, core_state = self._core(input_, core_state)
core_output_list.append(core_output)
return snt.BatchApply(self._head)(tf.stack(core_output_list)), core_state
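
# ------------------------------------------------------------------------------
# Editor's sketch (not part of the original file): a NumPy illustration of the
# core-input construction in PlainAgent._torso, assuming a batch of 2, a 256-d
# conv embedding, 5 actions and max_reward=1.0. All names are local to this
# example and the shapes are illustrative only.
if __name__ == '__main__':
  import numpy as np
  batch_size, embed_size, num_actions, max_reward = 2, 256, 5, 1.0
  conv_out_np = np.random.randn(batch_size, embed_size).astype(np.float32)
  last_reward_np = np.array([2.5, -0.3], dtype=np.float32)
  last_action_np = np.array([1, 4], dtype=np.int32)
  # Clip the last reward to [-max_reward, max_reward] and expand to [B, 1].
  clipped_np = np.clip(last_reward_np, -max_reward, max_reward)[:, None]
  # One-hot encode the last action to [B, num_actions].
  one_hot_np = np.eye(num_actions, dtype=np.float32)[last_action_np]
  # The LSTM core input concatenates all three, [B, 256 + 1 + num_actions].
  core_input_np = np.concatenate([conv_out_np, clipped_np, one_hot_np], axis=1)
  print(core_input_np.shape)  # (2, 262)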
|
streetlearn-master
|
streetlearn/python/agents/plain_agent.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the goal-driven StreetLearn CityNavAgent with auxiliary losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow.compat.v1 as tf
import sonnet as snt
import streetlearn.python.agents.goal_nav_agent as goal_nav_agent
import streetlearn.python.agents.locale_pathway as locale_pathway
from tensorflow.contrib import rnn as contrib_rnn
class CityNavAgent(goal_nav_agent.GoalNavAgent):
"""Core with A2C/A3C-compatible outputs for simple visual observations."""
def __init__(self,
num_actions,
observation_names,
goal_type='target_latlng',
heading_stop_gradient=False,
heading_num_hiddens=256,
heading_num_bins=16,
xy_stop_gradient=True,
xy_num_hiddens=256,
xy_num_bins_lat=32,
xy_num_bins_lng=32,
target_xy_stop_gradient=True,
locale_lstm_num_hiddens=256,
dropout=0.5,
locale_bottleneck_num_hiddens=64,
skip_connection=True,
policy_lstm_num_hiddens=256,
feed_action_and_reward=True,
max_reward=1.0,
name="streetlearn_core"):
"""Initializes an agent core designed to be used with A3C/IMPALA.
Supports a single visual observation tensor and goal instruction tensor and
outputs a single, scalar discrete action with policy logits and a baseline
value, as well as the agent heading, XY position and target XY predictions.
Args:
num_actions: Number of actions available.
observation_names: String with observation names separated by semi-colon.
goal_type: String with the name of the target observation field, can be
`target_latlng` or `target_landmarks`.
heading_stop_gradient: Boolean for stopping gradient between the LSTM core
and the heading prediction MLP.
heading_num_hiddens: Number of hiddens in the heading prediction MLP.
heading_num_bins: Number of outputs in the heading prediction MLP.
xy_stop_gradient: Boolean for stopping gradient between the LSTM core
and the XY position prediction MLP.
xy_num_hiddens: Number of hiddens in the XY position prediction MLP.
xy_num_bins_lat: Number of lat outputs in the XY position prediction MLP.
xy_num_bins_lng: Number of lng outputs in the XY position prediction MLP.
target_xy_stop_gradient: Boolean for stopping gradient between the LSTM
core and the target XY position prediction MLP.
locale_lstm_num_hiddens: Number of hiddens in the locale pathway core.
      dropout: Dropout probability after the locale pathway.
locale_bottleneck_num_hiddens: Number of hiddens in the bottleneck after
the locale pathway.
skip_connection: Is there a direct connection from convnet to policy LSTM?
policy_lstm_num_hiddens: Number of hiddens in the policy LSTM core.
feed_action_and_reward: If True, the last action (one hot) and last reward
(scalar) will be concatenated to the torso.
max_reward: If `feed_action_and_reward` is True, the last reward will
be clipped to `[-max_reward, max_reward]`. If `max_reward`
is None, no clipping will be applied. N.B., this is different from
reward clipping during gradient descent, or reward clipping by the
environment.
name: Optional name for the module.
"""
super(CityNavAgent, self).__init__(
num_actions,
observation_names,
goal_type,
heading_stop_gradient,
heading_num_hiddens,
heading_num_bins,
xy_stop_gradient,
xy_num_hiddens,
xy_num_bins_lat,
xy_num_bins_lng,
target_xy_stop_gradient,
dropout,
lstm_num_hiddens=locale_lstm_num_hiddens,
feed_action_and_reward=feed_action_and_reward,
max_reward=max_reward,
name=name)
# Skip connection for convnet, short-circuiting the global pathway?
self._skip_connection = skip_connection
tf.logging.info("Convnet skip connection? " + str(self._skip_connection))
with self._enter_variable_scope():
# Recurrent policy LSTM core of the agent.
tf.logging.info('LSTM core with %d hiddens', policy_lstm_num_hiddens)
self._policy_lstm = contrib_rnn.LSTMBlockCell(
policy_lstm_num_hiddens, name="policy_lstm")
# Add an optional bottleneck after the global LSTM
if locale_bottleneck_num_hiddens > 0:
self._locale_bottleneck = snt.nets.MLP(
output_sizes=(locale_bottleneck_num_hiddens,),
activation=tf.nn.tanh,
activate_final=True,
name="locale_bottleneck")
tf.logging.info("Auxiliary global pathway bottleneck with %d hiddens",
locale_bottleneck_num_hiddens)
else:
self._locale_bottleneck = tf.identity
def initial_state(self, batch_size):
"""Returns an initial state with zeros, for a batch size and data type."""
tf.logging.info("Initial state consists of the locale pathway and policy "
"LSTM core initial states.")
initial_state_list = []
initial_state_list.append(self._policy_lstm.zero_state(
batch_size, tf.float32))
initial_state_list.append(self._locale_pathway.initial_state(batch_size))
return tuple(initial_state_list)
def _core(self, core_input, core_state):
"""Assemble the recurrent core network components."""
(conv_output, action_reward, goal) = core_input
# Get the states
policy_state, locale_state = core_state
# Locale-specific pathway
locale_input = conv_output
locale_output, locale_state = self._locale_pathway((locale_input, goal),
locale_state)
(lstm_output, heading_output, xy_output, target_xy_output) = locale_output
# Policy LSTM
policy_input = self._locale_bottleneck(lstm_output)
if self._skip_connection:
policy_input = tf.concat([policy_input, conv_output], axis=1)
if self._feed_action_and_reward:
policy_input = tf.concat([policy_input, action_reward], axis=1)
policy_input = tf.identity(policy_input, name="policy_input")
policy_output, policy_state = self._policy_lstm(policy_input, policy_state)
core_output = (policy_output, heading_output, xy_output, target_xy_output)
core_state_list = []
core_state_list.append(policy_state)
core_state_list.append(locale_state)
core_state = tuple(core_state_list)
return core_output, core_state
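
# ------------------------------------------------------------------------------
# Editor's sketch (not part of the original file): shape bookkeeping for the
# policy-LSTM input assembled in CityNavAgent._core, assuming a 64-d locale
# bottleneck (the constructor default), a 256-d conv embedding with the skip
# connection enabled, and a 6-d action/reward vector (1 clipped reward plus a
# 5-way one-hot action). The widths are assumptions for illustration.
if __name__ == '__main__':
  import numpy as np
  batch = 3
  bottleneck_out = np.zeros((batch, 64), dtype=np.float32)
  conv_output_np = np.zeros((batch, 256), dtype=np.float32)
  action_reward_np = np.zeros((batch, 6), dtype=np.float32)
  # With the skip connection, the conv features bypass the locale pathway and
  # feed the policy LSTM directly, alongside the bottleneck output.
  policy_input_np = np.concatenate(
      [bottleneck_out, conv_output_np, action_reward_np], axis=1)
  print(policy_input_np.shape)  # (3, 326): 64 + 256 + 6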
|
streetlearn-master
|
streetlearn/python/agents/city_nav_agent.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Importance Weighted Actor-Learner Architecture goalless navigation agent.
Note that this is a modification of code previously published by Lasse Espeholt
under an Apache license at:
https://github.com/deepmind/scalable_agent
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from six.moves import range
from six.moves import zip
import sonnet as snt
import tensorflow.compat.v1 as tf
import streetlearn.python.agents.locale_pathway as locale_pathway
from tensorflow.contrib import framework as contrib_framework
nest = contrib_framework.nest
AgentOutput = collections.namedtuple(
"AgentOutput", "action policy_logits baseline heading xy target_xy")
class GoalNavAgent(snt.RNNCore):
"""Agent with a simple residual convnet and LSTM."""
def __init__(self,
num_actions,
observation_names,
goal_type='target_latlng',
heading_stop_gradient=False,
heading_num_hiddens=256,
heading_num_bins=16,
xy_stop_gradient=True,
xy_num_hiddens=256,
xy_num_bins_lat=32,
xy_num_bins_lng=32,
target_xy_stop_gradient=True,
dropout=0.5,
lstm_num_hiddens=256,
feed_action_and_reward=True,
max_reward=1.0,
name="streetlearn_core"):
"""Initializes an agent core designed to be used with A3C/IMPALA.
Supports a single visual observation tensor and goal instruction tensor and
outputs a single, scalar discrete action with policy logits and a baseline
value, as well as the agent heading prediction.
Args:
num_actions: Number of actions available.
observation_names: String with observation names separated by semi-colon.
goal_type: String with the name of the target observation field, can be
`target_latlng` or `target_landmarks`.
heading_stop_gradient: Boolean for stopping gradient between the LSTM core
and the heading prediction MLP.
heading_num_hiddens: Number of hiddens in the heading prediction MLP.
heading_num_bins: Number of outputs in the heading prediction MLP.
xy_stop_gradient: Boolean for stopping gradient between the LSTM core
and the XY position prediction MLP.
xy_num_hiddens: Number of hiddens in the XY position prediction MLP.
xy_num_bins_lat: Number of lat outputs in the XY position prediction MLP.
xy_num_bins_lng: Number of lng outputs in the XY position prediction MLP.
target_xy_stop_gradient: Boolean for stopping gradient between the LSTM
core and the target XY position prediction MLP.
      dropout: Dropout probability after the locale pathway.
lstm_num_hiddens: Number of hiddens in the LSTM core.
feed_action_and_reward: If True, the last action (one hot) and last reward
(scalar) will be concatenated to the torso.
max_reward: If `feed_action_and_reward` is True, the last reward will
be clipped to `[-max_reward, max_reward]`. If `max_reward`
is None, no clipping will be applied. N.B., this is different from
reward clipping during gradient descent, or reward clipping by the
environment.
name: Optional name for the module.
"""
super(GoalNavAgent, self).__init__(name='agent')
# Policy config
self._num_actions = num_actions
tf.logging.info('Agent trained on %d-action policy', self._num_actions)
# Append last reward (clipped) and last action?
self._feed_action_and_reward = feed_action_and_reward
self._max_reward = max_reward
# Policy LSTM core config
self._lstm_num_hiddens = lstm_num_hiddens
# Extract the observation names
observation_names = observation_names.split(';')
self._idx_frame = observation_names.index('view_image')
tf.logging.info('Looking for goal of type %s', goal_type)
self._idx_goal = observation_names.index(goal_type)
with self._enter_variable_scope():
# Convnet
self._convnet = snt.nets.ConvNet2D(
output_channels=(16, 32),
kernel_shapes=(8, 4),
strides=(4, 2),
paddings=[snt.VALID],
activation=tf.nn.relu,
activate_final=True)
# Recurrent LSTM core of the agent.
tf.logging.info('Locale pathway LSTM core with %d hiddens',
self._lstm_num_hiddens)
self._locale_pathway = locale_pathway.LocalePathway(
heading_stop_gradient, heading_num_hiddens, heading_num_bins,
xy_stop_gradient, xy_num_hiddens, xy_num_bins_lat, xy_num_bins_lng,
target_xy_stop_gradient, lstm_num_hiddens, dropout)
def initial_state(self, batch_size):
"""Return initial state with zeros, for a given batch size and data type."""
tf.logging.info("Initial state consists of the LSTM core initial state.")
return self._locale_pathway.initial_state(batch_size)
def _torso(self, input_):
"""Processing of all the visual and language inputs to the LSTM core."""
# Extract the inputs
last_action, env_output = input_
last_reward, _, _, observation = env_output
frame = observation[self._idx_frame]
goal = observation[self._idx_goal]
goal = tf.to_float(goal)
    # Convert the image to floats and normalise.
frame = tf.to_float(frame)
frame = snt.FlattenTrailingDimensions(dim_from=3)(frame)
frame /= 255.0
# Feed image through convnet.
with tf.variable_scope('convnet'):
# Convolutional layers.
conv_out = self._convnet(frame)
# Fully connected layer.
conv_out = snt.BatchFlatten()(conv_out)
conv_out = snt.Linear(256)(conv_out)
conv_out = tf.nn.relu(conv_out)
# Concatenate outputs of the visual and instruction pathways.
if self._feed_action_and_reward:
# Append clipped last reward and one hot last action.
tf.logging.info('Append last reward clipped to: %f', self._max_reward)
clipped_last_reward = tf.expand_dims(
tf.clip_by_value(last_reward, -self._max_reward, self._max_reward),
-1)
tf.logging.info('Append last action (one-hot of %d)', self._num_actions)
one_hot_last_action = tf.one_hot(last_action, self._num_actions)
tf.logging.info('Append goal:')
tf.logging.info(goal)
action_and_reward = tf.concat([clipped_last_reward, one_hot_last_action],
axis=1)
else:
action_and_reward = tf.constant([0], dtype=tf.float32)
return conv_out, action_and_reward, goal
def _core(self, core_input, core_state):
"""Assemble the recurrent core network components."""
(conv_output, action_reward, goal) = core_input
locale_input = tf.concat([conv_output, action_reward], axis=1)
core_output, core_state = self._locale_pathway((locale_input, goal),
core_state)
return core_output, core_state
def _head(self, policy_input, heading, xy, target_xy):
"""Build the head of the agent: linear policy and value function, and pass
the auxiliary outputs through.
"""
# Linear policy and value function.
policy_logits = snt.Linear(
self._num_actions, name='policy_logits')(policy_input)
baseline = tf.squeeze(snt.Linear(1, name='baseline')(policy_input), axis=-1)
# Sample an action from the policy.
new_action = tf.multinomial(
policy_logits, num_samples=1, output_dtype=tf.int32)
new_action = tf.squeeze(new_action, 1, name='new_action')
return AgentOutput(
new_action, policy_logits, baseline, heading, xy, target_xy)
def _build(self, input_, core_state):
"""Assemble the network components."""
action, env_output = input_
actions, env_outputs = nest.map_structure(lambda t: tf.expand_dims(t, 0),
(action, env_output))
outputs, core_state = self.unroll(actions, env_outputs, core_state)
return nest.map_structure(lambda t: tf.squeeze(t, 0), outputs), core_state
@snt.reuse_variables
def unroll(self, actions, env_outputs, core_state):
"""Manual implementation of the network unroll."""
_, _, done, _ = env_outputs
torso_outputs = snt.BatchApply(self._torso)((actions, env_outputs))
tf.logging.info(torso_outputs)
conv_outputs, actions_and_rewards, goals = torso_outputs
# Note, in this implementation we can't use CuDNN RNN to speed things up due
# to the state reset. This can be XLA-compiled (LSTMBlockCell needs to be
# changed to implement snt.LSTMCell).
initial_core_state = self.initial_state(tf.shape(actions)[1])
policy_input_list = []
heading_output_list = []
xy_output_list = []
target_xy_output_list = []
for torso_output_, action_and_reward_, goal_, done_ in zip(
tf.unstack(conv_outputs),
tf.unstack(actions_and_rewards),
tf.unstack(goals),
tf.unstack(done)):
# If the episode ended, the core state should be reset before the next.
core_state = nest.map_structure(
functools.partial(tf.where, done_), initial_core_state, core_state)
core_output, core_state = self._core(
(torso_output_, action_and_reward_, goal_), core_state)
policy_input_list.append(core_output[0])
heading_output_list.append(core_output[1])
xy_output_list.append(core_output[2])
target_xy_output_list.append(core_output[3])
head_output = snt.BatchApply(self._head)(tf.stack(policy_input_list),
tf.stack(heading_output_list),
tf.stack(xy_output_list),
tf.stack(target_xy_output_list))
return head_output, core_state
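
# ------------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the per-step state reset in
# GoalNavAgent.unroll, re-expressed with NumPy. Wherever `done` is True the
# recurrent state is replaced by the initial (zero) state before the next core
# step, which is what the tf.where over the unstacked time dimension does.
if __name__ == '__main__':
  import numpy as np
  batch, hidden = 4, 3
  state = np.arange(batch * hidden, dtype=np.float32).reshape(batch, hidden)
  initial_state_np = np.zeros_like(state)
  done_np = np.array([False, True, False, True])
  state = np.where(done_np[:, None], initial_state_np, state)
  print(state)  # rows 1 and 3 have been reset to zeros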
|
streetlearn-master
|
streetlearn/python/agents/goal_nav_agent.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the locale-specific core for StreetLearn agents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import sonnet as snt
import tensorflow.compat.v1 as tf
# Outputs of the global city-specific pathway
LocalePathwayOutput = collections.namedtuple(
"LocalePathwayOutput", ["lstm_output", "heading", "xy", "target_xy"])
class LocalePathway(snt.RNNCore):
"""City-specific locale core, operating on visual embeddings."""
def __init__(self,
heading_stop_gradient=False,
heading_num_hiddens=256,
heading_num_bins=16,
xy_stop_gradient=True,
xy_num_hiddens=256,
xy_num_bins_lat=32,
xy_num_bins_lng=32,
target_xy_stop_gradient=True,
lstm_num_hiddens=256,
dropout=0.5,
name="locale_pathway_core"):
"""Initializes a city-specific global localisation core,
operating on visual embeddings and target positions.
Supports a single embedding tensor and a single target position tensor,
and outputs a single hidden state as well as auxiliary localisation outputs.
Relies on a recurrent LSTM core.
Args:
      heading_stop_gradient: Boolean for stopping gradient between the LSTM
        core and the heading prediction MLP.
      heading_num_hiddens: Number of hiddens in the heading prediction MLP.
      heading_num_bins: Number of outputs in the heading prediction MLP.
      xy_stop_gradient: Boolean for stopping gradient between the LSTM core
        and the XY position prediction MLP.
      xy_num_hiddens: Number of hiddens in the XY position prediction MLP.
      xy_num_bins_lat: Number of lat outputs in the XY position prediction MLP.
      xy_num_bins_lng: Number of lng outputs in the XY position prediction MLP.
      target_xy_stop_gradient: Boolean for stopping gradient between the LSTM
        core and the target XY position prediction MLP.
      lstm_num_hiddens: Number of hiddens in the locale pathway LSTM core.
      dropout: Dropout probability applied to the LSTM output.
      name: Optional name for the module.
"""
super(LocalePathway, self).__init__(name=name)
self._heading_stop_gradient = heading_stop_gradient
self._xy_stop_gradient = xy_stop_gradient
self._target_xy_stop_gradient = target_xy_stop_gradient
tf.logging.info("Stop gradient? heading:%s, XY:%s and target XY:%s",
str(heading_stop_gradient), str(xy_stop_gradient),
str(target_xy_stop_gradient))
self._lstm_num_hiddens = lstm_num_hiddens
tf.logging.info("Number of hiddens in locale-specific LSTM: %d",
lstm_num_hiddens)
self._dropout = dropout
tf.logging.info("Dropout after LSTM: %f", dropout)
with self._enter_variable_scope():
# Add an LSTM for global landmark, heading and XY prediction tasks
tf.logging.info("Auxiliary global pathway LSTM with %d hiddens",
self._lstm_num_hiddens)
      assert self._lstm_num_hiddens > 0
self._lstm = tf.contrib.rnn.LSTMBlockCell(self._lstm_num_hiddens,
name="global_pathway_lstm")
# Add an MLP head for absolute heading (north) bin prediction
tf.logging.info("%d-bin absolute heading prediction with %s hiddens",
heading_num_bins,
heading_num_hiddens)
self._heading_logits = snt.nets.MLP(
output_sizes=(heading_num_hiddens, heading_num_bins),
activate_final=False,
name="heading_logits")
# Add an MLP head for XY location bin prediction
xy_num_bins = xy_num_bins_lat * xy_num_bins_lng
tf.logging.info("%d-bin XY location prediction (%d lat, %d lng)",
xy_num_bins, xy_num_bins_lat, xy_num_bins_lng)
tf.logging.info("with %s hiddens", xy_num_hiddens)
self._xy_logits = snt.nets.MLP(
output_sizes=(xy_num_hiddens, xy_num_bins),
activate_final=False,
name="xy_logits")
# Add an MLP head for XY target location bin prediction
tf.logging.info("%d-bin target XY location prediction (%d lat, %d lng)",
xy_num_bins, xy_num_bins_lat, xy_num_bins_lng)
tf.logging.info("with %s hiddens", xy_num_hiddens)
self._target_xy_logits = snt.nets.MLP(
output_sizes=(xy_num_hiddens, xy_num_bins),
activate_final=False,
name="target_xy_logits")
def _build(self, input_, state):
"""Connects the core into the graph.
This core is designed to be used for embeddings coming from a convnet.
    Args:
      input_: Tuple `(embedding, target_position)`, where `embedding` is the
        output of the convnet and `target_position` is a representation of
        the target position.
      state: The current state of the global LSTM component of the core.
    Returns:
      A tuple `(core_output, next_state)`, where:
      * `core_output` is a `LocalePathwayOutput` namedtuple with fields
        `lstm_output` (the post-dropout LSTM output), `heading`, `xy` and
        `target_xy` (the auxiliary prediction logits).
      * `next_state` is the next state of the LSTM component of the core.
"""
(embedding, target_position) = input_
with tf.name_scope("targets") as scope:
lstm_input = tf.concat([embedding, tf.cast(target_position,
dtype=tf.float32)],
axis=1)
# Global pathway tasks
with tf.name_scope("locale_pathway") as scope:
lstm_output, next_state = self._lstm(lstm_input, state)
# Heading decoding or prediction
if self._heading_stop_gradient:
input_heading = tf.stop_gradient(lstm_output,
name='heading_stop_gradient')
else:
input_heading = lstm_output
heading = self._heading_logits(input_heading)
# XY decoding or prediction
if self._xy_stop_gradient:
input_xy = tf.stop_gradient(lstm_output,
name='xy_stop_gradient')
else:
input_xy = lstm_output
xy = self._xy_logits(input_xy)
# Target XY decoding
if self._target_xy_stop_gradient:
input_target_xy = tf.stop_gradient(lstm_output,
name='target_xy_stop_gradient')
else:
input_target_xy = lstm_output
target_xy = self._target_xy_logits(input_target_xy)
# Add dropout
if self._dropout > 0:
lstm_output = tf.nn.dropout(lstm_output,
keep_prob=self._dropout,
name="lstm_output_dropout")
else:
lstm_output = tf.identity(lstm_output,
name="lstm_output_without_dropout")
# Outputs
core_output = LocalePathwayOutput(lstm_output=lstm_output,
heading=heading,
xy=xy,
target_xy=target_xy)
return core_output, next_state
def initial_state(self, batch_size):
"""Returns an initial state with zeros, for a batch size and data type."""
tf.logging.info("Initial state includes the locale LSTM")
return self._lstm.zero_state(batch_size, tf.float32)
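
# ------------------------------------------------------------------------------
# Editor's sketch (not part of the original file): one plausible way to build
# the classification targets the heading head above is trained against. The
# actual discretisation lives outside this file, so the uniform binning below
# is an assumption for illustration only.
def _example_heading_bin(heading_deg, num_bins=16):
  """Maps a heading in degrees to a zero-based bin index in [0, num_bins)."""
  return int((heading_deg % 360.0) / 360.0 * num_bins)


if __name__ == '__main__':
  assert _example_heading_bin(0.0) == 0
  assert _example_heading_bin(359.9) == 15
  assert _example_heading_bin(-45.0) == 14  # -45 wraps to 315 degrees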
|
streetlearn-master
|
streetlearn/python/agents/locale_pathway.py
|
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""StreetLearn level for the curriculum-based courier task.
This is a version of the courier task that increases the distance to the
goal with each episode using the given annealing rate.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import numpy as np
import time
from streetlearn.python.environment import courier_game
_SECONDS_IN_HOUR = 3600
class CurriculumCourierGame(courier_game.CourierGame):
"""Coin game that gives extra reward for finding the goal pano. A courier goal
is randomly selected from panos in the graph according to a curriculum that
starts with panos within a maximum distance from the current agent position,
then anneals it with time. On success or timeout, a new goal is chosen. The
episode ends after a fixed episode length.
"""
def __init__(self, config):
"""Creates an instance of the RandomTaxiCurriculum level.
This coin game gives extra reward for finding the goal pano, and resets the
goal once the goal has been found (or on timeout). Panos can be assigned
rewards (coins) randomly and the agent will receive the reward the first
time they visit these panos. Goal panos are assigned within a circle whose
radius grows in time from min_goal_distance to max_goal_distance.
Args:
config: config dict of various settings.
"""
super(CurriculumCourierGame, self).__init__(config)
self._timestamp_start = config['timestamp_start_curriculum']
self._annealing_rate = config['annealing_rate_curriculum']
self._hours_curriculum_part_1 = config['hours_curriculum_part_1']
self._hours_curriculum_part_2 = config['hours_curriculum_part_2']
self._min_goal_distance = config['min_goal_distance_curriculum']
self._max_goal_distance = config['max_goal_distance_curriculum']
self._allowed_goal_distance = self._min_goal_distance
assert self._timestamp_start <= time.time()
assert self._annealing_rate > 0
assert self._hours_curriculum_part_1 >= 0
assert self._hours_curriculum_part_2 > 0
assert self._min_goal_distance < self._max_goal_distance
logging.info(
'Curriculum: starts at t=%d, dist <= %f in P1 (%f h)',
self._timestamp_start, self._min_goal_distance,
self._hours_curriculum_part_1)
logging.info(
'Curriculum: then %f < dist <= %f in P2 (%f h)',
self._min_goal_distance, self._max_goal_distance,
self._hours_curriculum_part_2)
logging.info('Curriculum: annealing rate: %f', self._annealing_rate)
def _update_curriculum_goal_distance(self):
"""Updates the allowed distance to the goal according to the curriculum."""
hours_train = max(0,
(time.time() - self._timestamp_start) / _SECONDS_IN_HOUR)
if hours_train <= self._hours_curriculum_part_1:
# During part 1 of the curriculum, sample goals within a minimal distance.
self._allowed_goal_distance = self._min_goal_distance
else:
# During part 2 of the curriculum, sample goals within a distance
# that grows from a minimum value to a maximum value.
numerator = hours_train - self._hours_curriculum_part_1
denom = self._hours_curriculum_part_2
time_factor = pow(min(1, max(0, numerator / denom)), self._annealing_rate)
self._allowed_goal_distance = (
(self._max_goal_distance - self._min_goal_distance
) * time_factor + self._min_goal_distance)
def on_reset(self, streetlearn):
"""Gets called after StreetLearn:reset().
Selects a random pano as goal destination.
If there are any coins, clears the set of touched panos and randomly
generates reward-yielding coins and populates pano_id_to_color.
Args:
streetlearn: a streetlearn instance.
Returns:
A newly populated pano_id_to_color dictionary.
"""
# Update the allowed distance to the goal according to the curriculum.
self._update_curriculum_goal_distance()
# Populate the list of panos and assign optional coins to panos.
# Assign the goal location to one of the panos.
return super(CurriculumCourierGame, self).on_reset(streetlearn)
def get_info(self, streetlearn):
""""Returns current information about the state of the environment.
Args:
streetlearn: a StreetLearn instance.
Returns:
info: information from the environment at the last step.
"""
info = super(CurriculumCourierGame, self).get_info(streetlearn)
info['allowed_goal_distance'] = self._allowed_goal_distance
return info
def _sample_random_goal(self, streetlearn):
"""Randomly sets a new pano for the current goal according to a curriculum.
Args:
streetlearn: The StreetLearn environment.
"""
# Sample a goal among the pano ids that is within that distance.
goals = [goal for goal in streetlearn.graph
if ((goal != self._current_goal_id) and
(goal != streetlearn.current_pano_id))]
self._initial_distance_to_goal = float('inf')
while self._initial_distance_to_goal > self._allowed_goal_distance:
self._current_goal_id = np.random.choice(goals)
self._min_distance_reached = streetlearn.engine.GetPanoDistance(
streetlearn.current_pano_id, self._current_goal_id)
self._initial_distance_to_goal = self._min_distance_reached
logging.info(
'seed %d, frame %d: distance to goal: %f (max allowed: %f)',
streetlearn.seed, streetlearn.frame_count,
self._initial_distance_to_goal, self._allowed_goal_distance)
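
# ------------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the curriculum schedule from
# _update_curriculum_goal_distance as a pure function of training hours, so the
# annealing can be inspected without an environment. The formula mirrors the
# source; the numbers under __main__ are illustrative only.
def _example_allowed_goal_distance(hours_train, hours_part_1, hours_part_2,
                                   min_dist, max_dist, annealing_rate):
  if hours_train <= hours_part_1:
    return min_dist
  numerator = hours_train - hours_part_1
  time_factor = pow(min(1, max(0, numerator / hours_part_2)), annealing_rate)
  return (max_dist - min_dist) * time_factor + min_dist


if __name__ == '__main__':
  for hours in [0, 24, 48, 72, 120]:
    print(hours, _example_allowed_goal_distance(
        hours, hours_part_1=24, hours_part_2=48,
        min_dist=500.0, max_dist=3500.0, annealing_rate=2.0))
  # The allowed distance stays at 500m for the first 24h, then grows
  # polynomially to 3500m over the following 48h and stays there.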
|
streetlearn-master
|
streetlearn/python/environment/curriculum_courier_game.py
|
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Batched StreetLearn RL environment.
Episodes take place either in a mini-map created by performing a breadth-first
traversal of the StreetView graph starting from a starting location, or in
the entire fully-connected graph. Multiple StreetLearn environments are
instantiated, sharing the same cache of panoramas, action space and observation
specs.
Observations:
{
view_image: numpy array of dimension [batch_size, 3, height, width] containing
the street imagery.
graph_image: numpy array of dimension
[batch_size, 3, graph_height, graph_width] containing the map graph images.
view_image_hwc: numpy array of dimension [batch_size, height, width, 3]
containing the street imagery.
graph_image_hwc: numpy array, dimension
[batch_size, graph_height, graph_width, 3] containing the map graph images.
}
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import numpy as np
from streetlearn.python.environment import streetlearn
class BatchedStreetLearn(object):
"""The Batched Streetlearn environment."""
def __init__(self, dataset_path, configs, games, num_env_per_shared_cache=1):
"""Construct the StreetLearn environment.
Args:
dataset_path: filesystem path where the dataset resides.
configs: list of batch_size elements, each element being a dictionary
containing various config settings, that will be extended with defaults
from default_config.DEFAULT_CONFIG.
games: list of batch_size instances of Game.
num_env_per_shared_cache: number of environments that share the same cache
By default equal to 1 (no cache sharing).
"""
# Check that batch_size is a multiple of num_env_per_shared_cache and that
# the action_spec, rotation_speed and observations are compatible between
# all environments.
batch_size = len(games)
assert batch_size > 0
assert num_env_per_shared_cache > 0
num_env_per_shared_cache = min(num_env_per_shared_cache, batch_size)
num_unique_node_caches = int(batch_size / num_env_per_shared_cache)
logging.info('batch_size: %d, num_env_per_shared_cache: %d',
batch_size, num_env_per_shared_cache)
logging.info('num_unique_node_caches: %d', num_unique_node_caches)
assert (num_env_per_shared_cache * num_unique_node_caches) == batch_size
assert len(configs) == batch_size
for k in range(1, batch_size):
assert configs[0]['action_spec'] == configs[k]['action_spec']
assert configs[0]['rotation_speed'] == configs[k]['rotation_speed']
      # Note: list.sort() sorts in place and returns None, so compare
      # sorted copies of the observation lists instead.
      assert (sorted(configs[0]['observations']) ==
              sorted(configs[k]['observations']))
# Instantiate the environments.
self._envs = []
k = 0
for i in range(num_unique_node_caches):
logging.info('Instantiating environment %d with a new node_cache', k)
self._envs.append(streetlearn.StreetLearn(
dataset_path, configs[k], games[k]))
k += 1
for j in range(1, num_env_per_shared_cache):
logging.info('Instantiating environment %d reusing last node_cache', k)
self._envs.append(streetlearn.StreetLearn(
dataset_path, configs[k], games[k], self._envs[k-1].engine))
k += 1
# Preallocate the matrices for the batch observations.
self._observation_batch = {}
for item in self._envs[0]._observations:
if item.observation_spec == [0]:
batched_shape = [batch_size,]
else:
batched_shape = [batch_size,] + item.observation_spec
batched_obs = np.zeros(batched_shape, dtype=item.observation_spec_dtypes)
self._observation_batch[item.name] = batched_obs
self._batch_size = batch_size
@property
def config(self):
return [env.config for env in self._envs]
@property
def seed(self):
return [env.seed for env in self._envs]
@property
def game(self):
return [env.game for env in self._envs]
@property
def field_of_view(self):
return [env.field_of_view for env in self._envs]
@property
def current_pano_id(self):
return [env.current_pano_id for env in self._envs]
@property
def frame_cap(self):
return [env.frame_cap for env in self._envs]
@frame_cap.setter
def frame_cap(self, value):
for env in self._envs:
      env.frame_cap = value
@property
def frame_count(self):
return [env.frame_count for env in self._envs]
  @property
  def graph(self):
"""Return a list of graphs for all the environments."""
return [env.graph for env in self._envs]
@property
def neighbor_resolution(self):
return [env.neighbor_resolution for env in self._envs]
@property
def bbox_lat_min(self):
return [env.bbox_lat_min for env in self._envs]
@property
def bbox_lat_max(self):
return [env.bbox_lat_max for env in self._envs]
@property
def bbox_lng_min(self):
return [env.bbox_lng_min for env in self._envs]
@property
def bbox_lng_max(self):
return [env.bbox_lng_max for env in self._envs]
def observation_spec(self):
"""Returns the observation spec, dependent on the observation format."""
return {name: list(item.shape)
for name, item in self._observation_batch.items()}
def action_set(self):
"""Returns the set of actions, mapping integer actions to 1D arrays."""
return self._envs[0].action_set()
def action_spec(self):
"""Returns the action spec."""
return self._envs[0].action_spec()
def reset(self):
"""Start a new episode in all environments."""
for env in self._envs:
env.reset()
def goto(self, env_id, pano_id, yaw):
"""Go to a specific pano and yaw in the environment.
Args:
env_id: an integer ID for the environment.
pano_id: a string containing the ID of a pano.
yaw: a float with relative yaw w.r.t. north.
Returns:
observation: tuple with observations.
"""
return self._envs[env_id].goto(pano_id, yaw)
def step(self, action):
"""Takes a step in all the environments, and returns results in all envs.
Args:
action: a list of 1d arrays containing a combination of actions in each
environment.
Returns:
observation: tuple with batched observations for the last time step.
reward: list of scalar rewards at the last time step.
done: list of booleans indicating the end of an episode.
info: list of dictionaries with additional debug information.
"""
for action_k, env in zip(action, self._envs):
env.step(action_k)
# Return
return self.observation(), self.reward(), self.done(), self.info()
def observation(self):
"""Returns the batched observations for the last time step."""
for k in range(self._batch_size):
env = self._envs[k]
for name in self._observation_batch:
obs_k = env.observation()[name]
if obs_k is not None:
self._observation_batch[name][k, ...] = obs_k
return {name:item for name, item in self._observation_batch.items()}
def reward(self):
"""Returns the list of rewards for the last time step."""
return [env.reward() for env in self._envs]
def prev_reward(self):
"""Returns the list of rewards for the previous time step."""
return [env.prev_reward() for env in self._envs]
def prev_action(self):
"""Returns the list of actions for the previous time step."""
return [env.prev_action() for env in self._envs]
def done(self):
"""Return the list of flags indicating the end of the current episode."""
return [env.done() for env in self._envs]
def info(self):
"""Return a list of dictionaries with env. info at the current step."""
return [env.info() for env in self._envs]
def get_metadata(self, pano_id):
"""Return the metadata corresponding to the selected pano.
Args:
pano_id: a string containing the ID of a pano.
Returns:
metadata: a protocol buffer with the pano metadata.
"""
return self._envs[0].get_metadata(pano_id)
@property
def cache_size(self):
return [env._engine.GetNodeCacheSize() for env in self._envs]
def render(self):
"""Empty function, for compatibility with OpenAI Gym."""
pass
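
# ------------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the batched-observation
# preallocation from __init__, exercised on a hypothetical spec. In the real
# environment the specs come from the observation objects of the first
# sub-environment; the toy `specs` dict below is an assumption.
if __name__ == '__main__':
  import numpy as np
  batch_size = 4
  specs = {'view_image': ([3, 84, 84], np.uint8),
           'yaw': ([0], np.float64)}
  observation_batch = {}
  for obs_name, (spec, spec_dtype) in specs.items():
    # A spec of [0] denotes a scalar observation, batched to shape [B].
    batched_shape = [batch_size] if spec == [0] else [batch_size] + spec
    observation_batch[obs_name] = np.zeros(batched_shape, dtype=spec_dtype)
  print({k: v.shape for k, v in observation_batch.items()})
  # {'view_image': (4, 3, 84, 84), 'yaw': (4,)}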
|
streetlearn-master
|
streetlearn/python/environment/batched_streetlearn.py
|
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""StreetLearn basic level for the instruction-following task.
In this environment, the agent receives a reward for every waypoint it hits
as well as a larger reward for reaching the final goal.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import logging
import numpy as np
from streetlearn.engine.python import color
from streetlearn.python.environment import coin_game
from streetlearn.python.environment import thumbnail_helper
TrajectoryStep = collections.namedtuple(
'TrajectoryStep',
'waypoint_index pano lat lng heading_deg length instruction')
Trajectory = collections.namedtuple('Trajectory', 'steps goal')
class InstructionsBase(coin_game.CoinGame):
"""Instruction following game."""
def __init__(self, config):
"""Creates an instance of the StreetLearn level.
Args:
config: config dict of various settings.
"""
super(InstructionsBase, self).__init__(config)
self._colors.update({
'goal': color.Color(*config['color_for_goal']),
'waypoint': color.Color(*config['color_for_waypoint']),
'shortest_path': color.Color(*config['color_for_shortest_path']),
})
self._reward_at_waypoint = config['reward_at_waypoint']
self._reward_at_goal = config['reward_at_goal']
self._instruction_file = config['instruction_file']
self._num_instructions = config['num_instructions']
self._max_instructions = config['max_instructions']
self._thumbnail_helper = thumbnail_helper.ThumbnailHelper()
self._thumbnails = np.zeros(
[self._max_instructions + 1, config['height'], config['width'], 3],
dtype=np.uint8)
logging.info('Using %d instructions', self._num_instructions)
logging.info('Padding to %d instructions', self._max_instructions)
self._instructions = []
self._step_counter = 1
self._reward_pano_id_list = {}
self._reward_pano_id_to_family = {}
self._reward_family = {}
self._pano_id_to_color = {}
self._goal_pano_id = None
self._trajectory = None
self._show_shortest_path = config['show_shortest_path']
self._calculate_ground_truth = config['calculate_ground_truth']
# Ground truth direction (for imitation learning agents).
self._gt_direction = 0
# Trajectories
self._num_trajectories = 0
self._trajectory_data = []
self._loaded_trajectories = False
def _load_trajectories(self):
"""Load the trajectories into memory."""
logging.info('Loading trajectories from %s', self._instruction_file)
steps = []
current_trajectory_index = 0
with open(self._instruction_file, 'r') as f:
for line in f:
tokens = line.strip().split('\t')
trajectory_index = int(tokens[0])
waypoint_index = int(tokens[1])
lat = float(tokens[2])
lng = float(tokens[3])
heading_deg = float(tokens[4])
length = float(tokens[5])
pano_id = tokens[6]
instruction = tokens[7]
step = TrajectoryStep(
waypoint_index=waypoint_index,
pano=pano_id,
lat=lat,
lng=lng,
heading_deg=heading_deg,
length=length,
instruction=instruction)
if trajectory_index != current_trajectory_index:
self._add_trajectory(steps)
steps = []
current_trajectory_index = trajectory_index
steps.append(step)
self._add_trajectory(steps)
logging.info('Loaded %d trajectories', self._num_trajectories)
self._loaded_trajectories = True
def _add_trajectory(self, steps):
"""Store a trajectory."""
num_steps = len(steps)
if num_steps > 0:
# Separate goal from waypoints.
goal = steps[num_steps-1]
steps = steps[:(num_steps-1)]
# Store the trajectory in a hashtable.
trajectory = Trajectory(steps=steps, goal=goal)
self._trajectory_data.append(trajectory)
self._num_trajectories += 1
if self._num_trajectories % 1000 == 0:
logging.info('Stored %d trajectories', self._num_trajectories)
def on_reset(self, streetlearn):
"""Gets called after StreetLearn:reset().
Selects a random trajectory, extracts the instructions and panos at goal and
waypoints, computes the shortest paths between each start, each waypoint and
the goal.
Args:
streetlearn: a streetlearn instance.
Returns:
A newly populated pano_id_to_color dictionary.
"""
# Initialise graph of rewards and colors with coins
super(InstructionsBase, self).on_reset(streetlearn)
self._current_step = 0
self._step_counter = 1
self._step_by_pano = {}
self._pano_by_step = {}
self._reward_pano_id_list = {}
self._reward_pano_id_to_family = {}
self._reward_family = {}
self._pano_id_to_color = {}
self._num_steps_this_goal = 0
# Randomly sample a trajectory.
    if not self._loaded_trajectories:
self._load_trajectories()
trajectory = self._sample_trajectory(streetlearn)
start = max(len(trajectory.steps) - self._num_instructions, 0)
logging.info('Trajectory of length %d (max %d), starting at %d',
len(trajectory.steps), self._num_instructions, start)
num_steps = 0
start_pano_id = None
self._instructions = []
self._thumbnails[:] = 0
pano_list = []
for step in trajectory.steps[start:]:
pano_id = step.pano
pano_list.append(pano_id)
# Even if we do not take rewards for waypoints, we store them to keep
# track of the agent's position along the trajectory.
if num_steps == 0:
start_pano_id = pano_id
if num_steps > 0:
self._add_reward_to_pano(pano_id, self._reward_at_waypoint,
self._colors['waypoint'], streetlearn)
self._instructions.append(step.instruction)
# Fetch the thumbnail for the current step of the trajectory.
step_thumbnail = self._thumbnail_helper.get_thumbnail(
streetlearn, pano_id, step.heading_deg)
if step_thumbnail is not None:
self._thumbnails[num_steps] = step_thumbnail
if self._reward_at_waypoint:
logging.info('Waypoint %d at pano %s, yields reward of %f',
num_steps, pano_id, self._reward_at_waypoint)
else:
logging.info('Waypoint %d at pano %s', num_steps, pano_id)
num_steps += 1
# Set the goal.
self._goal_pano_id = trajectory.goal.pano
self._add_reward_to_pano(self._goal_pano_id, self._reward_at_goal,
self._colors['goal'], streetlearn)
pano_list.append(self._goal_pano_id)
# Store the previously defined coin rewards and colours
for pano_id in self._coin_pano_id_set:
self._add_coin_reward_to_pano(pano_id)
# Add goal pano thumbnail at the end.
goal_thumbnail = self._thumbnail_helper.get_thumbnail(
streetlearn, self._goal_pano_id, trajectory.goal.heading_deg)
if goal_thumbnail is not None:
self._thumbnails[num_steps] = goal_thumbnail
# Move and rotate player into start position.
streetlearn.engine.SetPosition(start_pano_id)
streetlearn.currentpano_id = start_pano_id
streetlearn.engine.RotateObserver(trajectory.steps[start].heading_deg, 0)
logging.info('From: %s (%f, %f), To: %s', start_pano_id,
trajectory.steps[start].lat,
trajectory.steps[start].lng, self._goal_pano_id)
logging.info('Trajectory with %d waypoints (goal included)', num_steps)
if self._calculate_ground_truth or self._show_shortest_path:
# Update the shortest path to the goal or first waypoint.
self._update_shortest_path(streetlearn, start_pano_id)
if self._show_shortest_path:
# Use the computed shortest path to color the panos.
self._color_shortest_path(streetlearn)
# By default, direction is forward.
self._gt_direction = 0
return self._pano_id_to_color
def _update_shortest_path(self, streetlearn, start_pano_id):
"""Update the target of the shortest paths and color panos along that path.
Args:
streetlearn: the streetlearn environment.
start_pano_id: a string for the current pano ID, for computing the optimal
path.
"""
step = self._current_step + 1
logging.info(self._pano_by_step)
logging.info('Reached step %d', step)
if step in self._pano_by_step:
target_pano_id = self._pano_by_step[step]
self._shortest_path, num_panos = self._shortest_paths(
streetlearn, target_pano_id, start_pano_id)
logging.info('Shortest path from %s to waypoint/goal %s covers %d panos',
start_pano_id, target_pano_id, num_panos)
def _color_shortest_path(self, streetlearn):
"""Color panos along the current shortest path to the current target.
Args:
streetlearn: the streetlearn environment.
"""
for pano_id in self._shortest_path:
self._pano_id_to_color.setdefault(pano_id, self._colors['shortest_path'])
@property
def trajectory(self):
return self._trajectory
def _sample_trajectory(self, streetlearn):
"""Sample a trajectory.
Args:
streetlearn: Streetlearn instance.
Returns:
trajectory object.
"""
trajectory_index = np.random.randint(len(self._trajectory_data))
self._trajectory = self._trajectory_data[trajectory_index]
return self.trajectory
def _add_reward_to_pano(self, pano_id, reward, color, streetlearn):
"""Add reward to a pano and all its neighbours.
Args:
pano_id: centroid pano id.
reward: Amount of reward to attach to this and neighbouring panos.
color: Color for the goal in the minimap.
streetlearn: Streetlearn instance
"""
# If this already has a reward indirectly through a neighbour, undo that.
if pano_id in self._reward_pano_id_list:
if self._reward_pano_id_to_family[pano_id] == pano_id:
# This was already made a reward field; update reward only.
for neighbor in self._reward_family[pano_id]:
# Replace reward and colour.
self._reward_pano_id_list[neighbor] = reward
self._pano_id_to_color[neighbor] = color
return
else:
# This was previously an indirect reward field.
        # Remove it from the other family, then continue with the default
        # operation.
self._reward_family[self._reward_pano_id_to_family[pano_id]].remove(
pano_id)
self._reward_pano_id_to_family[pano_id] = None
# Define family around this id.
self._add_family(pano_id, streetlearn)
# Add reward and colour to family and links into family.
for neighbor in self._reward_family[pano_id]:
self._reward_pano_id_list[neighbor] = reward
self._reward_pano_id_to_family[neighbor] = pano_id
self._pano_id_to_color[neighbor] = color
def _add_coin_reward_to_pano(self, pano_id):
"""Add coin reward to a pano, but only if that pano has no reward yet.
Args:
pano_id: centroid pano id.
"""
if pano_id not in self._reward_pano_id_list:
self._reward_pano_id_list[pano_id] = self._reward_per_coin
self._reward_pano_id_to_family[pano_id] = pano_id
self._reward_family[pano_id] = {pano_id}
self._pano_id_to_color[pano_id] = self._colors['coin']
def _add_family(self, pano_id, streetlearn):
"""Add all neighbours of a pano to a list (family) of pano IDs.
Args:
pano_id: centroid pano id.
streetlearn: streetlearn graph for establishing neighbours.
"""
# If the pano is already part of a reward, do not mess with it.
if pano_id in self._reward_family:
return
# Assign each waypoint with a pano group counter. Used when adding waypoints
# one by one, in the order of the trajectory.
if pano_id not in self._step_by_pano:
logging.info('Added waypoint %d at pano %s', self._step_counter, pano_id)
self._step_by_pano[pano_id] = self._step_counter
self._pano_by_step[self._step_counter] = pano_id
self._step_counter += 1
# Add the same logic to the immediate neighbours of the pano.
self._reward_family[pano_id] = set({pano_id})
pano_metadata = streetlearn.engine.GetMetadata(pano_id)
for neighbor in pano_metadata.neighbors:
if neighbor.id not in self._reward_pano_id_to_family:
self._reward_pano_id_to_family[neighbor.id] = pano_id
self._reward_family[pano_id].add(neighbor.id)
def _check_reward(self, pano_id, streetlearn):
"""Check what reward the current pano yields, based on instructions.
Args:
pano_id: centroid pano id.
streetlearn: streetlearn graph for establishing neighbours.
Returns:
The reward for the current step.
"""
reward = 0
self._reached_goal = False
# Check if pano ID is in the list of pano IDs that yield rewards.
if pano_id in self._reward_pano_id_list:
reward = self._reward_pano_id_list[pano_id]
family_id = self._reward_pano_id_to_family[pano_id]
# If the family_id matches the goal, we have finished the trajectory.
previous_step = self._current_step
self._current_step = self._step_by_pano[family_id]
if family_id == self._goal_pano_id:
self._reached_goal = True
logging.info('%d: Completed level', streetlearn.frame_count)
# It appears the level does not end immediately, so we need to reset the
# step counter manually at this stage to prevent overflow.
self._current_step = 0
else:
logging.info('%d: Moved from %d to %d', streetlearn.frame_count,
previous_step, self._current_step)
if self._calculate_ground_truth or self._show_shortest_path:
# Update the shortest path to the goal or next waypoint.
self._update_shortest_path(streetlearn, pano_id)
if self._show_shortest_path:
# Use the computed shortest path to color the panos.
self._color_shortest_path(streetlearn)
for i in self._reward_family[family_id]:
del self._reward_pano_id_list[i]
del self._reward_pano_id_to_family[i]
del self._pano_id_to_color[i]
del self._reward_family[family_id]
# The value of the reward determines if the goal was reached and the
# episode can now end.
logging.info('%d: Picked up reward of %f at pano %s.',
streetlearn.frame_count, reward, pano_id)
# Add optional coin rewards.
if pano_id in self._coin_pano_id_set:
reward += self._reward_per_coin
self._coin_pano_id_set.remove(pano_id)
return reward
def get_reward(self, streetlearn):
"""Looks at current_pano_id and collects any reward found there.
Args:
streetlearn: the streetlearn environment.
Returns:
reward: the reward from the last step.
"""
# Calculate coin, waypoint and goal rewards, determine if end of episode.
current_pano_id = streetlearn.current_pano_id
reward = self._check_reward(current_pano_id, streetlearn)
self._num_steps_this_goal += 1
return reward
def get_info(self, streetlearn):
""""Returns current information about the state of the environment.
Args:
streetlearn: a StreetLearn instance.
Returns:
info: information from the environment at the last step.
"""
info = super(InstructionsBase, self).get_info(streetlearn)
info['num_steps_this_goal'] = self._num_steps_this_goal
info['current_step'] = self._current_step
info['current_goal_id'] = self._goal_pano_id
info['distance_to_goal'] = streetlearn.engine.GetPanoDistance(
streetlearn.current_pano_id, self._goal_pano_id)
info['reward_current_goal'] = self._reward_at_goal
if self._calculate_ground_truth:
current_pano_id = streetlearn.current_pano_id
next_pano_id = self._panos_to_goal[current_pano_id]
info['next_pano_id'] = next_pano_id
if next_pano_id:
bearing_to_next_pano = streetlearn.engine.GetPanoBearing(
current_pano_id, next_pano_id) - streetlearn.engine.GetYaw()
else:
bearing_to_next_pano = 0
info['bearing_to_next_pano'] = (bearing_to_next_pano + 180) % 360 - 180
return info
def done(self):
"""Returns a flag indicating the end of the current episode.
This game ends only at the end of the episode or if the goal is reached.
"""
if self._reached_goal:
self._reached_goal = False
return True
else:
return False
def thumbnails(self):
"""Returns extra observation thumbnails.
Args:
include_goal_thumb: Bool (default: False) of whether we add the goal.
Returns:
thumbnails: Thumbnails array of shape (batch_size, 3, h, w)
"""
return self._thumbnails
def instructions(self):
"""Returns instructions.
Args:
None
Returns:
instructions: string containing game specific instructions.
"""
return self._instructions
@property
def goal_id(self):
"""Returns the id of the goal Pano."""
return self._goal_pano_id
def on_step(self, streetlearn):
"""Update the ground truth direction to take and the set of touched panos.
Args:
streetlearn: the streetlearn environment.
"""
super(InstructionsBase, self).on_step(streetlearn)
if self._calculate_ground_truth:
# streetlearn.current_pano_id is not always updated.
current_pano_id = streetlearn.engine.GetPano().id
# What is the next pano and what is the direction to the pano?
next_pano_id = self._panos_to_goal[current_pano_id]
if next_pano_id:
yaw = streetlearn.engine.GetYaw()
bearing = streetlearn.engine.GetPanoBearing(
current_pano_id, next_pano_id) - yaw
self._gt_direction = (bearing + 180) % 360 - 180
else:
self._gt_direction = 0
def ground_truth_direction(self):
"""Returns the ground truth direction to take.
Returns:
ground_truth_direction: Float angle with the ground truth direction
to be taken for the agent to go towards the goal.
"""
return self._gt_direction
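
# ------------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the angle wrapping used for
# `bearing_to_next_pano` and the ground-truth direction above, as a standalone
# helper. `(x + 180) % 360 - 180` maps any angle in degrees into [-180, 180).
def _example_wrap_angle(degrees):
  return (degrees + 180) % 360 - 180


if __name__ == '__main__':
  assert _example_wrap_angle(350) == -10
  assert _example_wrap_angle(-190) == 170
  assert _example_wrap_angle(180) == -180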
|
streetlearn-master
|
streetlearn/python/environment/instructions_base.py
|
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for all StreetLearn levels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import math
import numpy as np
from absl import logging
import six
from streetlearn.engine.python import color
EMPTY_THUMBNAILS = np.empty((0, 3, 0, 0))
EMPTY_GT_DIRECTION = np.empty((0,))
# When computing which panos B_i are immediately reachable from a given pano A,
# we look at all panos B_i up to depth TOL_DEPTH in a graph whose root is A,
# with a difference in altitude less than TOL_ALT meters, and within a cone
# of TOL_BEARING degrees.
TOL_ALT = 2.0
TOL_DEPTH = 3
TOL_BEARING = 30
@six.add_metaclass(abc.ABCMeta)
class Game(object):
"""Base class for streetlearn levels."""
@abc.abstractmethod
def on_reset(self, streetlearn):
"""Gets called after StreetLearn:reset().
Args:
streetlearn: a StreetLearn instance.
Returns:
Dictionary that maps certain pano IDs to colors.
"""
return {}
@abc.abstractmethod
def on_step(self, streetlearn):
""""Gets called after StreetLearn:step().
Args:
streetlearn: a StreetLearn instance.
"""
@abc.abstractmethod
def get_reward(self, streetlearn):
"""Returns the reward from the last step.
Args:
streetlearn: a StreetLearn instance.
Returns:
reward: the reward from the last step.
"""
@abc.abstractmethod
def get_info(self, streetlearn):
""""Returns current information about the environment.
Args:
streetlearn: a StreetLearn instance.
Returns:
info: information from the environment at the last step.
"""
@property
def goal_id(self):
"""Returns the id of the goal pano, if there is one."""
return None
def ground_truth_direction(self):
"""Returns the float angle with the ground truth direction for the agent."""
return EMPTY_GT_DIRECTION
def thumbnails(self):
"""Returns observation thumbnails array of shape (batch_size, 3, h, w)."""
return EMPTY_THUMBNAILS
def instructions(self):
"""Returns a string containing game specific instructions."""
return str()
def highlighted_panos(self):
"""Returns the list of highlighted panos and their colors."""
return {}
def done(self):
"""Returns a flag indicating the end of the current episode."""
return True
def _compute_extended_graphs(self, streetlearn):
"""Compute an extended directed graph accessible to the StreeLearn agent.
Args:
streetlearn: the streetlearn environment.
"""
# Some datasets contain unidirectional connections: make them bidirectional.
for pano_id in six.iterkeys(streetlearn.graph):
neighbors = streetlearn.graph[pano_id]
for neighbor_id in neighbors:
neighbor_neighbors = streetlearn.graph[neighbor_id]
if pano_id not in neighbor_neighbors:
streetlearn.graph[neighbor_id].append(pano_id)
logging.info('Adding neighbor %s to %s to satisfy cycle',
pano_id, neighbor_id)
logging.info('Storing the altitudes of each pano for faster retrieval.')
altitudes = {}
for pano_id in six.iterkeys(streetlearn.graph):
altitudes[pano_id] = streetlearn.engine.GetMetadata(pano_id).pano.alt
logging.info('Computing the extended directed graph.')
self._extended_graph_from = {}
self._extended_graph_to = collections.defaultdict(list)
num_panos = 0
num_panos_total = len(streetlearn.graph)
num_edges_extended = 0
for current_id in six.iterkeys(streetlearn.graph):
# Find the neighbors of the current pano up to depth TOL_DEPTH, excluding
# panos separated from it by more than TOL_ALT meters in altitude.
visited = {}
queue_panos = [(current_id, 0)]
while queue_panos:
elem = queue_panos.pop(0)
pano_id = elem[0]
depth = elem[1]
current_alt = altitudes[current_id]
if depth > 0:
# Store the distance and bearing to each neighbor.
dist = streetlearn.engine.GetPanoDistance(current_id, pano_id)
bearing = streetlearn.engine.GetPanoBearing(current_id, pano_id)
visited[pano_id] = (dist, bearing)
# Look for new neighbors recursively.
if depth < TOL_DEPTH:
neighbors = streetlearn.graph[pano_id]
for neighbor_id in neighbors:
if neighbor_id not in visited:
neighbor_alt = altitudes[neighbor_id]
if depth == 0 or abs(neighbor_alt - current_alt) < TOL_ALT:
queue_panos.append((neighbor_id, depth+1))
if current_id in visited:
visited.pop(current_id)
# Select only neighbors that are the closest within a tolerance cone,
# and create extended graphs.
self._extended_graph_from[current_id] = {}
for pano_id, (dist, bearing) in six.iteritems(visited):
retain_pano_id = True
for other_id, (other_dist, other_bearing) in six.iteritems(visited):
if ((pano_id != other_id) and
(180 - abs(abs(bearing - other_bearing) - 180) < TOL_BEARING) and
(other_dist < dist)):
retain_pano_id = False
if retain_pano_id:
self._extended_graph_from[current_id][pano_id] = (dist, bearing)
num_edges_extended += 1
self._extended_graph_to[pano_id].append((current_id, dist, bearing))
num_panos += 1
if num_panos % 1000 == 0:
logging.info('Processed %d/%d panos, %d extended directed edges',
num_panos, num_panos_total, num_edges_extended)
def _bfs(self, queue_panos, graph, flagged, visited):
"""Compute the shortest paths using BFS given a queue and pano graph.
Args:
queue_panos: list of tuples (pano_id, parent_pano_id, depth).
graph: dictionary with pano_id keys and lists of pano_id values.
flagged: set of pano_ids that have already been queued.
visited: dictionary mapping each visited pano_id to a tuple
(parent_pano_id, depth).
Returns:
flagged: set of pano_ids that have been queued during the search.
visited: dictionary mapping each visited pano_id to a tuple
(parent_pano_id, depth).
"""
while queue_panos:
# Mark the pano at the top of the queue as visited.
elem = queue_panos.pop(0)
current_pano_id = elem[0]
parent_pano_id = elem[1]
depth = elem[2]
visited[current_pano_id] = (parent_pano_id, depth)
# Add the neighbors of the pano.
if current_pano_id in graph:
neighbors = graph[current_pano_id]
for neighbor_id in neighbors:
if isinstance(neighbor_id, tuple):
neighbor_id = neighbor_id[0]
if neighbor_id not in flagged:
flagged.add(neighbor_id)
queue_panos.append((neighbor_id, current_pano_id, depth+1))
return flagged, visited
def _shortest_paths(self, streetlearn, target_pano_id, start_pano_id):
"""Compute the shortest paths from all the panos to a given start pano.
Args:
streetlearn: the streetlearn environment.
target_pano_id: a string for the target pano ID.
start_pano_id: a string for the current pano ID, for computing the optimal
path.
Returns:
shortest_path: dictionary containing (current_pano_id, next_pano_id)
as (key, value) pairs.
num_panos: integer number of panos in the shortest path.
"""
# The shortest path relies on the extended directed graph.
if not hasattr(self, '_extended_graph_from'):
self._compute_extended_graphs(streetlearn)
# Compute the shortest paths from all the panos to the target pano
# using the direct connection graph.
logging.info('Computing shortest paths to %s using BFS on direct graph',
target_pano_id)
flagged_direct = set([target_pano_id])
(_, visited_direct) = self._bfs(
[(target_pano_id, None, 0)], streetlearn.graph, flagged_direct, {})
# Compute the shortest paths from all the panos to the target pano
# using the extended (reachable) graph, with shortcuts.
logging.info('Computing shortest paths to %s using BFS on extended graph',
target_pano_id)
flagged_extended = set([target_pano_id])
(_, visited_extended) = self._bfs(
[(target_pano_id, None, 0)], self._extended_graph_to, flagged_extended,
{})
# Some panos may have been missed during the shortest path computation
# on the extended graph because of the preferential choice of one pano
# over the other. In order to make sure that there is a path from every
# pano of the graph to the goal pano, we backfill visited_extended
# with visited_direct, which is computed on the direct connection graph.
self._panos_to_goal = {}
for child, (parent, _) in six.iteritems(visited_direct):
if child in visited_extended:
(parent, _) = visited_extended[child]
self._panos_to_goal[child] = parent
# Extract the shortest path, from the current starting position, by
# following the panos to goal as computed by the BFS search that started
# from the goal.
current_pano_id = start_pano_id
list_panos = [current_pano_id]
next_pano_id = self._panos_to_goal[current_pano_id]
while next_pano_id:
list_panos.append(next_pano_id)
current_pano_id = next_pano_id
next_pano_id = self._panos_to_goal[current_pano_id]
# Because of the Street View direct graph connectivity, and because the
# StreetLearn extended graph adds direct edges between panos that are up
# to TOL_DEPTH links apart, we need to "iron out" the path at street
# intersections. This code transforms a -> b -> c -> d
# into a -> b' -> c' -> d if the latter is shorter (in metric distance)
# and a -> b -> c into a -> b' -> c' -> c if the latter is shorter.
shortest_path = {}
num_panos = 0
while num_panos < len(list_panos)-3:
a = list_panos[num_panos]
b = list_panos[num_panos+1]
c = list_panos[num_panos+2]
d = list_panos[num_panos+3]
skipped = 1
shortest_path[a] = b
if (a in self._extended_graph_from and
b in self._extended_graph_from and
c in self._extended_graph_from and
b in self._extended_graph_from[a] and
c in self._extended_graph_from[b] and
d in self._extended_graph_from[c]):
(dist_ab, _) = self._extended_graph_from[a][b]
(dist_bc, _) = self._extended_graph_from[b][c]
(dist_cd, _) = self._extended_graph_from[c][d]
dist_abc = dist_ab + dist_bc
dist_abcd = dist_abc + dist_cd
for b2 in six.iterkeys(self._extended_graph_from[a]):
if b2 != b:
for c2 in six.iterkeys(self._extended_graph_from[b2]):
for d2 in six.iterkeys(self._extended_graph_from[c2]):
if d2 == c or d2 == d:
(dist_ab2, _) = self._extended_graph_from[a][b2]
(dist_bc2, _) = self._extended_graph_from[b2][c2]
(dist_cd2, _) = self._extended_graph_from[c2][d2]
dist_abcd2 = dist_ab2 + dist_bc2 + dist_cd2
if d2 == c and dist_abcd2 < dist_abc:
self._panos_to_goal[a] = b2
self._panos_to_goal[b2] = c2
self._panos_to_goal[c2] = c
shortest_path[a] = b2
shortest_path[b2] = c2
shortest_path[c2] = c
logging.info('Replaced %s, %s, %s by %s, %s, %s, %s',
a, b, c, a, b2, c2, d2)
skipped = 2
if d2 == d and dist_abcd2 < dist_abcd:
self._panos_to_goal[a] = b2
self._panos_to_goal[b2] = c2
self._panos_to_goal[c2] = d
shortest_path[a] = b2
shortest_path[b2] = c2
shortest_path[c2] = d
logging.info('Replaced %s, %s, %s, %s by %s, %s, %s, %s',
a, b, c, d, a, b2, c2, d2)
skipped = 3
num_panos += skipped
while num_panos < len(list_panos)-1:
shortest_path[list_panos[num_panos]] = list_panos[num_panos+1]
num_panos += 1
return shortest_path, num_panos
|
streetlearn-master
|
streetlearn/python/environment/game.py
|
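The cone test in _compute_extended_graphs above, 180 - abs(abs(bearing - other_bearing) - 180) < TOL_BEARING, relies on the wrapped angular distance between two bearings. A small sketch, assuming bearings in [0, 360) (the helper name is illustrative, not part of the repository):

def angular_separation(a, b):
  """Absolute separation in degrees between two bearings, in [0, 180]."""
  return 180 - abs(abs(a - b) - 180)

assert angular_separation(350, 10) == 20
assert angular_separation(90, 270) == 180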
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to handle the various StreetLearn observation types."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import six
_METADATA_COUNT = 14
_NUM_HEADING_BINS = 16
_NUM_LAT_BINS = 32
_NUM_LNG_BINS = 32
def reshape_hwc(array, c, w, h):
"""Turn a planar RGB array into an interleaved one.
Args:
array: An array of bytes containing the planar RGB image of size (c, w, h).
c: Number of channels in the image.
w: Width of the image.
h: Height of the image.
Returns:
An interleaved array of bytes shaped (h, w, c).
"""
arr = array.reshape(c, w * h)
return np.ravel((arr[0], arr[1], arr[2]),
order='F').reshape(h, w, 3)
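# Illustrative example (not part of the original module): for c=3, w=2, h=1,
# the planar bytes [R0, R1, G0, G1, B0, B1] interleave per pixel, e.g.
#   reshape_hwc(np.array([1, 2, 3, 4, 5, 6]), 3, 2, 1)
#   -> array([[[1, 3, 5], [2, 4, 6]]])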
class Observation(six.with_metaclass(abc.ABCMeta, object)):
"""Base class for all observations."""
def __init__(self, streetlearn):
self._streetlearn = streetlearn
@abc.abstractproperty
def observation_spec(self):
"""The observation_spec for this observation."""
pass
@abc.abstractproperty
def observation(self):
"""The observation data."""
pass
@classmethod
def create(cls, name, streetlearn):
"""Dispatches an Observation based on `name`."""
observations = [ViewImage, GraphImage, ViewImageHWC, GraphImageHWC, Yaw,
Pitch, Metadata, TargetMetadata, LatLng, TargetLatLng,
YawLabel, Neighbors, LatLngLabel, TargetLatLngLabel,
Thumbnails, Instructions, GroundTruthDirection, PrevReward,
PrevAction]
dispatch = {o.name: o for o in observations}
try:
return dispatch[name](streetlearn)
except KeyError:
raise ValueError('No Observation named %s found' % name)
class ViewImage(Observation):
"""RGB pixel data of the view with shape (C, H, W) where C = 3."""
name = 'view_image'
observation_spec_dtypes = np.uint8
def __init__(self, streetlearn):
super(ViewImage, self).__init__(streetlearn)
self._width = streetlearn.config["width"]
self._height = streetlearn.config["height"]
self._depth = 3
self._buffer = np.empty(self._depth * self._width * self._height,
dtype=np.uint8)
@property
def observation_spec(self):
return [self._depth, self._height, self._width]
@property
def observation(self):
return self._streetlearn.engine.RenderObservation(self._buffer)
class ViewImageHWC(ViewImage):
"""RGB pixel data of the view with shape (H, W, C) where C = 3."""
name = 'view_image_hwc'
observation_spec_dtypes = np.uint8
@property
def observation_spec(self):
return [self._height, self._width, self._depth]
@property
def observation(self):
self._streetlearn.engine.RenderObservation(self._buffer)
return reshape_hwc(self._buffer, self._depth, self._width, self._height)
class GraphImage(Observation):
"""RGB pixel data of the graph with shape (C, H, W) where C = 3."""
name = 'graph_image'
observation_spec_dtypes = np.uint8
def __init__(self, streetlearn):
super(GraphImage, self).__init__(streetlearn)
self._width = streetlearn.config["graph_width"]
self._height = streetlearn.config["graph_height"]
self._depth = 3
self._buffer = np.empty(self._depth * self._width * self._height,
dtype=np.uint8)
@property
def observation_spec(self):
return [self._depth, self._height, self._width]
@property
def observation(self):
highlighted_panos = self._streetlearn.game.highlighted_panos()
return self._streetlearn.engine.DrawGraph(highlighted_panos, self._buffer)
class GraphImageHWC(GraphImage):
"""RGB pixel data of the graph with shape (H, W, C) where C = 3."""
name = 'graph_image_hwc'
observation_spec_dtypes = np.uint8
@property
def observation_spec(self):
return [self._height, self._width, self._depth]
@property
def observation(self):
highlighted_panos = self._streetlearn.game.highlighted_panos()
self._streetlearn.engine.DrawGraph(highlighted_panos, self._buffer)
return reshape_hwc(self._buffer, self._depth, self._width, self._height)
class Yaw(Observation):
"""The agent's current yaw (different from the current pano heading)."""
name = 'yaw'
observation_spec_dtypes = np.float64
@property
def observation_spec(self):
return [1]
@property
def observation(self):
return np.array(self._streetlearn.engine.GetYaw(), dtype=np.float64)
class Pitch(Observation):
"""The agent's current pitch."""
name = 'pitch'
observation_spec_dtypes = np.float64
@property
def observation_spec(self):
return [1]
@property
def observation(self):
return np.array(self._streetlearn.engine.GetPitch(), dtype=np.float64)
class YawLabel(Observation):
"""The agent's current yaw (different from the current pano heading)."""
name = 'yaw_label'
observation_spec_dtypes = np.uint8
@property
def observation_spec(self):
return [1]
@property
def observation(self):
yaw = self._streetlearn.engine.GetYaw() % 360.0
return np.array(yaw * _NUM_HEADING_BINS / 360.0, dtype=np.uint8)
class Metadata(Observation):
"""Metadata about the current pano."""
name = 'metadata'
observation_spec_dtypes = bytearray
@property
def observation_spec(self):
return [_METADATA_COUNT]
@property
def observation(self):
pano_id = self._streetlearn.current_pano_id
return bytearray(self._streetlearn.engine.GetMetadata(
pano_id).pano.SerializeToString())
class TargetMetadata(Observation):
"""Metadata about the target pano."""
name = 'target_metadata'
observation_spec_dtypes = bytearray
@property
def observation_spec(self):
return [_METADATA_COUNT]
@property
def observation(self):
goal_id = self._streetlearn.game.goal_id
if goal_id:
return bytearray(self._streetlearn.engine.GetMetadata(
goal_id).pano.SerializeToString())
return bytearray()
class LatLng(Observation):
"""The agent's current lat/lng coordinates, scaled according to config params
using bbox_lat_min and bbox_lat_max, as well as bbox_lng_min and bbox_lng_max,
so that scaled lat/lng take values between 0 and 1 within the bounding box.
"""
name = 'latlng'
observation_spec_dtypes = np.float64
def _scale_lat(self, lat):
den = self._streetlearn.bbox_lat_max - self._streetlearn.bbox_lat_min
return ((lat - self._streetlearn.bbox_lat_min) / den if (den > 0) else 0)
def _scale_lng(self, lng):
den = self._streetlearn.bbox_lng_max - self._streetlearn.bbox_lng_min
return ((lng - self._streetlearn.bbox_lng_min) / den if (den > 0) else 0)
@property
def observation_spec(self):
return [2]
@property
def observation(self):
pano_id = self._streetlearn.current_pano_id
pano_data = self._streetlearn.engine.GetMetadata(pano_id).pano
lat_scaled = self._scale_lat(pano_data.coords.lat)
lng_scaled = self._scale_lng(pano_data.coords.lng)
return np.array([lat_scaled, lng_scaled], dtype=np.float64)
class LatLngLabel(LatLng):
"""The agent's current yaw (different from the current pano heading)."""
name = 'latlng_label'
observation_spec_dtypes = np.int32
def _latlng_bin(self, pano_id):
pano_data = self._streetlearn.engine.GetMetadata(pano_id).pano
lat_bin = np.floor(self._scale_lat(pano_data.coords.lat) * _NUM_LAT_BINS)
lat_bin = np.max([np.min([lat_bin, _NUM_LAT_BINS-1]), 0])
lng_bin = np.floor(self._scale_lng(pano_data.coords.lng) * _NUM_LNG_BINS)
lng_bin = np.max([np.min([lng_bin, _NUM_LNG_BINS-1]), 0])
latlng_bin = lat_bin * _NUM_LNG_BINS + lng_bin
return latlng_bin
@property
def observation_spec(self):
return [1]
@property
def observation(self):
pano_id = self._streetlearn.current_pano_id
return np.array(self._latlng_bin(pano_id), dtype=np.int32)
class TargetLatLng(LatLng):
"""The agent's target lat/lng coordinates."""
name = 'target_latlng'
@property
def observation(self):
goal_id = self._streetlearn.game.goal_id
if goal_id:
pano_data = self._streetlearn.engine.GetMetadata(goal_id).pano
lat_scaled = self._scale_lat(pano_data.coords.lat)
lng_scaled = self._scale_lng(pano_data.coords.lng)
return np.array([lat_scaled, lng_scaled], dtype=np.float64)
return np.array([0, 0], dtype=np.float64)
class TargetLatLngLabel(LatLngLabel):
"""The agent's current yaw (different from the current pano heading)."""
name = 'target_latlng_label'
@property
def observation(self):
goal_id = self._streetlearn.game.goal_id
if goal_id:
return np.array(self._latlng_bin(goal_id), dtype=np.int32)
return np.array(0, dtype=np.int32)
class Thumbnails(Observation):
"""Thumbnails' pixel data."""
name = 'thumbnails'
observation_spec_dtypes = np.uint8
@property
def observation_spec(self):
return self._streetlearn.game.thumbnails().shape
@property
def observation(self):
return self._streetlearn.game.thumbnails()
class Instructions(Observation):
"""StreetLang instructions."""
name = 'instructions'
observation_spec_dtypes = str
@property
def observation_spec(self):
return [1]
@property
def observation(self):
return ('|'.join(self._streetlearn.game.instructions())).encode('utf-8')
class GroundTruthDirection(Observation):
"""Direction in degrees that the agent needs to take now."""
name = 'ground_truth_direction'
observation_spec_dtypes = np.float32
@property
def observation_spec(self):
return [1]
@property
def observation(self):
return np.array(self._streetlearn.game.ground_truth_direction(),
dtype=np.float32)
class Neighbors(Observation):
"""IDs of neighboring panos."""
name = 'neighbors'
observation_spec_dtypes = np.uint8
@property
def observation_spec(self):
return [self._streetlearn.neighbor_resolution]
@property
def observation(self):
return np.asarray(
self._streetlearn.engine.GetNeighborOccupancy(
self._streetlearn.neighbor_resolution),
dtype=np.uint8)
class PrevReward(Observation):
"""The agent's reward at the previous time step."""
name = 'prev_reward'
observation_spec_dtypes = np.float32
@property
def observation_spec(self):
return [1]
@property
def observation(self):
return np.array(self._streetlearn.prev_reward(), dtype=np.float32)
class PrevAction(Observation):
"""The agent's action at the previous time step."""
name = 'prev_action'
observation_spec_dtypes = np.float32
@property
def observation_spec(self):
return [4]
@property
def observation(self):
return np.array(self._streetlearn.prev_action(), dtype=np.float32)
|
streetlearn-master
|
streetlearn/python/environment/observations.py
|
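LatLngLabel above flattens a 2-D bin index into a single integer label via lat_bin * _NUM_LNG_BINS + lng_bin, after clamping each bin into range. A standalone restatement with the module's 32 x 32 binning (the helper is a sketch, not the repository API):

import numpy as np

def latlng_label(scaled_lat, scaled_lng, num_lat_bins=32, num_lng_bins=32):
  """Flatten scaled (lat, lng) in [0, 1] into a single bin label."""
  lat_bin = min(max(int(np.floor(scaled_lat * num_lat_bins)), 0),
                num_lat_bins - 1)
  lng_bin = min(max(int(np.floor(scaled_lng * num_lng_bins)), 0),
                num_lng_bins - 1)
  return lat_bin * num_lng_bins + lng_bin

assert latlng_label(0.5, 0.25) == 16 * 32 + 8  # == 520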
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""StreetLearn level with reward circles around each waypoint.
This file extends the instruction-following game by adding reward densification,
giving fractional reward to agents as they approach a waypoint. At any point in
time, the level will project a reward cone around the next waypoint given the
most recently passed waypoint.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from streetlearn.python.environment import instructions_curriculum
class InstructionsDensification(
instructions_curriculum.InstructionsCurriculum):
"""StreetLang game with a cone around each waypoint reward."""
def __init__(self, config):
"""Creates an instance of the StreetLang level.
Args:
config: config dict of various settings.
"""
super(InstructionsDensification, self).__init__(config)
self._max_reward_per_cone = config['max_reward_per_cone']
self._cone_radius_meters = config['cone_radius_meters']
self._min_distance_reached = float('inf')
self._distance_to_next_waypoint = float('inf')
# We cannot have coins in this game.
assert config['proportion_of_panos_with_coins'] == 0
def on_reset(self, streetlearn):
"""Gets called after StreetLearn:reset().
Args:
streetlearn: a streetlearn instance.
Returns:
A newly populated pano_id_to_color dictionary.
"""
self._min_distance_reached = float('inf')
self._distance_to_next_waypoint = float('inf')
return super(InstructionsDensification, self).on_reset(streetlearn)
def get_reward(self, streetlearn):
"""Returns the reward from the last step.
Args:
streetlearn: a StreetLearn instance.
Returns:
reward: the reward from the last step.
"""
# Calculate distance to next waypoint for _check_reward to use.
next_waypoint_pano = self._pano_by_step[self._current_step + 1]
self._distance_to_next_waypoint = streetlearn.engine.GetPanoDistance(
streetlearn.current_pano_id, next_waypoint_pano)
# Check if pano ID is within a cone from the next waypoint.
dense_reward = 0
if self._distance_to_next_waypoint < min(self._cone_radius_meters,
self._min_distance_reached):
dense_reward = (
self._max_reward_per_cone *
(self._cone_radius_meters - self._distance_to_next_waypoint) /
self._cone_radius_meters)
self._min_distance_reached = self._distance_to_next_waypoint
logging.info('distance_to_next_waypoint=%f, extra reward=%f',
self._distance_to_next_waypoint, dense_reward)
# Compute the regular reward using the logic from the base class.
prev_step = self._current_step
reward = super(InstructionsDensification, self).get_reward(streetlearn)
# Reset the minimum distance threshold?
if prev_step != self._current_step:
self._min_distance_reached = float('inf')
return reward + dense_reward
|
streetlearn-master
|
streetlearn/python/environment/instructions_densification.py
|
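The cone shaping in get_reward above decays linearly from max_reward_per_cone at the waypoint to 0 at cone_radius_meters, and only pays out while the agent improves on its minimum distance reached. A worked sketch with the default config values:

max_reward_per_cone = 0.49
cone_radius_meters = 50.0
distance_to_next_waypoint = 10.0
dense_reward = (max_reward_per_cone *
                (cone_radius_meters - distance_to_next_waypoint) /
                cone_radius_meters)
assert abs(dense_reward - 0.392) < 1e-9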
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python interface to the StreetLearn engine.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from streetlearn.python.environment.coin_game import CoinGame
from streetlearn.python.environment.courier_game import CourierGame
from streetlearn.python.environment.curriculum_courier_game import CurriculumCourierGame
from streetlearn.python.environment.default_config import ApplyDefaults
from streetlearn.python.environment.default_config import CreateGame
from streetlearn.python.environment.exploration_game import ExplorationGame
from streetlearn.python.environment.game import Game
from streetlearn.python.environment.goal_instruction_game import GoalInstructionGame
from streetlearn.python.environment.incremental_instruction_game import IncrementalInstructionGame
from streetlearn.python.environment.observations import Observation
from streetlearn.python.environment.step_by_step_instruction_game import StepByStepInstructionGame
from streetlearn.python.environment.streetlearn import get_action_set
from streetlearn.python.environment.streetlearn import StreetLearn
from streetlearn.python.environment.thumbnail_helper import ThumbnailHelper
|
streetlearn-master
|
streetlearn/python/environment/__init__.py
|
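A minimal end-to-end sketch of the package API exported above, assuming a locally downloaded StreetLearn dataset (the path is a placeholder):

from streetlearn.python.environment import ApplyDefaults, CreateGame, StreetLearn

config = ApplyDefaults({'observations': ['view_image', 'yaw'],
                        'game_name': 'coin_game'})
game = CreateGame(config['game_name'], config)
env = StreetLearn('/path/to/dataset', config, game)
env.reset()
action = env.action_set()[0]  # the move-forward action vector
observation, reward, done, info = env.step(action)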
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""StreetLearn level for the goal reward instruction-following game.
In this environment, the agent receives a reward for reaching the goal of
a given set of instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from streetlearn.python.environment import instructions_curriculum
class GoalInstructionGame(instructions_curriculum.InstructionsCurriculum):
"""StreetLang game with goal reward only."""
def __init__(self, config):
"""Creates an instance of the StreetLearn level.
Args:
config: config dict of various settings.
"""
super(GoalInstructionGame, self).__init__(config)
# Disable waypoint rewards.
self._reward_at_waypoint = 0
|
streetlearn-master
|
streetlearn/python/environment/goal_instruction_game.py
|
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The StreetLearn RL environment.
Episodes take place either in a mini-map created by performing a breadth-first
traversal of the StreetView graph starting from a starting location, or in
the entire fully-connected graph.
Observations:
{
view_image: numpy array of dimension [3, height, width] containing the
street imagery.
graph_image: numpy array of dimension [3, graph_height, graph_width]
containing the map graph image.
view_image_hwc: numpy array of dimension [height, width, 3] containing
the street imagery.
graph_image_hwc: numpy array of dimension [graph_height, graph_width, 3]
containing the map graph images.
metadata: learning_deepmind.datasets.street_learn.Pano proto
without compressed_image.
target_metadata: learning_deepmind.datasets.street_learn.Pano proto
without compressed_image.
}
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import inflection
import numpy as np
import six
import time
from streetlearn.engine.python import color
from streetlearn.engine.python import streetlearn_engine
from streetlearn.python.environment import default_config
from streetlearn.python.environment import observations
_MIN_ZOOM = 1
_MAX_ZOOM = 32
def _action(*entries):
return np.array(entries, dtype=np.float64)
ACTIONS = {
'move_forward': _action(1, 0.0, 0.0, 0.0),
'horizontal_rotation': _action(0, 1.0, 0.0, 0.0),
'vertical_rotation': _action(0, 0.0, 1.0, 0.0),
'map_zoom': _action(0, 0.0, 0.0, 1.0),
}
NUM_ACTIONS = 4
ACTION_SETS = {
"streetlearn_default": lambda rotation_speed: (
ACTIONS["move_forward"],
ACTIONS["horizontal_rotation"] * (-rotation_speed),
ACTIONS["horizontal_rotation"] * rotation_speed),
"streetlearn_fast_rotate": lambda rotation_speed: (
ACTIONS["move_forward"],
ACTIONS["horizontal_rotation"] * (-rotation_speed),
ACTIONS["horizontal_rotation"] * (-rotation_speed * 3),
ACTIONS["horizontal_rotation"] * rotation_speed,
ACTIONS["horizontal_rotation"] * rotation_speed * 3),
"streetlearn_tilt": lambda rotation_speed: (
ACTIONS["move_forward"],
ACTIONS["horizontal_rotation"] * (-rotation_speed),
ACTIONS["horizontal_rotation"] * rotation_speed,
ACTIONS["vertical_rotation"] * rotation_speed,
ACTIONS["vertical_rotation"] * (-rotation_speed)),
}
def get_action_set(action_spec, rotation_speed):
"""Returns the set of StreetLearn actions for the given action_spec."""
# If action_spec is a string, it should be the name of a standard action set.
if isinstance(action_spec, six.string_types):
if action_spec not in ACTION_SETS:
raise ValueError("Unrecognized action specification %s." % action_spec)
else:
return np.array(ACTION_SETS[action_spec](rotation_speed), dtype=np.float64)
raise ValueError("Action specification %s not a string." % action_spec)
def _log_dictionary(dictionary):
for k, v in dictionary.items():
if isinstance(v, (int, float, bool, list, str)):
logging.info(k + ': ' + str(v))
class StreetLearn(object):
"""The Streetlearn environment."""
def __init__(self, dataset_path, config, game, engine=None):
"""Construct the StreetLearn environment.
Args:
dataset_path: filesystem path where the dataset resides.
config: dictionary containing various config settings. Will be extended
with defaults from default_config.DEFAULT_CONFIG.
game: an instance of Game.
engine: an instance of the StreetLearn engine (used when cloning an
environment).
"""
assert game, "Did not provide game."
logging.info('dataset_path:')
logging.info(dataset_path)
logging.info('config:')
_log_dictionary(config)
logging.info('game:')
logging.info(game)
self._config = default_config.ApplyDefaults(config)
self._seed = self._config["seed"]
self._start_pano_id = self._config["start_pano"]
self._zoom = self._config["graph_zoom"]
self._black_on_white = self._config["graph_black_on_white"]
self._frame_cap = self._config["frame_cap"]
self._field_of_view = self._config["field_of_view"]
self._neighbor_resolution = self._config["neighbor_resolution"]
self._sample_graph_depth = self._config["sample_graph_depth"]
self._min_graph_depth = self._config["min_graph_depth"]
self._max_graph_depth = self._config["max_graph_depth"]
self._full_graph = self._config["full_graph"]
self._color_for_observer = color.Color(*self._config["color_for_observer"])
self._action_spec = self._config["action_spec"]
self._rotation_speed = self._config["rotation_speed"]
self._auto_reset = self._config["auto_reset"]
self._action_set = get_action_set(self._action_spec, self._rotation_speed)
logging.info('Action set:')
logging.info(self._action_set)
self._bbox_lat_min = self._config["bbox_lat_min"]
self._bbox_lat_max = self._config["bbox_lat_max"]
self._bbox_lng_min = self._config["bbox_lng_min"]
self._bbox_lng_max = self._config["bbox_lng_max"]
self._game = game
self._current_pano_id = None
self._episode_id = -1
self._frame_count = 0
self._prev_reset = time.time()
if engine:
logging.info("Cloning an existing StreetLearnEngine.")
self._engine = engine.Clone(
width=self._config["width"],
height=self._config["height"],
graph_width=self._config["graph_width"],
graph_height=self._config["graph_height"],
status_height=self._config["status_height"],
field_of_view=self._field_of_view,
min_graph_depth=self._min_graph_depth,
max_graph_depth=self._max_graph_depth)
else:
logging.info("Creating an new StreetLearnEngine.")
self._engine = streetlearn_engine.StreetLearnEngine.Create(
dataset_path,
width=self._config["width"],
height=self._config["height"],
graph_width=self._config["graph_width"],
graph_height=self._config["graph_height"],
status_height=self._config["status_height"],
field_of_view=self._field_of_view,
min_graph_depth=self._min_graph_depth,
max_graph_depth=self._max_graph_depth,
max_cache_size=self._config["max_cache_size"])
assert self._engine, "Could not initialise engine from %r." % dataset_path
self._observations = []
for name in self._config["observations"]:
try:
self._observations.append(observations.Observation.create(name, self))
except ValueError as e:
logging.warning(str(e))
self._reward = 0
self._prev_reward = 0
self._prev_action = self._action_set[0]
self._done = False
self._info = {}
@property
def config(self):
return self._config
@property
def seed(self):
return self._seed
@property
def game(self):
return self._game
@property
def field_of_view(self):
return self._field_of_view
@property
def current_pano_id(self):
return self._current_pano_id
@property
def frame_cap(self):
return self._frame_cap
@frame_cap.setter
def frame_cap(self, value):
self._frame_cap = value
@property
def frame_count(self):
return self._frame_count
@property
def graph(self):
return self._graph
@property
def engine(self):
return self._engine
@property
def neighbor_resolution(self):
return self._neighbor_resolution
@property
def bbox_lat_min(self):
return self._bbox_lat_min
@property
def bbox_lat_max(self):
return self._bbox_lat_max
@property
def bbox_lng_min(self):
return self._bbox_lng_min
@property
def bbox_lng_max(self):
return self._bbox_lng_max
@property
def cache_size(self):
return self._engine.GetNodeCacheSize()
def observation_spec(self):
"""Returns the observation spec, dependent on the observation format."""
return {observation.name: observation.observation_spec
for observation in self._observations}
def action_set(self):
"""Returns the set of actions, mapping integer actions to 1D arrays."""
return self._action_set
def action_spec(self):
"""Returns the action spec."""
return ACTIONS
def reset(self):
"""Start a new episode."""
reset_time = time.time()
logging.info('reset: seed %d, previous episode (%d frames) lasted %f sec',
self._seed, self._frame_count, reset_time - self._prev_reset)
self._prev_reset = reset_time
self._prev_reward = 0
self._prev_action = self._action_set[0]
self._frame_count = 0
self._episode_id += 1
if self._sample_graph_depth:
max_depth = np.random.randint(self._min_graph_depth,
self._max_graph_depth + 1)
self._engine.SetGraphDepth(self._min_graph_depth, max_depth)
self._engine.InitEpisode(self._episode_id, self._seed)
# Build a new graph if we don't have one yet.
if not self._current_pano_id:
if self._full_graph:
self._current_pano_id = self._engine.BuildEntireGraph()
elif self._start_pano_id:
self._current_pano_id = self._engine.BuildGraphWithRoot(
self._start_pano_id)
else:
self._current_pano_id = self._engine.BuildRandomGraph()
logging.info('seed %d: built new graph with root %s',
self._seed, self._current_pano_id)
# else respawn in current graph.
elif not self._start_pano_id:
self._current_pano_id = np.random.choice(
list(self._engine.GetGraph().keys()))
self._engine.SetPosition(self._current_pano_id)
logging.info('seed %d: reusing existing graph and respawning at %s',
self._seed, self._current_pano_id)
self._graph = self._engine.GetGraph()
highlighted_panos = self._game.on_reset(self)
self._engine.InitGraphRenderer(self._color_for_observer, highlighted_panos,
self._black_on_white)
self._engine.SetZoom(_MAX_ZOOM)
def goto(self, pano_id, yaw):
"""Go to a specific pano and yaw in the environment.
Args:
pano_id: a string containing the ID of a pano.
yaw: a float with relative yaw w.r.t. north.
Returns:
observation: tuple with observations.
"""
current_pano_id = self._engine.SetPosition(pano_id)
assert pano_id == current_pano_id
yaw = (yaw + 180) % 360 - 180
self._engine.RotateObserver(yaw, 0.0)
assert yaw == self._engine.GetYaw()
return self.observation()
def step(self, action):
"""Takes a step in the environment.
Args:
action: a 1d array containing a combination of actions.
Returns:
observation: tuple with observations for the last time step.
reward: scalar reward at the last time step.
done: boolean indicating the end of an episode.
info: dictionary with additional debug information.
"""
self._frame_count += 1
if not isinstance(action, np.ndarray):
action = np.array(action, dtype=np.float64)
self._prev_action = action
assert action.size == NUM_ACTIONS, "Wrong number of actions."
move_forward = np.dot(action, ACTIONS['move_forward'])
horizontal_rotation = np.dot(action, ACTIONS['horizontal_rotation'])
vertical_rotation = np.dot(action, ACTIONS['vertical_rotation'])
map_zoom = np.dot(action, ACTIONS['map_zoom'])
if move_forward:
self._current_pano_id = self._engine.MoveToNextPano()
if map_zoom > 0:
self._zoom = min(self._zoom * 2, _MAX_ZOOM)
elif map_zoom < 0:
self._zoom = max(self._zoom / 2, _MIN_ZOOM)
if horizontal_rotation or vertical_rotation:
self._engine.RotateObserver(horizontal_rotation, vertical_rotation)
self._engine.SetZoom(self._zoom)
self._game.on_step(self)
# Update the reward and done flag. Because we do not know the code logic
# inside each game, it is safer to obtain these immediately after step(),
# and store them for subsequent calls to reward(), done() and info().
self._prev_reward = self._reward
self._reward = self._game.get_reward(self)
self._done = (self._frame_count > self._frame_cap) or self._game.done()
self._info = self._game.get_info(self)
if self._auto_reset and self._done:
self.reset()
# Return
return self.observation(), self.reward(), self.done(), self.info()
def observation(self):
"""Returns the observations for the last time step."""
return {item.name: item.observation for item in self._observations}
def reward(self):
"""Returns the reward for the last time step."""
return self._reward
def done(self):
"""Return a flag indicating the end of the current episode."""
return self._done
def info(self):
"""Return a dictionary with environment information at the current step."""
return self._info
def prev_reward(self):
"""Returns the reward for the previous time step."""
return self._prev_reward
def prev_action(self):
"""Returns the action for the previous time step."""
return self._prev_action
def get_metadata(self, pano_id):
"""Return the metadata corresponding to the selected pano.
Args:
pano_id: a string containing the ID of a pano.
Returns:
metadata: a protocol buffer with the pano metadata.
"""
if hasattr(self, '_graph') and pano_id in self.graph:
return self._engine.GetMetadata(pano_id)
else:
return None
def render(self):
"""Empty function, for compatibility with OpenAI Gym."""
pass
|
streetlearn-master
|
streetlearn/python/environment/streetlearn.py
|
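A sketch of the action machinery above: get_action_set expands a named specification into per-action 4-vectors scaled by the rotation speed, and step() decomposes whichever vector it receives via dot products with the ACTIONS table.

from streetlearn.python.environment.streetlearn import get_action_set

actions = get_action_set('streetlearn_fast_rotate', rotation_speed=22.5)
# Five 4-vectors: move forward, then rotations of -22.5, -67.5, +22.5
# and +67.5 degrees.
assert actions.shape == (5, 4)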
"""Thumbnail helper class used in Taxi and Streetlang levels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
class ThumbnailHelper(object):
"""Thumbnail helper class."""
def __init__(self):
self._width = None
self._height = None
def get_thumbnail(self, streetlearn, pano_id, heading):
"""Fetch the thumbnail from the environment.
Args:
streetlearn: a streetlearn instance.
pano_id: Pano id of the thumbnail.
heading: Heading in degrees for the thumbnail.
Returns:
Thumbnail ndarray.
"""
observation = streetlearn.goto(pano_id, heading)
thumbnail = observation['view_image_hwc']
if not self._width:
self._width = streetlearn.config['width']
self._height = streetlearn.config['height']
thumbnail = thumbnail.reshape([self._height, self._width, 3])
return thumbnail
|
streetlearn-master
|
streetlearn/python/environment/thumbnail_helper.py
|
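A usage sketch for the helper above; env is an already constructed StreetLearn instance configured with the 'view_image_hwc' observation, and pano_id a valid pano in its graph (both are assumptions here, not values from the repository):

helper = ThumbnailHelper()
thumbnail = helper.get_thumbnail(env, pano_id, heading=0.0)  # (h, w, 3) uint8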
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Coin game level with early termination.
In this environment, the agent receives a reward for every coin it collects,
and the episode ends when all the coins are collected.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from streetlearn.python.environment import coin_game
class ExplorationGame(coin_game.CoinGame):
def done(self):
"""Returns a flag indicating the end of the current episode.
This game ends when all the coins are collected.
"""
return not bool(self._coin_pano_id_set)
|
streetlearn-master
|
streetlearn/python/environment/exploration_game.py
|
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""StreetLearn level for the courier task with random goals/targets.
In this environment, the agent receives a reward for every coin it collects and
an extra reward for locating the goal pano.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import numpy as np
import six
from streetlearn.engine.python import color
from streetlearn.python.environment import coin_game
class CourierGame(coin_game.CoinGame):
"""Coin game that gives extra reward for finding the goal pano. A courier goal
is randomly selected from panos in the graph. On success or timeout, a new
goal is chosen. The episode ends after a fixed episode length.
"""
def __init__(self, config):
"""Constructor.
This coin game gives extra reward for finding the goal pano, and resets the
goal once the goal has been found (or on timeout). Panos can be assigned
rewards (coins) randomly and the agent will receive the reward the first
time they visit these panos.
Args:
config: config dict of various settings.
"""
super(CourierGame, self).__init__(config)
self._reward_current_goal = config['max_reward_per_goal']
self._min_radius_meters = config['min_radius_meters']
self._max_radius_meters = config['max_radius_meters']
self._goal_timeout = config['goal_timeout']
self._colors['goal'] = color.Color(*config['color_for_goal'])
self._colors['shortest_path'] = color.Color(
*config['color_for_shortest_path'])
self._num_steps_this_goal = 0
self._success_inverse_path_len = []
self._min_distance_reached = np.finfo(np.float32).max
self._initial_distance_to_goal = np.finfo(np.float32).max
self._current_goal_id = None
self._visited_panos = set()
self._shortest_path = {}
self._timed_out = False
def on_reset(self, streetlearn):
"""Gets called after StreetLearn:reset().
Selects a random pano as goal destination and resets episode statistics.
If there are any coins, clears the set of touched panos and randomly
generates reward-yielding coins and populates pano_id_to_color.
Args:
streetlearn: a streetlearn instance.
Returns:
A newly populated pano_id_to_color dictionary.
"""
# Populate the list of panos and assign optional coins to panos.
pano_id_to_color = super(CourierGame, self).on_reset(streetlearn)
# Assign the goal location to one of the panos.
self._pick_random_goal(streetlearn)
# Resets the episode statistics.
self._num_steps_this_goal = 0
self._success_inverse_path_len = []
return pano_id_to_color
@property
def goal_id(self):
"""Returns the ID of the goal Pano."""
return self._current_goal_id
def get_reward(self, streetlearn):
"""Looks at current_pano_id and collects any reward found there.
Args:
streetlearn: A streetlearn instance.
Returns:
reward: the reward from the last step.
"""
# If we have exceeded the maximum steps to look for a goal, reset the goal.
if self._num_steps_this_goal > self._goal_timeout:
logging.info('seed %d, frame %d: courier target TIMEOUT (%d steps)',
streetlearn.seed, streetlearn.frame_count,
self._num_steps_this_goal)
self._num_steps_this_goal = 0
self._pick_random_goal(streetlearn)
(reward, found_goal) = self._compute_reward(streetlearn)
# If we have found the goal, set a new one and update episode statistics.
if found_goal:
logging.info('seed %d, frame %d: courier target FOUND (%d steps)',
streetlearn.seed, streetlearn.frame_count,
self._num_steps_this_goal)
_, num_remaining_panos_to_goal_center = self._shortest_paths(
streetlearn, self._current_goal_id, streetlearn.current_pano_id)
self._success_inverse_path_len.append(
self._compute_spl_current_goal(streetlearn))
self._pick_random_goal(streetlearn)
self._num_steps_this_goal = 0
# Give additional reward if current pano has a coin.
if streetlearn.current_pano_id in self._coin_pano_id_set:
reward += self._reward_per_coin
self._coin_pano_id_set.remove(streetlearn.current_pano_id)
self._num_steps_this_goal += 1
return reward
def get_info(self, streetlearn):
""""Returns current information about the state of the environment.
Args:
streetlearn: a StreetLearn instance.
Returns:
info: information from the environment at the last step.
"""
info = super(CourierGame, self).get_info(streetlearn)
info['num_steps_this_goal'] = self._num_steps_this_goal
info['current_goal_id'] = self._current_goal_id
info['min_distance_reached'] = self._min_distance_reached
info['initial_distance_to_goal'] = self._initial_distance_to_goal
info['reward_current_goal'] = self._reward_current_goal
num_successes = len(self._success_inverse_path_len)
info['num_successes'] = num_successes
info['spl'] = sum(self._success_inverse_path_len) / (num_successes + 1)
if num_successes > 0:
info['spl_without_last_goal'] = (
sum(self._success_inverse_path_len) / num_successes)
else:
info['spl_without_last_goal'] = 0
next_pano_id = self._panos_to_goal[streetlearn.current_pano_id]
info['next_pano_id'] = next_pano_id
bearing_to_next_pano = streetlearn.engine.GetPanoBearing(
streetlearn.current_pano_id, next_pano_id) - streetlearn.engine.GetYaw()
info['bearing_to_next_pano'] = (bearing_to_next_pano + 180) % 360 - 180
return info
def done(self):
"""Returns a flag indicating the end of the current episode.
This game ends only at the end of the episode or if the goal times out.
During a single episode, every time a goal is found, a new one is chosen,
until the time runs out.
"""
if self._timed_out:
self._timed_out = False
return True
else:
return False
def _sample_random_goal(self, streetlearn):
"""Randomly sets a new pano for the current goal.
Args:
streetlearn: The StreetLearn environment.
"""
goals = [goal for goal in streetlearn.graph
if ((goal != self._current_goal_id) and
(goal != streetlearn.current_pano_id))]
self._current_goal_id = np.random.choice(goals)
self._min_distance_reached = streetlearn.engine.GetPanoDistance(
streetlearn.current_pano_id, self._current_goal_id)
self._initial_distance_to_goal = self._min_distance_reached
def _pick_random_goal(self, streetlearn):
"""Randomly sets a new pano for the current goal.
Args:
streetlearn: The StreetLearn environment.
"""
self._visited_panos.clear()
self._sample_random_goal(streetlearn)
logging.info('seed %d, frame %d: new goal id: %s distance: %f',
streetlearn.seed, streetlearn.frame_count, self.goal_id,
self._min_distance_reached)
pano_data = streetlearn.engine.GetMetadata(self.goal_id).pano
logging.info('seed %d: new goal at (%f, %f)',
streetlearn.seed, pano_data.coords.lat, pano_data.coords.lng)
# Compute the extended graph and shortest path to goal to estimate
# the reward to give to the agent.
shortest_path, num_panos = self._shortest_paths(
streetlearn, self._current_goal_id, streetlearn.current_pano_id)
self._reward_current_goal = num_panos
logging.info('seed %d: goal reward depends on #panos to goal: %d',
streetlearn.seed, self._reward_current_goal)
# Decorate the graph.
self._pano_id_to_color = {coin_pano_id: self._colors['coin']
for coin_pano_id in self._coin_pano_id_set}
self._update_pano_id_to_color()
for pano_id in six.iterkeys(shortest_path):
self._pano_id_to_color[pano_id] = self._colors['shortest_path']
self._pano_id_to_color[self.goal_id] = self._colors['goal']
def _compute_reward(self, streetlearn):
"""Reward is a piecewise linear function of distance to the goal.
If agent is greater than max_radius_meters from the goal, reward is 0. If
agent is less than min_radius_reters, reward is max_reward_per_goal. Between
min and max radius the reward is linear between 0 and max_reward_per_goal.
Args:
streetlearn: The StreetLearn environment.
Returns:
The reward for the current step.
"""
# Do not give rewards for already visited panos.
reward = 0
found_goal = False
if streetlearn.current_pano_id in self._visited_panos:
return (reward, found_goal)
# Mark the pano as visited and compute distance to goal.
self._visited_panos.add(streetlearn.current_pano_id)
distance_to_goal = streetlearn.engine.GetPanoDistance(
streetlearn.current_pano_id, self.goal_id)
if distance_to_goal < self._min_radius_meters:
# Have we reached the goal?
reward = self._reward_current_goal
found_goal = True
logging.info(
'seed %d, frame %d: reached goal, distance_to_goal=%s, reward=%s',
streetlearn.seed, streetlearn.frame_count, distance_to_goal, reward)
else:
if distance_to_goal < self._max_radius_meters:
# Early reward shaping.
if distance_to_goal < self._min_distance_reached:
reward = (self._reward_current_goal *
(self._max_radius_meters - distance_to_goal) /
(self._max_radius_meters - self._min_radius_meters))
self._min_distance_reached = distance_to_goal
logging.info('seed %d, frame %d: distance_to_goal=%f, reward=%d',
streetlearn.seed, streetlearn.frame_count,
distance_to_goal, reward)
return (reward, found_goal)
def _compute_spl_current_goal(self, streetlearn):
"""Compute the success weighted by inverse path length for the current goal.
We use the SPL definition from Eq. 1 in the following paper:
Anderson et al. (2018) "On Evaluation of Embodied Navigation Agents"
https://arxiv.org/pdf/1807.06757.pdf
Args:
streetlearn: The StreetLearn environment.
Returns:
The SPL metric for the current goal.
"""
# Since reaching the goal is defined as being within a circle around the
# goal pano, we subtract the panoramas within that circle from the shortest
# path length estimate, as well as from the actual path length.
# We add 1 to handle cases when the agent spawned within that circle.
_, num_remaining_panos_to_goal = self._shortest_paths(
streetlearn, self._current_goal_id, streetlearn.current_pano_id)
shortest_path_len = self._reward_current_goal - num_remaining_panos_to_goal
shortest_path_len = max(shortest_path_len, 1)
actual_path_len = len(self._visited_panos) - num_remaining_panos_to_goal
actual_path_len = max(actual_path_len, 1)
return shortest_path_len / max(actual_path_len, shortest_path_len)
|
streetlearn-master
|
streetlearn/python/environment/courier_game.py
|
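The SPL metric cited above (Anderson et al., 2018, Eq. 1) weights each success by the ratio of shortest path length to actual path length. A standalone restatement of the per-goal term used in _compute_spl_current_goal (the function name is illustrative):

def spl_term(shortest_path_len, actual_path_len):
  """Per-goal success weighted by inverse path length, in (0, 1]."""
  shortest_path_len = max(shortest_path_len, 1)
  actual_path_len = max(actual_path_len, 1)
  return shortest_path_len / max(actual_path_len, shortest_path_len)

assert spl_term(10, 25) == 0.4  # took 25 panos where 10 sufficed
assert spl_term(10, 7) == 1.0   # SPL never exceeds 1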
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""StreetLearn level for the single instruction-following game with curriculum.
In this environment, the agent receives a reward for every waypoint it hits
as well as a larger reward for reaching the final goal. At any point in time
the agent will receive exactly one instruction, matching the next waypoint.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from streetlearn.python.environment import instructions_densification
class StepByStepInstructionGame(
instructions_densification.InstructionsDensification):
"""StreetLang game for following a single instructions at a time."""
def on_reset(self, streetlearn):
"""Gets called after StreetLearn:reset().
Args:
streetlearn: a streetlearn instance.
Returns:
A newly populated pano_id_to_color dictionary.
"""
super(StepByStepInstructionGame, self).on_reset(streetlearn)
# Save instruction and thumbnail vectors into a separate holder.
# It is sufficient to use _with_goal as this is a superset of the other.
self._all_thumbs = self._thumbnails.copy()
self._all_instrs = self._instructions
# Initialise everything to the first entry
self._thumbnails[2:, :] = 0 # Zero all but the first and second one.
self._instructions = [self._all_instrs[0]]
logging.info(self._all_instrs)
logging.info(self._instructions)
return self._pano_id_to_color
def _check_reward(self, pano_id, streetlearn):
"""Check what reward the current pano yields, based on instructions.
Args:
pano_id: centroid pano id.
streetlearn: streetlearn graph for establishing neighbours.
Returns:
The reward for the current step.
"""
reward = 0
previous_step = self._current_step
reward = super(StepByStepInstructionGame, self)._check_reward(
pano_id, streetlearn)
if previous_step != self._current_step and not self._reached_goal:
# If we changed the step, but haven't terminated the game, update instrs.
self._thumbnails[0, :] = self._all_thumbs[self._current_step]
self._thumbnails[1, :] = (
self._all_thumbs[self._current_step + 1])
self._instructions = [self._all_instrs[self._current_step]]
# Remove epsilon from reward to avoid triggering the waypoint switchers.
epsilon = 0.01
reward -= epsilon
logging.info('Switched from step %d to step %d.',
previous_step, self._current_step)
logging.info(self._instructions)
return reward
def get_info(self, streetlearn):
""""Returns current information about the state of the environment.
Args:
streetlearn: a StreetLearn instance.
Returns:
info: information from the environment at the last step.
"""
info = super(StepByStepInstructionGame, self).get_info(streetlearn)
info['current_step'] = 0
return info
|
streetlearn-master
|
streetlearn/python/environment/step_by_step_instruction_game.py
|
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Settings for the StreetLearn environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from streetlearn.python.environment import coin_game
from streetlearn.python.environment import courier_game
from streetlearn.python.environment import curriculum_courier_game
from streetlearn.python.environment import exploration_game
from streetlearn.python.environment import goal_instruction_game
from streetlearn.python.environment import incremental_instruction_game
from streetlearn.python.environment import step_by_step_instruction_game
DEFAULT_CONFIG = {
'seed': 1234,
'width': 320,
'height': 240,
'graph_width': 320,
'graph_height': 240,
'status_height': 10,
'field_of_view': 60,
'min_graph_depth': 200,
'max_graph_depth': 200,
'max_cache_size': 1000,
'bbox_lat_min': -90.0,
'bbox_lat_max': 90.0,
'bbox_lng_min': -180.0,
'bbox_lng_max': 180.0,
'max_reward_per_goal': 10.0,
'min_radius_meters': 100.0,
'max_radius_meters': 200.0,
'timestamp_start_curriculum': 0.0,
'annealing_rate_curriculum': 2.0,
'hours_curriculum_part_1': 0.0,
'hours_curriculum_part_2': 24.0,
'min_goal_distance_curriculum': 500.0,
'max_goal_distance_curriculum': 3500.0,
'instruction_curriculum_type': 0,
'curriculum_num_instructions_part_1': 2,
'curriculum_bin_distance': 100.0,
'curriculum_frame_cap': False,
'curriculum_frame_cap_part_1': 100,
'max_reward_per_cone': 0.49,
'cone_radius_meters': 50.0,
'goal_timeout': 1000,
'frame_cap': 1000,
'full_graph': True,
'sample_graph_depth': True,
'start_pano': '',
'graph_zoom': 32,
'graph_black_on_white': False,
'show_shortest_path': False,
'calculate_ground_truth': False,
'neighbor_resolution': 8,
'color_for_touched_pano': (1.0, 0.5, 0.5),
'color_for_observer': (0.5, 0.5, 1.0),
'color_for_coin': (1.0, 1.0, 0.0),
'color_for_goal': (1.0, 0.0, 0.0),
'color_for_shortest_path': (1.0, 0.0, 1.0),
'color_for_waypoint': (0, 0.7, 0.7),
'observations': ['view_image', 'graph_image'],
'reward_per_coin': 1.0,
'reward_at_waypoint': 0.5,
'reward_at_goal': 1.0,
'instruction_file': None,
'num_instructions': 5,
'max_instructions': 5,
'proportion_of_panos_with_coins': 0.5,
'game_name': 'coin_game',
'action_spec': 'streetlearn_fast_rotate',
'rotation_speed': 22.5,
'auto_reset': True,
}
NAME_TO_GAME = {
'coin_game':
coin_game.CoinGame,
'courier_game':
courier_game.CourierGame,
'curriculum_courier_game':
curriculum_courier_game.CurriculumCourierGame,
'exploration_game':
exploration_game.ExplorationGame,
'goal_instruction_game':
goal_instruction_game.GoalInstructionGame,
'incremental_instruction_game':
incremental_instruction_game.IncrementalInstructionGame,
'step_by_step_instruction_game':
step_by_step_instruction_game.StepByStepInstructionGame,
}
def ApplyDefaults(config):
result = copy.copy(config)
for default_key, default_value in DEFAULT_CONFIG.items():
    if default_key not in result:
      result[default_key] = default_value
    elif default_value is not None:
      assert type(default_value) == type(result[default_key])
return result
def CreateGame(name, config):
assert name in NAME_TO_GAME, "Unknown game name: %r" % name
return NAME_TO_GAME[name](config)
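# Usage sketch (not part of the original module): unspecified keys fall back
# to DEFAULT_CONFIG, and overridden keys must match the default value's type,
# otherwise ApplyDefaults raises an AssertionError.
def _example_create_game():
  config = ApplyDefaults({'game_name': 'coin_game', 'width': 640})
  return CreateGame(config['game_name'], config)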
|
streetlearn-master
|
streetlearn/python/environment/default_config.py
|
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""StreetLearn level for the instruction-following game with a curriculum.
This environment implements the instruction-following game and selects levels
given a particular curriculum strategy, either by slowly increasing the number
of instructions per episode, the maximum distance of routes, or both.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
from absl import logging
import numpy as np
import six
from six.moves import range
from streetlearn.python.environment import instructions_base
_SECONDS_IN_HOUR = 3600
# Curriculum constants.
CURRICULUM_NONE = 0
CURRICULUM_LENGTH_BASED = 1
CURRICULUM_INSTR_BASED = 2
CURRICULUM_LENGTH_INSTR_BASED = 3
# Verbosity constants
NUM_TRAJECTORIES_VERBOSE = 10000
class InstructionsCurriculum(instructions_base.InstructionsBase):
"""Instruction following game with curriculum on distance or #instructions."""
def __init__(self, config):
"""Creates an instance of the StreetLearn level.
Args:
config: config dict of various settings.
"""
super(InstructionsCurriculum, self).__init__(config)
    # Curriculum types: 0 = none, 1 = distance to goal, 2 = instructions,
    # 3 = both distance and instructions.
self._curriculum_type = config['instruction_curriculum_type']
self._timestamp_start = config['timestamp_start_curriculum']
self._hours_curriculum_part_1 = config['hours_curriculum_part_1']
self._hours_curriculum_part_2 = config['hours_curriculum_part_2']
self._steps_plateau = config.get('curriculum_steps_plateau', 0)
self._steps_ramp = config.get('curriculum_steps_ramp', 0)
self._curriculum_num_instructions_part_1 = config[
'curriculum_num_instructions_part_1']
self._min_goal_distance = config['min_goal_distance_curriculum']
self._max_goal_distance = config['max_goal_distance_curriculum']
self._curriculum_bin_distance = config['curriculum_bin_distance']
if self._curriculum_type != CURRICULUM_NONE:
logging.info('Curriculum starting at time %f', self._timestamp_start)
if (self._curriculum_type == CURRICULUM_LENGTH_BASED) or (
self._curriculum_type == CURRICULUM_LENGTH_INSTR_BASED):
logging.info('Initial plateau: trajectories of distance at most %f',
self._min_goal_distance)
logging.info('Training ramps up to trajectories of distance at most %f',
self._max_goal_distance)
      logging.info('Trajectories binned by distance with bin width %f',
                   self._curriculum_bin_distance)
if (self._curriculum_type == CURRICULUM_INSTR_BASED) or (
self._curriculum_type == CURRICULUM_LENGTH_INSTR_BASED):
logging.info('Initial plateau: trajectories with %d instructions',
self._curriculum_num_instructions_part_1)
logging.info('Training ramps up to trajectories with %d instructions',
self._num_instructions)
logging.info('Initial training plateau lasts for %f hours',
self._hours_curriculum_part_1)
      logging.info('Training ramps up to longer trajectories over %f hours',
                   self._hours_curriculum_part_2)
# Frame cap curriculum
self._curriculum_frame_cap = config['curriculum_frame_cap']
self._curriculum_frame_cap_part_1 = config['curriculum_frame_cap_part_1']
self._curriculum_frame_cap_extra_steps = max(
0, config['frame_cap'] - self._curriculum_frame_cap_part_1)
if self._curriculum_frame_cap:
logging.info('Initial plateau: trajectories with %d frames',
self._curriculum_frame_cap_part_1)
logging.info('Training ramps up to trajectories with %d extra frames',
self._curriculum_frame_cap_extra_steps)
self._init_complete = self.initialize_curricula(True)
def initialize_curricula(self, first_init=False):
"""Initializes the curriculum code.
Args:
first_init: If true, container variables are created. Should
be false for all subsequent calls.
Returns:
True if curriculum has been fully established, False otherwise.
"""
num_bins_distance = int(math.ceil(
self._max_goal_distance / self._curriculum_bin_distance))
if first_init:
self._curriculum_count = 0
self._trajectory_data_map_per_distance = {
k: [] for k in range(num_bins_distance + 1)
}
self._trajectory_data_map_per_waypoints = {
k: [] for k in range(self._num_instructions + 1)
}
if self._curriculum_type == CURRICULUM_LENGTH_BASED:
# Bin the trajectories by length
for index in range(self._num_trajectories):
v = self._trajectory_data[index]
self._curriculum_count += 1
# Note: goal.length stores the overall length of a route.
bin_distance = int(
math.ceil(v.goal.length / self._curriculum_bin_distance))
for k in range(bin_distance, num_bins_distance + 1):
self._trajectory_data_map_per_distance[k].append(index)
if (self._curriculum_count % NUM_TRAJECTORIES_VERBOSE) == 0:
logging.info('Processed %d trajectories', self._curriculum_count)
return False
for k in range(num_bins_distance + 1):
logging.info('%d trajectories with distance at most %f',
len(self._trajectory_data_map_per_distance[k]),
k * self._curriculum_bin_distance)
if self._curriculum_type == CURRICULUM_INSTR_BASED:
# Bin the trajectories by number of instructions (waypoints)
for index in range(self._num_trajectories):
v = self._trajectory_data[index]
self._curriculum_count += 1
num_waypoints = len(v.steps)
for k in range(num_waypoints, self._num_instructions + 1):
self._trajectory_data_map_per_waypoints[k].append(index)
if (self._curriculum_count % NUM_TRAJECTORIES_VERBOSE) == 0:
logging.info('Processed %d trajectories', self._curriculum_count)
return False
for k in range(self._num_instructions + 1):
logging.info('%d trajectories with %d instructions',
len(self._trajectory_data_map_per_waypoints[k]), k)
if self._curriculum_type == CURRICULUM_LENGTH_INSTR_BASED:
# Bin the trajectories by length and instructions
for index in range(self._num_trajectories):
v = self._trajectory_data[index]
self._curriculum_count += 1
bin_distance = int(
math.ceil(v.goal.length / self._curriculum_bin_distance))
for k in range(bin_distance, num_bins_distance + 1):
self._trajectory_data_map_per_distance[k].append(index)
num_waypoints = len(v.steps)
for k in range(num_waypoints, self._num_instructions + 1):
self._trajectory_data_map_per_waypoints[k].append(index)
if (self._curriculum_count % NUM_TRAJECTORIES_VERBOSE) == 0:
logging.info('Processed %d trajectories', self._curriculum_count)
return False
for k in range(num_bins_distance + 1):
logging.info('%d trajectories with distance at most %f',
len(self._trajectory_data_map_per_distance[k]),
k * self._curriculum_bin_distance)
for k in range(self._num_instructions + 1):
logging.info('%d trajectories with %d instructions',
len(self._trajectory_data_map_per_waypoints[k]), k)
return True
def on_reset(self, streetlearn):
"""Gets called after StreetLearn:reset().
Args:
streetlearn: a streetlearn instance.
Returns:
A newly populated pano_id_to_color dictionary.
"""
# Continue initialization of the curricula.
if not self._init_complete:
self._init_complete = self.initialize_curricula()
return super(InstructionsCurriculum, self).on_reset(streetlearn)
def _ratio_training(self):
"""Updates the fraction of training curriculum based on elapsed time."""
hours_train = (time.time() - self._timestamp_start) / _SECONDS_IN_HOUR
if hours_train > self._hours_curriculum_part_1:
ratio_training = hours_train - self._hours_curriculum_part_1
ratio_training /= self._hours_curriculum_part_2
ratio_training = max(min(ratio_training, 1.0), 0.0)
else:
ratio_training = 0
logging.info('Hours elapsed: %f, ratio: %f', hours_train, ratio_training)
return ratio_training
def _sample_trajectory(self, streetlearn):
"""Sample a trajectory.
Args:
streetlearn: Streetlearn instance.
Returns:
trajectory object.
"""
if self._curriculum_type != CURRICULUM_NONE or self._curriculum_frame_cap:
ratio_training = self._ratio_training()
      # Optionally apply a curriculum to the cap on the number of frames.
      if self._curriculum_frame_cap:
prev_frame_cap = streetlearn.frame_cap
frame_cap = int(
math.ceil(self._curriculum_frame_cap_part_1 + ratio_training *
self._curriculum_frame_cap_extra_steps))
streetlearn.frame_cap = frame_cap
if prev_frame_cap != frame_cap:
logging.info('Changing frame cap from %d to %d', prev_frame_cap,
frame_cap)
if self._curriculum_type == CURRICULUM_NONE:
# Skip the curriculum sampling
return super(InstructionsCurriculum, self)._sample_trajectory(streetlearn)
if self._curriculum_type == CURRICULUM_LENGTH_BASED:
# Curriculum based on the length/distance (in m) from start to goal.
max_distance = self._min_goal_distance
extra_distance = max(0, self._max_goal_distance - self._min_goal_distance)
max_distance += math.ceil(extra_distance * ratio_training)
logging.info('Max distance: %f', max_distance)
bin_distance = int(math.ceil(
max_distance / self._curriculum_bin_distance))
map_trajectories = self._trajectory_data_map_per_distance[bin_distance]
if self._curriculum_type == CURRICULUM_INSTR_BASED:
# Curriculum based on the number of instructions/waypoints.
max_num_instructions = self._curriculum_num_instructions_part_1
num_extra_instructions = max(
0, self._num_instructions - self._curriculum_num_instructions_part_1)
max_num_instructions += math.ceil(num_extra_instructions * ratio_training)
logging.info('Max #instructions: %d', max_num_instructions)
map_trajectories = self._trajectory_data_map_per_waypoints[
max_num_instructions]
if self._curriculum_type == CURRICULUM_LENGTH_INSTR_BASED:
# Curriculum based both on the number of instructions and on length;
# at the beginning, only short trajectories with few waypoints are sampled
      # and at the end, long trajectories with many waypoints are sampled too.
# The final set of trajectories from which one can sample is the
# intersection of the set of length-based curriculum trajectories and of
# the set of instruction-based curriculum trajectories.
max_distance = self._min_goal_distance
extra_distance = max(0, self._max_goal_distance - self._min_goal_distance)
max_distance += math.ceil(extra_distance * ratio_training)
logging.info('Max distance: %f', max_distance)
bin_distance = int(math.ceil(
max_distance / self._curriculum_bin_distance))
map_trajectories_1 = self._trajectory_data_map_per_distance[bin_distance]
max_num_instructions = self._curriculum_num_instructions_part_1
num_extra_instructions = max(
0, self._num_instructions - self._curriculum_num_instructions_part_1)
max_num_instructions += math.ceil(num_extra_instructions * ratio_training)
logging.info('Max #instructions: %d', max_num_instructions)
map_trajectories_2 = self._trajectory_data_map_per_waypoints[
max_num_instructions]
map_trajectories = list(set(map_trajectories_1) & set(map_trajectories_2))
logging.info('Intersection of two sets: %d & %d -> %d',
len(map_trajectories_1), len(map_trajectories_2),
len(map_trajectories))
if map_trajectories:
i = np.random.choice(map_trajectories)
self._trajectory = self._trajectory_data[i]
return self._trajectory
logging.info('Could not find trajectories for ratio training time/steps %f',
ratio_training)
logging.info('Sampling trajectory without curriculum')
    self._trajectory = super(InstructionsCurriculum,
                             self)._sample_trajectory(streetlearn)
return self._trajectory
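# Standalone sketch of the schedule implemented by _ratio_training and
# _sample_trajectory (illustrative, not part of the original class): after a
# plateau of hours_part_1, a capped quantity ramps linearly from its initial
# value to its final value over hours_part_2 hours.
def _example_curriculum_cap(hours_elapsed, hours_part_1, hours_part_2,
                            initial_value, final_value):
  ratio = max(0.0, min(1.0, (hours_elapsed - hours_part_1) / hours_part_2))
  return initial_value + int(math.ceil((final_value - initial_value) * ratio))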
|
streetlearn-master
|
streetlearn/python/environment/instructions_curriculum.py
|
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""StreetLearn level for the single instruction-following game with curriculum.
In this environment, the agent receives a reward for every waypoint it hits
as well as a larger reward for reaching the final goal.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from streetlearn.python.environment import instructions_densification
class IncrementalInstructionGame(
instructions_densification.InstructionsDensification):
"""StreetLang game with goal and waypoint rewards."""
def __init__(self, config):
"""Creates an instance of the StreetLearn level.
Args:
config: config dict of various settings.
"""
super(IncrementalInstructionGame, self).__init__(config)
# Verify that waypoints receive reward.
    assert self._reward_at_waypoint > 0, "Waypoint reward must be positive."
|
streetlearn-master
|
streetlearn/python/environment/incremental_instruction_game.py
|
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Coin game level.
In this environment, the agent receives a reward for every coin it collects.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import numpy as np
from streetlearn.engine.python import color
from streetlearn.python.environment import game
class CoinGame(game.Game):
"""A simple game that allows an agent to explore the environment and collect
coins yielding rewards, randomly scattered through the environment. Can be
extended as needed to add more complex game logic."""
def __init__(self, config):
"""Constructor.
Panos can be assigned rewards (coins) randomly and the agent will receive
the reward the first time they visit these panos.
Args:
config: config dict of various settings.
"""
super(CoinGame, self).__init__()
# Create colors from the input lists.
self._colors = {
'coin': color.Color(*config['color_for_coin']),
'touched': color.Color(*config['color_for_touched_pano']),
}
self._reward_per_coin = config['reward_per_coin']
# List of panos (will be populated using the streetlearn object).
self._pano_ids = None
# Association between pano id and color.
self._pano_id_to_color = {}
# Panos that (can) contain coins.
self._proportion_of_panos_with_coins = config[
'proportion_of_panos_with_coins']
self._touched_pano_id_set = set()
self._coin_pano_id_set = []
self._num_coins = 0
logging.info('Proportion of panos with coins: %f',
self._proportion_of_panos_with_coins)
logging.info('Reward per coin: %f', self._reward_per_coin)
def on_step(self, streetlearn):
"""Gets called after StreetLearn:step(). Updates the set of touched panos.
Args:
streetlearn: A streetlearn instance.
"""
self._touched_pano_id_set.add(streetlearn.current_pano_id)
self._update_pano_id_to_color()
def on_reset(self, streetlearn):
"""Gets called after StreetLearn:reset().
    Clears the set of touched panos, randomly generates reward-yielding coins,
    and populates pano_id_to_color.
Args:
streetlearn: a streetlearn instance.
Returns:
A newly populated pano_id_to_color dictionary.
"""
logging.info('seed %d, %d/%d coins left, cache size %d, current pano: %s',
streetlearn.seed, len(self._coin_pano_id_set), self._num_coins,
streetlearn.cache_size, streetlearn.current_pano_id)
# Populate list of available panos.
if not self._pano_ids:
self._pano_ids = sorted(streetlearn.graph)
self._touched_pano_id_set.clear()
num_pano_ids = len(self._pano_ids)
self._num_coins = int(self._proportion_of_panos_with_coins * num_pano_ids)
print("Sampling {} coins in graph of {} panos.".format(
self._num_coins, num_pano_ids))
self._coin_pano_id_set = np.random.choice(
self._pano_ids, self._num_coins, replace=False).tolist()
self._pano_id_to_color = {coin_pano_id: self._colors['coin']
for coin_pano_id in self._coin_pano_id_set}
return self._pano_id_to_color
def get_reward(self, streetlearn):
"""Returns the reward from the last step.
In this game, we give rewards when a coin is collected. Coins can be
collected only once per episode and do not reappear.
Args:
streetlearn: a StreetLearn instance.
Returns:
reward: the reward from the last step.
"""
if streetlearn.current_pano_id in self._coin_pano_id_set:
reward = self._reward_per_coin
self._coin_pano_id_set.remove(streetlearn.current_pano_id)
else:
reward = 0
return reward
def get_info(self, streetlearn):
""""Returns current information about the state of the environment.
Args:
streetlearn: a StreetLearn instance.
Returns:
info: information from the environment at the last step.
"""
info = {}
info['num_coins_left'] = len(self._coin_pano_id_set)
info['num_coins'] = self._num_coins
info['current_pano_id'] = streetlearn.current_pano_id
return info
def done(self):
"""Returns a flag indicating the end of the current episode.
This game does not end when all the coins are collected.
"""
return False
def highlighted_panos(self):
"""Returns the list of highlighted panos and their colors."""
return self._pano_id_to_color
def _update_pano_id_to_color(self):
"""Update the pano id to color table."""
self._pano_id_to_color.update({touched_pano_id: self._colors['touched']
for touched_pano_id
in self._touched_pano_id_set})
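# Standalone sketch of the coin mechanics above (illustrative names, not part
# of StreetLearn): coins are sampled once per episode without replacement, and
# each coin yields its reward exactly once.
def _example_coin_episode(pano_ids, proportion, reward_per_coin, visited):
  num_coins = int(proportion * len(pano_ids))
  coins = set(np.random.choice(pano_ids, num_coins, replace=False).tolist())
  total_reward = 0.0
  for pano_id in visited:
    if pano_id in coins:
      total_reward += reward_per_coin
      coins.remove(pano_id)  # Collected coins do not reappear.
  return total_reward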
|
streetlearn-master
|
streetlearn/python/environment/coin_game.py
|
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Setup module for turning StreetLearn into a pip package.
Based on: https://github.com/google/nucleus/blob/master/nucleus/pip_package/setup.py
This should be invoked through build_pip_package.sh, rather than run directly.
"""
import fnmatch
import os
from setuptools import find_packages
from setuptools import setup
def find_files(pattern, root):
"""Return all the files matching pattern below root dir."""
for dirpath, _, files in os.walk(root):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(dirpath, filename)
def is_python_file(fn):
return fn.endswith('.py') or fn.endswith('.pyc')
headers = list(find_files('*.h', 'streetlearn'))
matches = ['../' + x for x in find_files('*', 'external')
if not is_python_file(x)]
so_lib_paths = [
i for i in os.listdir('.')
if os.path.isdir(i) and fnmatch.fnmatch(i, '_solib_*')
]
for path in so_lib_paths:
matches.extend(['../' + x for x in find_files('*', path)
if not is_python_file(x)])
setup(
name='streetlearn',
version='0.1.0',
description='A library to aid navigation research.',
long_description=
"""
TODO
""",
url='https://github.com/deepmind/streetlearn',
author='The StreetLearn Team at DeepMind',
author_email='streetlearn@google.com',
license='Apache 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='navigation tensorflow machine learning',
packages=find_packages(exclude=['g3doc', 'testdata']),
install_requires=['six', 'absl-py', 'inflection', 'wrapt', 'numpy',
'dm-sonnet', 'tensorflow', 'tensorflow-probability'],
headers=headers,
include_package_data=True,
package_data={'streetlearn': matches},
data_files=[],
entry_points={},
project_urls={
'Source': 'https://github.com/deepmind/streetlearn',
'Bug Reports': 'https://github.com/deepmind/streetlearn/issues',
},
zip_safe=False,
)
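# Example (a sketch, not used by setup() above): find_files is a lazy
# generator, so it composes with further filtering without building
# intermediate lists.
_example_python_sources = [f for f in find_files('*.py', 'streetlearn')
                           if not f.endswith('_test.py')]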
|
streetlearn-master
|
streetlearn/pip_package/setup.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for streetlearn_engine clif bindings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from absl.testing import absltest
from streetlearn.engine.python import streetlearn_engine
from streetlearn.engine.python.test_dataset import TestDataset
_PANO_OFFSET = 32
class StreetlearnEngineTest(absltest.TestCase):
def setUp(self):
TestDataset.Generate()
self.dataset_path = TestDataset.GetPath()
def test_build_graph(self):
engine = streetlearn_engine.StreetLearnEngine.Create(
self.dataset_path, TestDataset.kImageWidth, TestDataset.kImageHeight)
engine.InitEpisode(0, 0)
root = engine.BuildGraphWithRoot('1')
self.assertEqual(root, '1')
# Check that the right sized graph is returned.
engine.BuildRandomGraph()
graph = engine.GetGraph()
self.assertEqual(len(graph), TestDataset.kPanoCount)
def test_set_position(self):
engine = streetlearn_engine.StreetLearnEngine.Create(
self.dataset_path, TestDataset.kImageWidth, TestDataset.kImageHeight)
engine.InitEpisode(0, 0)
engine.BuildGraphWithRoot('1')
# Set position a couple of times and check the result.
self.assertEqual(engine.SetPosition('1'), '1')
self.assertEqual(engine.GetPano().id, '1')
self.assertEqual(engine.SetPosition('2'), '2')
self.assertEqual(engine.GetPano().id, '2')
# Currently facing north so cannot move to the next pano.
self.assertEqual(engine.MoveToNextPano(), '2')
# Rotate to face the next pano and move should succeed.
engine.RotateObserver(_PANO_OFFSET, 0.0)
self.assertEqual(engine.MoveToNextPano(), '3')
self.assertEqual(engine.GetPano().id, '3')
def test_pano_calculations(self):
engine = streetlearn_engine.StreetLearnEngine.Create(
self.dataset_path, TestDataset.kImageWidth, TestDataset.kImageHeight)
engine.InitEpisode(0, 0)
engine.BuildGraphWithRoot('1')
self.assertEqual(engine.GetPitch(), 0)
self.assertEqual(engine.GetYaw(), 0)
self.assertAlmostEqual(engine.GetPanoDistance('1', '2'), 130.902, 3)
def test_observation(self):
engine = streetlearn_engine.StreetLearnEngine.Create(
self.dataset_path, TestDataset.kImageWidth, TestDataset.kImageHeight)
engine.InitEpisode(0, 0)
engine.BuildGraphWithRoot('1')
    # Check that observations have the right values.
buffer_size = 3 * TestDataset.kImageWidth * TestDataset.kImageHeight
obs = np.zeros(buffer_size, dtype=np.ubyte)
engine.RenderObservation(obs)
for i in range(0, TestDataset.kImageHeight):
for j in range(0, TestDataset.kImageWidth):
index = i * TestDataset.kImageWidth + j
self.assertIn(obs[index], range(0, 232))
def test_neighbors(self):
engine = streetlearn_engine.StreetLearnEngine.Create(
self.dataset_path, TestDataset.kImageWidth, TestDataset.kImageHeight)
engine.InitEpisode(0, 0)
engine.BuildGraphWithRoot('1')
engine.SetPosition('2')
# Should have two neighbors.
occupancy = engine.GetNeighborOccupancy(4)
self.assertEqual(len(occupancy), 4)
self.assertEqual(occupancy[0], 1)
self.assertEqual(occupancy[1], 0)
self.assertEqual(occupancy[2], 1)
self.assertEqual(occupancy[3], 0)
def test_metadata(self):
engine = streetlearn_engine.StreetLearnEngine.Create(
self.dataset_path, TestDataset.kImageWidth, TestDataset.kImageHeight)
engine.InitEpisode(0, 0)
engine.BuildGraphWithRoot('1')
# Check that the right metadata is returned.
metadata = engine.GetMetadata('1')
self.assertEqual(metadata.pano.id, '1')
self.assertEqual(len(metadata.neighbors), 1)
self.assertEqual(metadata.neighbors[0].id, '2')
self.assertEqual(metadata.graph_depth, 10)
if __name__ == '__main__':
absltest.main()
|
streetlearn-master
|
streetlearn/engine/python/streetlearn_engine_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
streetlearn-master
|
streetlearn/engine/python/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XManager."""
|
xmanager-main
|
__init__.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup configuration specifying XManager dependencies."""
from setuptools import find_namespace_packages
from setuptools import setup
with open('README.md', 'r', encoding='utf-8') as fh:
long_description = fh.read()
setup(
name='xmanager',
version='0.4.0',
description='A framework for managing machine learning experiments',
long_description=long_description,
long_description_content_type='text/markdown',
author='DeepMind Technologies Limited',
packages=find_namespace_packages(exclude=['examples.*']),
package_data={'': ['*.sh', '*.sql', '*.ini', '*.mako']},
python_requires='>=3.10',
install_requires=[
'absl-py',
'alembic==1.4.3',
'async_generator',
'attrs',
'cloud-sql-python-connector',
'docker',
'etils[epath]',
'google-api-core',
'google-api-python-client',
'google-auth',
'google-cloud-aiplatform',
'google-cloud-storage',
'humanize',
'immutabledict',
'kubernetes',
'pyyaml',
'sqlalchemy==1.2.19',
'sqlparse',
'termcolor',
],
entry_points={
'console_scripts': [
'xmanager = xmanager.cli.cli:entrypoint',
],
},
# https://github.com/pypa/warehouse/blob/de4a2e5e2ec26d01bf7813da427ebc4725dccde9/warehouse/templates/packaging/detail.html#L20-L60
project_urls={
'Homepage': 'https://github.com/deepmind/xmanager',
'Issue tracker': 'https://github.com/deepmind/xmanager/issues',
},
)
|
xmanager-main
|
setup.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""XManager launcher for CIFAR10 using TF's MultiWorkerMirroredStrategy using the Kubernetes back-end.
Usage:
xmanager launch examples/cifar10_tensorflow_k8s_multiworker/launcher.py -- \
--xm_wrap_late_bindings [--image_path=gcr.io/path/to/image/tag]
"""
import itertools
from absl import app
from xmanager import xm
from xmanager import xm_local
from xmanager.contrib import xm_tensorflow
def main(_):
with xm_local.create_experiment(
experiment_title='kubernetes_multiworker'
) as experiment:
spec = xm.PythonContainer(
# Package the current directory that this script is in.
path='.',
base_image='gcr.io/deeplearning-platform-release/tf2-gpu.2-6',
entrypoint=xm.ModuleName('cifar10'),
)
[executable] = experiment.package(
[
xm.Packageable(
executable_spec=spec,
executor_spec=xm_local.Kubernetes.Spec(),
args={},
),
]
)
learning_rates = [0.001]
trials = list(
dict([('learning_rate', lr)])
for (lr,) in itertools.product(learning_rates)
)
builder = xm_tensorflow.MultiWorkerMirroredStrategyBuilder(
experiment=experiment,
worker_executable=executable,
worker_executor=xm_local.Kubernetes(
requirements=xm.JobRequirements(t4=1)
),
worker_name='worker',
num_workers=3,
)
for hyperparameters in trials:
experiment.add(builder.gen_job_group(), args=hyperparameters)
if __name__ == '__main__':
app.run(main)
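# Sketch (not in the original launcher): the trials construction above
# generalises to several hyperparameters with the same itertools.product
# idiom, one dict per combination.
def _example_trials():
  learning_rates = [0.001, 0.01]
  batch_sizes = [64, 256]
  return [dict(learning_rate=lr, batch_size=bs)
          for lr, bs in itertools.product(learning_rates, batch_sizes)]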
|
xmanager-main
|
examples/cifar10_tensorflow_k8s_multiworker/launcher.py
|
# Copyright 2021 The Tensorflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code based on https://www.tensorflow.org/tutorials/images/cnn."""
import os
from absl import app
from absl import flags
import tensorflow as tf
from tensorflow.keras import datasets
from tensorflow.keras import layers
from tensorflow.keras import models
# When using Vertex Tensorboard, the log directory will be present as an
# environment variable.
LOG_DIR = os.environ.get('AIP_TENSORBOARD_LOG_DIR', '')
FLAGS = flags.FLAGS
flags.DEFINE_integer('epochs', 5, 'epochs')
flags.DEFINE_float('learning_rate', 0.001, 'learning rate')
def main(_):
strategy = tf.distribute.MultiWorkerMirroredStrategy()
with strategy.scope():
model = models.Sequential()
model.add(
layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3))
)
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))
optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate)
model.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'],
)
callbacks = []
if LOG_DIR:
callbacks = [
tf.keras.callbacks.TensorBoard(
log_dir=LOG_DIR,
histogram_freq=1,
),
]
(train_images, train_labels), (test_images, test_labels) = (
datasets.cifar10.load_data()
)
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
train_dataset = tf.data.Dataset.from_tensor_slices(
(train_images, train_labels)
)
validation_dataset = tf.data.Dataset.from_tensor_slices(
(test_images, test_labels)
)
train_dataset = train_dataset.batch(32)
validation_dataset = validation_dataset.batch(32)
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
tf.data.experimental.AutoShardPolicy.DATA
)
  # Dataset.with_options returns a new dataset, so reassign the results.
  train_dataset = train_dataset.with_options(options)
  validation_dataset = validation_dataset.with_options(options)
model.fit(
train_dataset,
epochs=FLAGS.epochs,
validation_data=validation_dataset,
callbacks=callbacks,
verbose=2,
)
if __name__ == '__main__':
app.run(main)
|
xmanager-main
|
examples/cifar10_tensorflow_k8s_multiworker/cifar10.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""XManager launcher for CIFAR10.
Usage:
xmanager launch examples/cifar10_tensorflow_tpu/launcher.py
"""
import asyncio
import itertools
import os
from absl import app
from absl import flags
from xmanager import xm
from xmanager import xm_local
from xmanager.cloud import build_image
from xmanager.cloud import vertex
from xmanager.contrib import tpu
FLAGS = flags.FLAGS
flags.DEFINE_string('tensorboard', None, 'Tensorboard instance.')
def main(_):
with xm_local.create_experiment(experiment_title='cifar10') as experiment:
directory = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
# pyformat: disable
spec = xm.PythonContainer(
# Package the current directory that this script is in.
path='.',
        # tpuvm requires Python 3.8 and GLIBC_2.29, which requires at least
# debian:11 or ubuntu:20.04
base_image='ubuntu:20.04',
docker_instructions=(
['RUN apt-get update && apt-get install -y python-is-python3 python3-pip wget'] + # pylint: disable=line-too-long
build_image.default_steps(directory, use_deep_module=False) +
tpu.tpuvm_docker_instructions()),
entrypoint=xm.ModuleName('cifar10'),
)
# pyformat: enable
[executable] = experiment.package(
[
xm.Packageable(
executable_spec=spec,
executor_spec=xm_local.Vertex.Spec(),
args={},
),
]
)
learning_rates = [0.1, 0.001]
trials = list(
dict([('learning_rate', lr)])
for (lr,) in itertools.product(learning_rates)
)
tensorboard = FLAGS.tensorboard
if not tensorboard:
tensorboard = vertex.get_default_client().get_or_create_tensorboard(
'cifar10'
)
tensorboard = asyncio.get_event_loop().run_until_complete(tensorboard)
for i, hyperparameters in enumerate(trials):
output_dir = os.environ.get('GOOGLE_CLOUD_BUCKET_NAME', None)
if output_dir:
output_dir = os.path.join(
output_dir, str(experiment.experiment_id), str(i)
)
tensorboard_capability = xm_local.TensorboardCapability(
name=tensorboard, base_output_directory=output_dir
)
experiment.add(
xm.Job(
executable=executable,
executor=xm_local.Vertex(
requirements=xm.JobRequirements(tpu_v2=8),
tensorboard=tensorboard_capability,
),
args=hyperparameters,
)
)
if __name__ == '__main__':
app.run(main)
|
xmanager-main
|
examples/cifar10_tensorflow_tpu/launcher.py
|
# Copyright 2021 The Tensorflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code based on https://www.tensorflow.org/tutorials/images/cnn."""
import os
from absl import app
from absl import flags
import tensorflow as tf
from tensorflow.keras import datasets
from tensorflow.keras import layers
from tensorflow.keras import models
# When using Vertex Tensorboard, the log directory will be present as an
# environment variable.
LOG_DIR = os.environ.get('AIP_TENSORBOARD_LOG_DIR', '')
FLAGS = flags.FLAGS
flags.DEFINE_integer('epochs', 5, 'epochs')
flags.DEFINE_float('learning_rate', 0.001, 'learning rate')
def main(_):
(train_images, train_labels), (test_images, test_labels) = (
datasets.cifar10.load_data()
)
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='local')
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
with strategy.scope():
model = models.Sequential()
model.add(
layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3))
)
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))
optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate)
model.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'],
)
callbacks = []
if LOG_DIR:
callbacks = [
tf.keras.callbacks.TensorBoard(
log_dir=LOG_DIR,
histogram_freq=1,
),
]
model.fit(
train_images,
train_labels,
epochs=FLAGS.epochs,
validation_data=(test_images, test_labels),
callbacks=callbacks,
verbose=2,
)
if __name__ == '__main__':
app.run(main)
|
xmanager-main
|
examples/cifar10_tensorflow_tpu/cifar10.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""XManager launcher for CIFAR10.
Usage:
xmanager launch examples/cifar10_torch/launcher.py -- \
--xm_wrap_late_bindings \
[--image_path=gcr.io/path/to/image/tag] \
[--platform=gpu]
"""
import itertools
from absl import app
from absl import flags
from xmanager import xm
from xmanager import xm_local
FLAGS = flags.FLAGS
flags.DEFINE_string('image_path', None, 'Image path.')
flags.DEFINE_string('platform', 'cpu', 'cpu/gpu/tpu.')
flags.DEFINE_integer('cores', 1, 'Number of cores. Use 8 if platform==tpu.')
def main(_):
with xm_local.create_experiment(experiment_title='cifar10') as experiment:
if FLAGS.image_path:
spec = xm.Container(image_path=FLAGS.image_path)
else:
# Package the current directory that this script is in.
spec = xm.PythonContainer(
path='.',
# This base_image is experimental and works with cpu/gpu/tpu.
# https://cloud.google.com/ai-platform/deep-learning-containers/docs/choosing-container
base_image='gcr.io/deeplearning-platform-release/pytorch-xla.1-8',
entrypoint=xm.ModuleName('cifar10'),
)
[executable] = experiment.package(
[
xm.Packageable(
executable_spec=spec,
executor_spec=xm_local.Vertex.Spec(),
args={'platform': FLAGS.platform},
),
]
)
batch_sizes = [64, 1024]
learning_rates = [0.1, 0.001]
trials = list(
dict([('batch_size', bs), ('learning_rate', lr)])
for (bs, lr) in itertools.product(batch_sizes, learning_rates)
)
requirements = xm.JobRequirements()
if FLAGS.platform == 'gpu':
requirements = xm.JobRequirements(t4=FLAGS.cores)
elif FLAGS.platform == 'tpu':
requirements = xm.JobRequirements(tpu_v3=8)
for hyperparameters in trials:
jobs = {}
jobs['coordinator'] = xm.Job(
executable=executable,
executor=xm_local.Vertex(requirements),
args=hyperparameters,
)
experiment.add(xm.JobGroup(**jobs))
break
print('Waiting for async launches to return values...')
print('Launch completed and successful.')
if __name__ == '__main__':
app.run(main)
|
xmanager-main
|
examples/cifar10_torch_xla/launcher.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer for CIFAR-10 using PyTorch XLA on TPUs."""
import os
from absl import app
from absl import flags
import torch
from torch import nn
from torch import optim
import torch_xla # pylint: disable=unused-import
from torch_xla.core import xla_model
from torch_xla.debug import metrics
from torch_xla.distributed import parallel_loader
from torch_xla.distributed import xla_multiprocessing as xmp
import torchvision
from torchvision import datasets
from torchvision import transforms
FLAGS = flags.FLAGS
flags.DEFINE_integer('epochs', 5, 'epochs')
flags.DEFINE_integer('batch_size', 128, 'batch size')
flags.DEFINE_float('learning_rate', 0.1, 'learning rate')
flags.DEFINE_float('momentum', 0.9, 'momentum')
flags.DEFINE_string('platform', 'cpu', 'cpu/gpu/tpu')
_DATA_DIR = '/tmp/cifar10/'
_SERIAL_EXEC = xmp.MpSerialExecutor()
def get_dataset():
"""Download the datasets."""
transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
train_dataset = datasets.CIFAR10(
root=_DATA_DIR, train=True, download=True, transform=transform
)
test_dataset = datasets.CIFAR10(
root=_DATA_DIR, train=False, download=True, transform=transform
)
return train_dataset, test_dataset
def train(model, loader, optimizer, loss_fn):
"""Train the model."""
model.train()
for images, labels in loader:
optimizer.zero_grad()
output = model(images)
loss = loss_fn(output, labels)
loss.backward()
xla_model.optimizer_step(optimizer)
def test(model, loader):
"""Test the model for accuracy."""
model.eval()
total = 0
correct = 0
with torch.no_grad():
for images, labels in loader:
output = model(images)
predictions = output.max(1, keepdim=True)[1]
total += labels.size(0)
correct += predictions.eq(labels.view_as(predictions)).sum().item()
return correct / total
def _mp_fn(_, args):
"""Multiprocessing main function to call."""
torch.manual_seed(0)
  # Using the serial executor avoids having multiple processes
  # download the same data.
train_dataset, test_dataset = _SERIAL_EXEC.run(get_dataset)
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset,
num_replicas=xla_model.xrt_world_size(),
rank=xla_model.get_ordinal(),
shuffle=True,
)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args['batch_size'],
sampler=train_sampler,
drop_last=True,
)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=args['batch_size'], shuffle=False, drop_last=True
)
device = xla_model.xla_device()
resnet_model = torchvision.models.resnet18(pretrained=False)
wrapped_model = xmp.MpModelWrapper(resnet_model)
model = wrapped_model.to(device)
optimizer = optim.SGD(
model.parameters(), lr=args['learning_rate'], momentum=args['momentum']
)
  # Note: nn.NLLLoss expects log-probabilities; resnet18 outputs raw logits.
  loss_fn = nn.NLLLoss()
for epoch in range(args['epochs']):
para_loader = parallel_loader.ParallelLoader(train_loader, [device])
train(model, para_loader.per_device_loader(device), optimizer, loss_fn)
para_loader = parallel_loader.ParallelLoader(test_loader, [device])
accuracy = test(model, para_loader.per_device_loader(device))
xla_model.master_print('Finished training epoch {}'.format(epoch))
xla_model.master_print(
'[xla:{}] Accuracy={:.2f}%'.format(xla_model.get_ordinal(), accuracy),
flush=True,
)
xla_model.master_print(metrics.metrics_report(), flush=True)
def main(_):
nprocs = 1
if FLAGS.platform == 'gpu':
os.environ['GPU_NUM_DEVICES'] = str(torch.cuda.device_count())
os.environ['XLA_FLAGS'] = '--xla_gpu_cuda_data_dir=/usr/local/cuda/'
nprocs = 1
elif FLAGS.platform == 'tpu':
# Only import tensorflow to get TPUClusterResolver()
import tensorflow as tf # pylint: disable=g-import-not-at-top
cluster = tf.distribute.cluster_resolver.TPUClusterResolver()
print('TPU master:', cluster.master())
master = cluster.master().split('://')[-1]
os.environ['XRT_TPU_CONFIG'] = f'tpu_worker;0;{master}'
nprocs = 8
# We convert FLAGS to a dict, so it can be passed to spawned processes.
# When calling spawn, FLAGS will be reset and the parsed values are lost.
# The FLAGS object cannot be passed via args because it cannot be pickled.
args = dict(FLAGS.__flags) # pylint: disable=protected-access
for k in args:
args[k] = args[k].value if args[k].present else args[k].default
xmp.spawn(
_mp_fn,
nprocs=nprocs,
start_method='fork',
args=(args,),
)
if __name__ == '__main__':
app.run(main)
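# Standalone sketch of the FLAGS-to-dict snapshot used in main() above
# (illustrative, not part of the original file): spawned processes re-parse
# flags from scratch, so parsed values are copied into a plain, picklable
# dict before calling xmp.spawn.
def _example_flags_to_dict(flags_obj):
  snapshot = dict(flags_obj.__flags)  # pylint: disable=protected-access
  return {k: v.value if v.present else v.default
          for k, v in snapshot.items()}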
|
xmanager-main
|
examples/cifar10_torch_xla/cifar10.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The entry point for running a Dopamine agent."""
import os
from absl import app
from absl import flags
from absl import logging
from dopamine.discrete_domains import run_experiment
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_multi_string(
'gin_files',
[],
(
'List of paths to gin configuration files (e.g.'
'"dopamine/agents/dqn/dqn.gin").'
),
)
# When using Vertex Tensorboard, the log directory will be present as an
# environment variable.
BASE_DIR = os.environ.get('AIP_TENSORBOARD_LOG_DIR', '/tmp/dopamine_runs')
def main(unused_argv):
logging.set_verbosity(logging.INFO)
tf.compat.v1.disable_v2_behavior()
run_experiment.load_gin_configs(FLAGS.gin_files, [])
runner = run_experiment.create_runner(BASE_DIR)
runner.run_experiment()
if __name__ == '__main__':
app.run(main)
|
xmanager-main
|
examples/dopamine/train.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Launcher for Dopamine.
Usage:
  xmanager launch examples/dopamine/launcher.py -- \
--gin_file=https://raw.githubusercontent.com/google/dopamine/master/dopamine/agents/dqn/configs/dqn_mountaincar.gin
"""
import asyncio
import os
from absl import app
from absl import flags
from xmanager import xm
from xmanager import xm_local
from xmanager.cloud import vertex
FLAGS = flags.FLAGS
flags.DEFINE_string(
'gin_file',
'https://raw.githubusercontent.com/google/dopamine/master/dopamine/agents/dqn/configs/dqn_mountaincar.gin',
'Gin file pulled from https://github.com/google/dopamine.',
)
flags.DEFINE_string('tensorboard', None, 'Tensorboard instance.')
def main(_):
with xm_local.create_experiment(experiment_title='dopamine') as experiment:
gin_file = os.path.basename(FLAGS.gin_file)
add_instruction = f'ADD {FLAGS.gin_file} {gin_file}'
if FLAGS.gin_file.startswith('http'):
add_instruction = f'RUN wget -O ./{gin_file} {FLAGS.gin_file}'
spec = xm.PythonContainer(
docker_instructions=[
'RUN apt update && apt install -y python3-opencv',
'RUN pip install dopamine-rl',
'COPY dopamine/ workdir',
'WORKDIR workdir',
add_instruction,
],
entrypoint=xm.ModuleName('train'),
)
[executable] = experiment.package(
[
xm.Packageable(
executable_spec=spec,
executor_spec=xm_local.Vertex.Spec(),
args={
'gin_files': gin_file,
},
),
]
)
tensorboard = FLAGS.tensorboard
if not tensorboard:
tensorboard = vertex.get_default_client().get_or_create_tensorboard(
'cifar10'
)
tensorboard = asyncio.get_event_loop().run_until_complete(tensorboard)
output_dir = os.environ['GOOGLE_CLOUD_BUCKET_NAME']
output_dir = os.path.join(output_dir, str(experiment.experiment_id))
tensorboard_capability = xm_local.TensorboardCapability(
name=tensorboard, base_output_directory=output_dir
)
experiment.add(
xm.Job(
executable=executable,
executor=xm_local.Vertex(
xm.JobRequirements(t4=1), tensorboard=tensorboard_capability
),
)
)
if __name__ == '__main__':
app.run(main)
|
xmanager-main
|
examples/dopamine/launcher.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""XManager launcher for CIFAR10.
Usage:
xmanager launch examples/cifar10_torch/launcher.py -- \
--xm_wrap_late_bindings [--image_path=gcr.io/path/to/image/tag]
"""
import itertools
from absl import app
from absl import flags
from xmanager import xm
from xmanager import xm_local
from xmanager.cloud import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('image_path', None, 'Image path.')
flags.DEFINE_integer('nodes', 1, 'Number of nodes.')
flags.DEFINE_integer('gpus_per_node', 2, 'Number of GPUs per node.')
@xm.run_in_asyncio_loop
async def main(_):
async with xm_local.create_experiment(
experiment_title='cifar10'
) as experiment:
if FLAGS.image_path:
spec = xm.Container(image_path=FLAGS.image_path)
else:
spec = xm.PythonContainer(
# Package the current directory that this script is in.
path='.',
base_image='gcr.io/deeplearning-platform-release/pytorch-gpu.1-12',
entrypoint=xm.ModuleName('cifar10'),
)
[executable] = experiment.package(
[
xm.Packageable(
executable_spec=spec,
executor_spec=xm_local.Vertex.Spec(),
args={
# TODO: replace workerpool0 with the actual
# name of the job when Vertex AI supports custom name worker
# pools.
'master_addr_port': xm.ShellSafeArg(
utils.get_workerpool_address('workerpool0')
),
},
),
]
)
batch_sizes = [64, 1024]
learning_rates = [0.1, 0.001]
trials = list(
dict([('batch_size', bs), ('learning_rate', lr)])
for (bs, lr) in itertools.product(batch_sizes, learning_rates)
)
work_units = []
for hyperparameters in trials:
job_group = xm.JobGroup()
for i in range(FLAGS.nodes):
hyperparameters = dict(hyperparameters)
hyperparameters['world_size'] = FLAGS.nodes
hyperparameters['rank'] = i
job_group.jobs[f'node_{i}'] = xm.Job(
executable=executable,
executor=xm_local.Vertex(
xm.JobRequirements(t4=FLAGS.gpus_per_node)
),
args=hyperparameters,
)
work_units.append(await experiment.add(job_group))
print('Waiting for async launches to return values...')
for work_unit in work_units:
await work_unit.wait_until_complete()
print('Experiment completed.')
if __name__ == '__main__':
app.run(main)
|
xmanager-main
|
examples/cifar10_torch/launcher.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer for CIFAR-10 using PyTorch on GPUs.
This script has been tested with
image: nvidia/cuda:10.1
pip install torch==1.6.0+cu101 torchvision==0.7.0+cu101
CUDA >= 10.2 returns an NCCL error.
"""
from absl import app
from absl import flags
import numpy as np
import torch
from torch import distributed
from torch import multiprocessing
from torch import nn
from torch import optim
from torch.nn import parallel
from torch.utils import data
import torchvision
from torchvision import transforms
# pylint: disable=g-import-not-at-top
try:
from xmanager.cloud import utils as vertex_utils
except ModuleNotFoundError:
import vertex_utils # a copy of vertex_utils.py is present in the directory.
# pylint: enable=g-import-not-at-top
FLAGS = flags.FLAGS
flags.DEFINE_string('master_addr_port', None, 'master address and port.')
flags.DEFINE_integer(
'world_size', 1, 'Number of nodes/clusters participating in the job.'
)
flags.DEFINE_integer('rank', 0, 'Rank of the current node/cluster.')
flags.DEFINE_string('train_dir', '/tmp/cifar10/train', 'train directory')
flags.DEFINE_string('test_dir', '/tmp/cifar10/test', 'test directory')
flags.DEFINE_integer('epochs', 5, 'epochs')
flags.DEFINE_integer('batch_size', 128, 'batch size')
flags.DEFINE_float('learning_rate', 0.1, 'learning rate')
flags.DEFINE_float('momentum', 0.9, 'momentum')
def train(model, train_loader, device, optimizer, criterion):
"""Train the model."""
model.train()
for images, labels in train_loader:
images, labels = images.to(device), labels.to(device)
optimizer.zero_grad()
outputs = model(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
def test(model, device, test_loader):
"""Test the model."""
model.eval()
total = 0
correct = 0
with torch.no_grad():
for images, labels in test_loader:
images, labels = images.to(device), labels.to(device)
output = model(images)
predictions = output.max(1, keepdim=True)[1]
total += labels.size(0)
correct += predictions.eq(labels.view_as(predictions)).sum().item()
return correct / total
def main_worker(
gpu, master_addr, master_port, world_size, node_rank, ngpus_per_node, args
):
"""The main method each spawned process runs."""
world_size = world_size * ngpus_per_node
world_rank = node_rank * ngpus_per_node + gpu
tcp_address = f'tcp://{master_addr}:{master_port}'
distributed.init_process_group(
backend='nccl',
init_method=tcp_address,
world_size=world_size,
rank=world_rank,
)
  # It is the user's responsibility to ensure that each process builds the
  # same model.
torch.manual_seed(0)
np.random.seed(0)
batch_size = args['batch_size'] // ngpus_per_node
model = torchvision.models.resnet18(pretrained=False)
device = torch.device('cuda:{}'.format(gpu))
model = model.to(device)
model = parallel.DistributedDataParallel(model, device_ids=[gpu])
transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
train_set = torchvision.datasets.CIFAR10(
root=args['train_dir'], train=True, download=False, transform=transform
)
test_set = torchvision.datasets.CIFAR10(
root=args['test_dir'], train=False, download=False, transform=transform
)
train_sampler = data.distributed.DistributedSampler(dataset=train_set)
train_loader = data.DataLoader(
dataset=train_set,
pin_memory=True,
batch_size=batch_size,
sampler=train_sampler,
)
  # No batch_size is given, so evaluation uses the DataLoader default of 1.
  test_loader = data.DataLoader(
      dataset=test_set, pin_memory=True, shuffle=False
  )
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(
model.parameters(), lr=args['learning_rate'], momentum=args['momentum']
)
for epoch in range(args['epochs']):
print('[rank:{}] epoch {}'.format(world_rank, epoch))
train(model, train_loader, device, optimizer, criterion)
accuracy = test(model, device, test_loader)
print('[rank:{}] accuracy: {}'.format(world_rank, accuracy))
def main(_):
# Download the dataset from the main process only once.
# Otherwise, each spawned process will try to download to the same directory.
torchvision.datasets.CIFAR10(root=FLAGS.train_dir, train=True, download=True)
torchvision.datasets.CIFAR10(root=FLAGS.test_dir, train=False, download=True)
master_addr = None
master_port = None
if FLAGS.master_addr_port is not None:
[master_addr, master_port] = FLAGS.master_addr_port.split(':')
world_size = FLAGS.world_size
node_rank = FLAGS.rank
if master_addr is None or master_port is None:
master_addr, master_port = vertex_utils.get_master_address_port()
world_size, node_rank = vertex_utils.get_world_size_rank()
ngpus_per_node = torch.cuda.device_count()
# We convert FLAGS to a dict, so it can be passed to spawned processes.
# When calling spawn, FLAGS will be reset and the parsed values are lost.
# The FLAGS object cannot be passed via args because it cannot be pickled.
args = dict(FLAGS.__flags) # pylint: disable=protected-access
for k in args:
args[k] = args[k].value if args[k].present else args[k].default
multiprocessing.spawn(
main_worker,
nprocs=ngpus_per_node,
args=(
master_addr,
master_port,
world_size,
node_rank,
ngpus_per_node,
args,
),
)
if __name__ == '__main__':
app.run(main)
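# --- Illustrative sketch (not part of cifar10.py) ---
# The global-rank arithmetic in main_worker lays ranks out node-major, so
# with 2 nodes and 2 GPUs per node the mapping is:
ngpus_per_node = 2
for node_rank in range(2):
  for gpu in range(ngpus_per_node):
    world_rank = node_rank * ngpus_per_node + gpu
    print(f'node {node_rank}, gpu {gpu} -> global rank {world_rank}')
# node 0 owns global ranks 0 and 1; node 1 owns global ranks 2 and 3.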
|
xmanager-main
|
examples/cifar10_torch/cifar10.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XManager launcher that runs an image built from a Dockerfile."""
from typing import Sequence
from absl import app
from xmanager import xm
from xmanager import xm_local
def main(argv: Sequence[str]) -> None:
del argv
with xm_local.create_experiment(
experiment_title='Example using Dockerfile()'
) as experiment:
executable_spec = xm.Dockerfile()
[executable] = experiment.package(
[
xm.Packageable(
executable_spec=executable_spec,
executor_spec=xm_local.Vertex.Spec(),
),
]
)
experiment.add(
xm.Job(
executable=executable,
executor=xm_local.Vertex(),
env_vars={'FOO': 'bar'},
args=['--a=1', '--b=2', '--c=3', '--d=4'],
)
)
if __name__ == '__main__':
app.run(main)
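# --- Hypothetical container entrypoint (for illustration only) ---
# Shows how the env_vars and args above would surface inside the image built
# by xm.Dockerfile(); this file is an assumption, not part of the example.
import os
import sys
print('FOO =', os.environ.get('FOO'))  # 'bar', from env_vars.
print('argv:', sys.argv[1:])  # ['--a=1', '--b=2', '--c=3', '--d=4'].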
|
xmanager-main
|
examples/dockerfile/launcher.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""XManager launcher for a parameter controller example.
The given program launches a dummy job on Kubernetes, waits for its completion
and then launches another job on Vertex AI. This kind of workflow can be used
to define pipelines.
Usage:
xmanager launch examples/parameter_controller/launcher.py -- \
--xm_db_yaml_config=db_config.yaml
[--xm_k8s_service_account_name=...]
[--xm_gcp_service_account_name=...]
The content of `db_config.yaml` must be updated to match the connection
details of the database used.
"""
import os
from absl import app
from absl import flags
from xmanager import xm
from xmanager import xm_local
from xmanager.contrib import parameter_controller
def main(_):
with xm_local.create_experiment(experiment_title='cifar10') as experiment:
@parameter_controller.controller(
executor=xm_local.Kubernetes(),
controller_args={
'xm_k8s_service_account_name': (
flags.FLAGS.xm_k8s_service_account_name
),
'xm_gcp_service_account_name': (
flags.FLAGS.xm_gcp_service_account_name
),
},
controller_env_vars={
'GOOGLE_CLOUD_BUCKET_NAME': os.environ['GOOGLE_CLOUD_BUCKET_NAME'],
},
      # Package the contents of this directory inside the controller job.
package_path='.',
)
async def parameter_controller_example(experiment: xm.Experiment):
spec = xm.PythonContainer(
# Package contents of job to be launched
path='inner_job',
base_image='python:3.9',
entrypoint=xm.ModuleName('wait_job'),
)
[vertex_executable, k8s_executable] = experiment.package([
xm.Packageable(
executable_spec=spec,
executor_spec=xm_local.Vertex.Spec(),
args={'time_to_sleep': 10},
),
xm.Packageable(
executable_spec=spec,
executor_spec=xm_local.Kubernetes.Spec(),
args={'time_to_sleep': 20},
),
])
wu1 = await experiment.add(xm.Job(k8s_executable, xm_local.Kubernetes()))
await wu1.wait_until_complete()
wu2 = await experiment.add(xm.Job(vertex_executable, xm_local.Vertex()))
await wu2.wait_until_complete()
experiment.add(parameter_controller_example()) # pylint: disable=no-value-for-parameter
if __name__ == '__main__':
app.run(main)
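# --- Hedged variant (sketch; concurrent_variant is a hypothetical name) ---
# If the two jobs need not run sequentially, the controller body can launch
# both up front and await their completions together:
import asyncio
from xmanager import xm
from xmanager import xm_local
async def concurrent_variant(experiment, k8s_executable, vertex_executable):
  wu1 = await experiment.add(xm.Job(k8s_executable, xm_local.Kubernetes()))
  wu2 = await experiment.add(xm.Job(vertex_executable, xm_local.Vertex()))
  # Both jobs are now running; wait for both of them concurrently.
  await asyncio.gather(wu1.wait_until_complete(), wu2.wait_until_complete())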
|
xmanager-main
|
examples/parameter_controller/launcher.py
|
"""Job waiting."""
import time
from absl import app
from absl import flags
_TIME_TO_SLEEP = flags.DEFINE_integer('time_to_sleep', 10, 'Time to sleep.')
def main(_):
print(f'Hello, waiting for {_TIME_TO_SLEEP.value}...')
time.sleep(_TIME_TO_SLEEP.value)
print('Done!')
if __name__ == '__main__':
app.run(main)
|
xmanager-main
|
examples/parameter_controller/inner_job/wait_job.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An HTTP server incrementing a value in Redis."""
from typing import Sequence
from absl import app
from absl import flags
import bottle
import bottle.ext.redis
redis_host = flags.DEFINE_string('redis_host', None, 'The Redis host.')
server = bottle.Bottle()
@server.route('/increment')
def increment(rdb):
return str(rdb.incr('counter'))
def main(argv: Sequence[str]) -> None:
del argv # Unused.
server.install(bottle.ext.redis.RedisPlugin(host=redis_host.value))
bottle.run(server, host='0.0.0.0', port=8080, debug=True)
if __name__ == '__main__':
app.run(main)
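# --- Minimal client sketch (hypothetical, not part of the example) ---
# Exercises the endpoint once the launcher below maps port 8080 to the host;
# it assumes the server and Redis are both running.
import urllib.request
for _ in range(3):
  with urllib.request.urlopen('http://localhost:8080/increment') as resp:
    print(resp.read().decode())  # 1, 2, 3 on a fresh counter.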
|
xmanager-main
|
examples/local_container_links/server.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A launcher for server.py and Redis.
See README.md for details.
"""
from typing import Sequence
from absl import app
from xmanager import xm
from xmanager import xm_local
def main(argv: Sequence[str]) -> None:
del argv # Unused.
with xm_local.create_experiment(
experiment_title='local_container_links'
) as experiment:
[redis, server] = experiment.package([
xm.Packageable(
executable_spec=xm.Container(image_path='redis'),
executor_spec=xm_local.Local.Spec(),
),
xm.Packageable(
executable_spec=xm.BazelContainer(label='//:server_image.tar'),
executor_spec=xm_local.Local.Spec(),
),
])
async def generator(work_unit):
work_unit.add(
xm.JobGroup(
server=xm.Job(
executable=server,
executor=xm_local.Local(
docker_options=xm_local.DockerOptions(ports={8080: 8080})
),
args={'redis_host': work_unit.get_full_job_name('redis')},
),
redis=xm.Job(
name='redis',
executable=redis,
executor=xm_local.Local(
docker_options=xm_local.DockerOptions(
volumes={'/tmp/redis': '/data'}
)
),
),
)
)
experiment.add(generator)
if __name__ == '__main__':
app.run(main)
|
xmanager-main
|
examples/local_container_links/launcher.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""XManager launcher for running CIFAR10 on Kubernetes with Tensorboard auxiliary job.
Usage:
xmanager launch examples/cifar10_tensorflow_k8s_tensorboard/launcher.py -- \
--tensorboard_log_dir=TENSORBOARD_LOG_DIR \
[--tensorboard_timeout_secs=TIMEOUT_SECS]
"""
import os
from absl import app
from absl import flags
from xmanager import xm
from xmanager import xm_local
from xmanager.contrib import tensorboard
_TENSORBOARD_LOG_DIR = flags.DEFINE_string(
'tensorboard_log_dir',
None,
'Log directory to be used by workers and Tensorboard.',
)
_TENSORBOARD_TIMEOUT_SECS = flags.DEFINE_integer(
'tensorboard_timeout_secs',
60 * 60,
'The amount of time the Tensorboard job should run for.',
)
def main(_):
with xm_local.create_experiment(experiment_title='cifar10') as experiment:
spec = xm.PythonContainer(
# Package the current directory that this script is in.
path='.',
base_image='gcr.io/deeplearning-platform-release/tf2-gpu.2-6',
entrypoint=xm.ModuleName('cifar10'),
)
[executable] = experiment.package(
[
xm.Packageable(
executable_spec=spec,
executor_spec=xm_local.Kubernetes.Spec(),
),
]
)
learning_rates = [0.1, 0.001]
    trials = [{'learning_rate': lr} for lr in learning_rates]
log_dir = None
if _TENSORBOARD_LOG_DIR.value:
log_dir = (
f'{_TENSORBOARD_LOG_DIR.value}/{str(experiment.experiment_id)}/logs'
)
if log_dir:
tensorboard.add_tensorboard(
experiment,
log_dir,
executor=xm_local.Kubernetes(),
timeout_secs=_TENSORBOARD_TIMEOUT_SECS.value,
)
    for i, hyperparameters in enumerate(trials):
      # Assumes --tensorboard_log_dir was set (see Usage above); otherwise
      # os.path.join would fail because log_dir is None.
      output_dir = os.path.join(log_dir, str(i))
experiment.add(
xm.Job(
executable=executable,
executor=xm_local.Kubernetes(),
args=dict({'tensorboard_log_dir': output_dir, **hyperparameters}),
)
)
if __name__ == '__main__':
app.run(main)
|
xmanager-main
|
examples/cifar10_tensorflow_k8s_tensorboard/launcher.py
|
# Copyright 2021 The Tensorflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code based on https://www.tensorflow.org/tutorials/images/cnn."""
from absl import app
from absl import flags
import tensorflow as tf
from tensorflow.keras import datasets
from tensorflow.keras import layers
from tensorflow.keras import models
_TENSORBOARD_LOG_DIR = flags.DEFINE_string('tensorboard_log_dir', None, '')
FLAGS = flags.FLAGS
flags.DEFINE_integer('epochs', 5, 'epochs')
flags.DEFINE_float('learning_rate', 0.001, 'learning rate')
def main(_):
(train_images, train_labels), (test_images, test_labels) = (
datasets.cifar10.load_data()
)
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = models.Sequential()
model.add(
layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3))
)
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))
optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate)
model.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'],
)
callbacks = []
if _TENSORBOARD_LOG_DIR.value:
callbacks = [
tf.keras.callbacks.TensorBoard(
log_dir=_TENSORBOARD_LOG_DIR.value,
histogram_freq=1,
),
]
model.fit(
train_images,
train_labels,
epochs=FLAGS.epochs,
validation_data=(test_images, test_labels),
callbacks=callbacks,
verbose=2,
)
if __name__ == '__main__':
app.run(main)
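# For reference, the feature-map shapes through the convolutional stack above
# (32x32x3 input): Conv2D(32) -> 30x30x32, MaxPooling2D -> 15x15x32,
# Conv2D(64) -> 13x13x64, MaxPooling2D -> 6x6x64, Conv2D(64) -> 4x4x64,
# Flatten -> 1024, Dense(64) -> 64, Dense(10) -> 10 logits.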
|
xmanager-main
|
examples/cifar10_tensorflow_k8s_tensorboard/cifar10.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""XManager launcher for CIFAR10 using TF's ParameterServerStrategy.
Usage:
  xmanager launch examples/cifar10_tensorflow_k8s_ps/launcher.py -- \
--xm_wrap_late_bindings [--image_path=gcr.io/path/to/image/tag]
"""
from absl import app
from xmanager import xm
from xmanager import xm_local
from xmanager.contrib import xm_tensorflow
def main(_):
with xm_local.create_experiment(
experiment_title='kubernetes_multiworker'
) as experiment:
spec = xm.PythonContainer(
# Package the current directory that this script is in.
path='.',
base_image='gcr.io/deeplearning-platform-release/tf2-gpu.2-6',
entrypoint=xm.ModuleName('cifar10'),
)
[executable] = experiment.package(
[
xm.Packageable(
executable_spec=spec,
executor_spec=xm_local.Kubernetes.Spec(),
args={},
)
]
)
learning_rates = [0.001]
    trials = [{'learning_rate': lr} for lr in learning_rates]
builder = xm_tensorflow.ParameterServerStrategyBuilder(
experiment=experiment,
chief_executable=executable,
chief_executor=xm_local.Kubernetes(
requirements=xm.JobRequirements(t4=1)
),
worker_executable=executable,
worker_executor=xm_local.Kubernetes(
requirements=xm.JobRequirements(t4=1)
),
worker_name='worker',
ps_executable=executable,
ps_executor=xm_local.Kubernetes(),
ps_name='ps',
num_workers=2,
num_ps=1,
)
for hyperparameters in trials:
experiment.add(builder.gen_job_group(), args=hyperparameters)
if __name__ == '__main__':
app.run(main)
|
xmanager-main
|
examples/cifar10_tensorflow_k8s_ps/launcher.py
|
# Copyright 2021 The Tensorflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code based on https://www.tensorflow.org/tutorials/images/cnn."""
import os
from absl import app
from absl import flags
import tensorflow as tf
from tensorflow.keras import datasets
from tensorflow.keras import layers
from tensorflow.keras import models
FLAGS = flags.FLAGS
flags.DEFINE_integer('epochs', 5, 'epochs')
flags.DEFINE_float('learning_rate', 0.001, 'learning rate')
def main(_):
cluster_resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
if cluster_resolver.task_type in ('worker', 'ps'):
os.environ['GRPC_FAIL_FAST'] = 'use_caller'
server = tf.distribute.Server(
cluster_resolver.cluster_spec(),
job_name=cluster_resolver.task_type,
task_index=cluster_resolver.task_id,
protocol=cluster_resolver.rpc_layer or 'grpc',
start=True,
)
server.join()
(train_images, train_labels), _ = datasets.cifar10.load_data()
def dataset_fn(input_context):
dataset = tf.data.Dataset.from_tensor_slices(
(train_images, train_labels)
).repeat()
dataset = dataset.shard(
input_context.num_input_pipelines, input_context.input_pipeline_id
)
dataset = dataset.batch(64)
dataset = dataset.prefetch(2)
return dataset
strategy = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver
)
with strategy.scope():
model = models.Sequential()
model.add(
layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3))
)
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))
optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate)
model.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'],
)
model.fit(
tf.keras.utils.experimental.DatasetCreator(dataset_fn),
steps_per_epoch=1500,
epochs=FLAGS.epochs,
)
if __name__ == '__main__':
app.run(main)
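# --- Reference sketch (host names purely illustrative) ---
# TFConfigClusterResolver parses the TF_CONFIG environment variable; the
# launcher's ParameterServerStrategyBuilder is expected to provide something
# equivalent for each task. Its shape is:
import json
import os
tf_config = {
    'cluster': {
        'chief': ['chief-0:2222'],
        'worker': ['worker-0:2222', 'worker-1:2222'],
        'ps': ['ps-0:2222'],
    },
    'task': {'type': 'worker', 'index': 0},
}
os.environ['TF_CONFIG'] = json.dumps(tf_config)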
|
xmanager-main
|
examples/cifar10_tensorflow_k8s_ps/cifar10.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""XManager local launcher for CIFAR10 using GPUs.
Usage:
xmanager launch examples/local_container_gpu/launcher.py -- \
--xm_wrap_late_bindings
"""
from absl import app
from absl import flags
from xmanager import xm
from xmanager import xm_local
_EXP_NAME = flags.DEFINE_string(
'exp_name', 'local-cifar10-gpu', 'Name of the experiment.', short_name='n'
)
_INTERACTIVE = flags.DEFINE_bool(
'interactive',
False,
'Launch the container and allow interactive access to it.',
)
def main(argv) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
create_experiment = xm_local.create_experiment
with create_experiment(experiment_title=_EXP_NAME.value) as experiment:
docker_options = xm_local.DockerOptions(interactive=_INTERACTIVE.value)
    # Create a local executor with an experimental flag to stream job output.
executor = xm_local.Local(
xm.JobRequirements(local_gpu=2),
experimental_stream_output=True,
docker_options=docker_options,
)
    # Empty args mean nothing extra is passed to the job.
executable_args = {}
(executable,) = experiment.package(
[
xm.python_container(
executor_spec=executor.Spec(),
args=executable_args,
# Package the current directory that this script is in.
path='.',
base_image='gcr.io/deeplearning-platform-release/tf2-gpu.2-6',
entrypoint=xm.ModuleName('local_container_gpu.cifar10'),
use_deep_module=True,
)
]
)
job = xm.Job(executable, executor)
experiment.add(job)
if __name__ == '__main__':
app.run(main)
|
xmanager-main
|
examples/local_container_gpu/launcher.py
|
# Copyright 2021 The Tensorflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code based on https://www.tensorflow.org/tutorials/images/cnn."""
import os
from absl import app
from absl import flags
import tensorflow as tf
from tensorflow.keras import datasets
from tensorflow.keras import layers
from tensorflow.keras import models
# When using Vertex AI Tensorboard, the log directory is provided to the job
# via an environment variable.
LOG_DIR = os.environ.get('AIP_TENSORBOARD_LOG_DIR', '')
FLAGS = flags.FLAGS
flags.DEFINE_integer('epochs', 5, 'epochs')
flags.DEFINE_float('learning_rate', 0.001, 'learning rate')
def main(_):
(train_images, train_labels), (test_images, test_labels) = (
datasets.cifar10.load_data()
)
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = models.Sequential()
model.add(
layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3))
)
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))
optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate)
model.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'],
)
callbacks = []
if LOG_DIR:
callbacks = [
tf.keras.callbacks.TensorBoard(
log_dir=LOG_DIR,
histogram_freq=1,
),
]
model.fit(
train_images,
train_labels,
epochs=FLAGS.epochs,
validation_data=(test_images, test_labels),
callbacks=callbacks,
verbose=2,
)
if __name__ == '__main__':
app.run(main)
|
xmanager-main
|
examples/local_container_gpu/cifar10.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""XManager launcher for Polynomial.
Usage:
xmanager launch examples/vizier/launcher.py -- \
--xm_wrap_late_bindings
"""
from absl import app
from google.cloud import aiplatform_v1beta1 as aip
from xmanager import xm
from xmanager import xm_local
from xmanager.vizier import vizier_cloud
def get_study_spec() -> aip.StudySpec:
return aip.StudySpec(
algorithm=aip.StudySpec.Algorithm.RANDOM_SEARCH,
parameters=[
aip.StudySpec.ParameterSpec(
parameter_id='x',
double_value_spec=aip.StudySpec.ParameterSpec.DoubleValueSpec(
min_value=-2.0, max_value=2.0
),
),
aip.StudySpec.ParameterSpec(
parameter_id='y',
double_value_spec=aip.StudySpec.ParameterSpec.DoubleValueSpec(
min_value=-2.0, max_value=2.0
),
),
],
metrics=[
aip.StudySpec.MetricSpec(
metric_id='loss', goal=aip.StudySpec.MetricSpec.GoalType.MINIMIZE
)
],
)
def main(_):
with xm_local.create_experiment(experiment_title='polynomial') as experiment:
spec = xm.PythonContainer(
# Package the current directory that this script is in.
path='.',
base_image='gcr.io/deeplearning-platform-release/base-cpu',
entrypoint=xm.ModuleName('polynomial'),
)
[executable] = experiment.package(
[
xm.Packageable(
executable_spec=spec,
executor_spec=xm_local.Vertex.Spec(),
),
]
)
vizier_cloud.VizierExploration(
experiment=experiment,
job=xm.Job(
executable=executable,
executor=xm_local.Vertex(),
),
study_factory=vizier_cloud.NewStudy(study_config=get_study_spec()),
num_trials_total=3,
num_parallel_trial_runs=2,
).launch()
if __name__ == '__main__':
app.run(main)
|
xmanager-main
|
examples/vizier/launcher.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute the value of a bivariate quadratic polynomial.
An example of finding the values of x and y that minimise the polynomial via
automatic hyperparameter tuning. Set a, b, c, d, e, and f to constant args;
set x and y to be the tuned hyperparameters.
"""
from absl import app
from absl import flags
from vizier_worker import VizierWorker
FLAGS = flags.FLAGS
flags.DEFINE_integer('a', 1, 'a in ax^2 + by^2 + cxy + dx + ey + f')
flags.DEFINE_integer('b', 1, 'b in ax^2 + by^2 + cxy + dx + ey + f')
flags.DEFINE_integer('c', 0, 'c in ax^2 + by^2 + cxy + dx + ey + f')
flags.DEFINE_integer('d', 1, 'd in ax^2 + by^2 + cxy + dx + ey + f')
flags.DEFINE_integer('e', 1, 'e in ax^2 + by^2 + cxy + dx + ey + f')
flags.DEFINE_integer('f', 1, 'f in ax^2 + by^2 + cxy + dx + ey + f')
flags.DEFINE_float('x', 0, 'The hyperparameter variable X.')
flags.DEFINE_float('y', 0, 'The hyperparameter variable Y.')
flags.DEFINE_string(
'trial_name',
None,
(
        'Identifies the current trial that measurements '
'will be submitted to with `add_trial_measurement`. Format: '
'projects/{project}/locations/{location}/studies/{study}/trials/{trial}'
),
)
def main(_):
worker = VizierWorker(FLAGS.trial_name)
  # Dummy "training" step: evaluate the polynomial once and report the loss.
metric_value = float(
FLAGS.a * FLAGS.x * FLAGS.x
+ FLAGS.b * FLAGS.y * FLAGS.y
+ FLAGS.c * FLAGS.x * FLAGS.y
+ FLAGS.d * FLAGS.x
+ FLAGS.e * FLAGS.y
+ FLAGS.f
)
worker.add_trial_measurement(1, {'loss': metric_value})
if __name__ == '__main__':
app.run(main)
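# --- Sanity check (not part of the example) ---
# With the default coefficients a=b=d=e=f=1 and c=0, the polynomial is
# x^2 + y^2 + x + y + 1, minimised at x = y = -0.5 with value 0.5, so a
# correct Vizier study should steer trials toward that point.
def poly(x, y, a=1, b=1, c=0, d=1, e=1, f=1):
  return a * x * x + b * y * y + c * x * y + d * x + e * y + f
assert poly(-0.5, -0.5) == 0.5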
|
xmanager-main
|
examples/vizier/polynomial.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""XManager launcher for CIFAR10.
Usage:
xmanager launch examples/cifar10_tensorflow/launcher.py -- \
--xm_wrap_late_bindings [--image_path=gcr.io/path/to/image/tag]
"""
import asyncio
import os
from absl import app
from absl import flags
from xmanager import xm
from xmanager import xm_local
from xmanager.cloud import vertex
FLAGS = flags.FLAGS
flags.DEFINE_string('tensorboard', None, 'Tensorboard instance.')
def main(_):
with xm_local.create_experiment(experiment_title='cifar10') as experiment:
spec = xm.PythonContainer(
# Package the current directory that this script is in.
path='.',
base_image='gcr.io/deeplearning-platform-release/tf2-gpu.2-6',
entrypoint=xm.ModuleName('cifar10'),
)
[executable] = experiment.package(
[
xm.Packageable(
executable_spec=spec,
executor_spec=xm_local.Vertex.Spec(),
args={},
),
]
)
learning_rates = [0.1, 0.001]
    trials = [{'learning_rate': lr} for lr in learning_rates]
tensorboard = FLAGS.tensorboard
if not tensorboard:
tensorboard = vertex.get_default_client().get_or_create_tensorboard(
'cifar10'
)
tensorboard = asyncio.get_event_loop().run_until_complete(tensorboard)
for i, hyperparameters in enumerate(trials):
output_dir = os.environ.get('GOOGLE_CLOUD_BUCKET_NAME', None)
if output_dir:
output_dir = os.path.join(
output_dir, str(experiment.experiment_id), str(i)
)
tensorboard_capability = xm_local.TensorboardCapability(
name=tensorboard, base_output_directory=output_dir
)
experiment.add(
xm.Job(
executable=executable,
executor=xm_local.Vertex(tensorboard=tensorboard_capability),
args=hyperparameters,
)
)
if __name__ == '__main__':
app.run(main)
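# Note on the TensorboardCapability above: when a tensorboard is attached to a
# Vertex AI custom job, Vertex is expected to expose the per-trial log
# directory via the AIP_TENSORBOARD_LOG_DIR environment variable, which the
# companion cifar10.py below reads.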
|
xmanager-main
|
examples/cifar10_tensorflow/launcher.py
|
# Copyright 2021 The Tensorflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code based on https://www.tensorflow.org/tutorials/images/cnn."""
import os
from absl import app
from absl import flags
import tensorflow as tf
from tensorflow.keras import datasets
from tensorflow.keras import layers
from tensorflow.keras import models
# When using Vertex AI Tensorboard, the log directory is provided to the job
# via an environment variable.
LOG_DIR = os.environ.get('AIP_TENSORBOARD_LOG_DIR', '')
FLAGS = flags.FLAGS
flags.DEFINE_integer('epochs', 5, 'epochs')
flags.DEFINE_float('learning_rate', 0.001, 'learning rate')
def main(_):
(train_images, train_labels), (test_images, test_labels) = (
datasets.cifar10.load_data()
)
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = models.Sequential()
model.add(
layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3))
)
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))
optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate)
model.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'],
)
callbacks = []
if LOG_DIR:
callbacks = [
tf.keras.callbacks.TensorBoard(
log_dir=LOG_DIR,
histogram_freq=1,
),
]
model.fit(
train_images,
train_labels,
epochs=FLAGS.epochs,
validation_data=(test_images, test_labels),
callbacks=callbacks,
verbose=2,
)
if __name__ == '__main__':
app.run(main)
|
xmanager-main
|
examples/cifar10_tensorflow/cifar10.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XManager launcher that runs locally a binary built with Bazel.
One must `cd` into xmanager/examples/local_arg_printer/ in order to run this
example because Bazel needs to locate the WORKSPACE file.
"""
from typing import Sequence
from absl import app
from xmanager import xm
from xmanager import xm_local
def main(argv: Sequence[str]) -> None:
del argv
with xm_local.create_experiment(
experiment_title='local_arg_printer'
) as experiment:
[executable] = experiment.package(
[
xm.Packageable(
                    executable_spec=xm.BazelBinary(label='//:arg_printer'),
executor_spec=xm_local.Local.Spec(),
),
]
)
experiment.add(
xm.Job(
executable=executable,
executor=xm_local.Local(),
env_vars={'OUTPUT_PATH': '/tmp/local_arg_printer.txt'},
)
)
if __name__ == '__main__':
app.run(main)
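# --- Hypothetical arg_printer (for illustration only) ---
# The Bazel target //:arg_printer is not shown here; a minimal implementation
# consistent with this launcher would write its arguments to the path given
# by the OUTPUT_PATH environment variable:
import os
import sys
with open(os.environ['OUTPUT_PATH'], 'w') as f:
  f.write(' '.join(sys.argv[1:]))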
|
xmanager-main
|
examples/local_arg_printer/launcher.py
|
# Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pyformat: disable
# Generated by the protocol buffer compiler. DO NOT EDIT!
# pylint: skip-file
# source: src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from . import command_line_pb2 as src_dot_main_dot_protobuf_dot_command__line__pb2
from . import failure_details_pb2 as src_dot_main_dot_protobuf_dot_failure__details__pb2
from . import invocation_policy_pb2 as src_dot_main_dot_protobuf_dot_invocation__policy__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n[src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto\x12\x12\x62uild_event_stream\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a$src/main/protobuf/command_line.proto\x1a\'src/main/protobuf/failure_details.proto\x1a)src/main/protobuf/invocation_policy.proto\"\x86\x1a\n\x0c\x42uildEventId\x12G\n\x07unknown\x18\x01 \x01(\x0b\x32\x34.build_event_stream.BuildEventId.UnknownBuildEventIdH\x00\x12?\n\x08progress\x18\x02 \x01(\x0b\x32+.build_event_stream.BuildEventId.ProgressIdH\x00\x12\x42\n\x07started\x18\x03 \x01(\x0b\x32/.build_event_stream.BuildEventId.BuildStartedIdH\x00\x12_\n\x19unstructured_command_line\x18\x0b \x01(\x0b\x32:.build_event_stream.BuildEventId.UnstructuredCommandLineIdH\x00\x12[\n\x17structured_command_line\x18\x12 \x01(\x0b\x32\x38.build_event_stream.BuildEventId.StructuredCommandLineIdH\x00\x12N\n\x10workspace_status\x18\x0e \x01(\x0b\x32\x32.build_event_stream.BuildEventId.WorkspaceStatusIdH\x00\x12J\n\x0eoptions_parsed\x18\x0c \x01(\x0b\x32\x30.build_event_stream.BuildEventId.OptionsParsedIdH\x00\x12\x39\n\x05\x66\x65tch\x18\x11 \x01(\x0b\x32(.build_event_stream.BuildEventId.FetchIdH\x00\x12I\n\rconfiguration\x18\x0f \x01(\x0b\x32\x30.build_event_stream.BuildEventId.ConfigurationIdH\x00\x12P\n\x11target_configured\x18\x10 \x01(\x0b\x32\x33.build_event_stream.BuildEventId.TargetConfiguredIdH\x00\x12\x45\n\x07pattern\x18\x04 \x01(\x0b\x32\x32.build_event_stream.BuildEventId.PatternExpandedIdH\x00\x12M\n\x0fpattern_skipped\x18\n \x01(\x0b\x32\x32.build_event_stream.BuildEventId.PatternExpandedIdH\x00\x12G\n\tnamed_set\x18\r \x01(\x0b\x32\x32.build_event_stream.BuildEventId.NamedSetOfFilesIdH\x00\x12N\n\x10target_completed\x18\x05 \x01(\x0b\x32\x32.build_event_stream.BuildEventId.TargetCompletedIdH\x00\x12N\n\x10\x61\x63tion_completed\x18\x06 \x01(\x0b\x32\x32.build_event_stream.BuildEventId.ActionCompletedIdH\x00\x12R\n\x12unconfigured_label\x18\x13 \x01(\x0b\x32\x34.build_event_stream.BuildEventId.UnconfiguredLabelIdH\x00\x12N\n\x10\x63onfigured_label\x18\x15 \x01(\x0b\x32\x32.build_event_stream.BuildEventId.ConfiguredLabelIdH\x00\x12\x44\n\x0btest_result\x18\x08 \x01(\x0b\x32-.build_event_stream.BuildEventId.TestResultIdH\x00\x12\x46\n\x0ctest_summary\x18\x07 \x01(\x0b\x32..build_event_stream.BuildEventId.TestSummaryIdH\x00\x12J\n\x0etarget_summary\x18\x1a \x01(\x0b\x32\x30.build_event_stream.BuildEventId.TargetSummaryIdH\x00\x12J\n\x0e\x62uild_finished\x18\t \x01(\x0b\x32\x30.build_event_stream.BuildEventId.BuildFinishedIdH\x00\x12K\n\x0f\x62uild_tool_logs\x18\x14 \x01(\x0b\x32\x30.build_event_stream.BuildEventId.BuildToolLogsIdH\x00\x12H\n\rbuild_metrics\x18\x16 \x01(\x0b\x32/.build_event_stream.BuildEventId.BuildMetricsIdH\x00\x12G\n\tworkspace\x18\x17 \x01(\x0b\x32\x32.build_event_stream.BuildEventId.WorkspaceConfigIdH\x00\x12J\n\x0e\x62uild_metadata\x18\x18 \x01(\x0b\x32\x30.build_event_stream.BuildEventId.BuildMetadataIdH\x00\x12k\n\x1f\x63onvenience_symlinks_identified\x18\x19 \x01(\x0b\x32@.build_event_stream.BuildEventId.ConvenienceSymlinksIdentifiedIdH\x00\x1a&\n\x13UnknownBuildEventId\x12\x0f\n\x07\x64\x65tails\x18\x01 \x01(\t\x1a\"\n\nProgressId\x12\x14\n\x0copaque_count\x18\x01 \x01(\x05\x1a\x10\n\x0e\x42uildStartedId\x1a\x1b\n\x19UnstructuredCommandLineId\x1a\x35\n\x17StructuredCommandLineId\x12\x1a\n\x12\x63ommand_line_label\x18\x01 
\x01(\t\x1a\x13\n\x11WorkspaceStatusId\x1a\x11\n\x0fOptionsParsedId\x1a\x16\n\x07\x46\x65tchId\x12\x0b\n\x03url\x18\x01 \x01(\t\x1a$\n\x11PatternExpandedId\x12\x0f\n\x07pattern\x18\x01 \x03(\t\x1a\x13\n\x11WorkspaceConfigId\x1a\x11\n\x0f\x42uildMetadataId\x1a\x33\n\x12TargetConfiguredId\x12\r\n\x05label\x18\x01 \x01(\t\x12\x0e\n\x06\x61spect\x18\x02 \x01(\t\x1a\x1f\n\x11NamedSetOfFilesId\x12\n\n\x02id\x18\x01 \x01(\t\x1a\x1d\n\x0f\x43onfigurationId\x12\n\n\x02id\x18\x01 \x01(\t\x1a{\n\x11TargetCompletedId\x12\r\n\x05label\x18\x01 \x01(\t\x12G\n\rconfiguration\x18\x03 \x01(\x0b\x32\x30.build_event_stream.BuildEventId.ConfigurationId\x12\x0e\n\x06\x61spect\x18\x02 \x01(\t\x1a\x83\x01\n\x11\x41\x63tionCompletedId\x12\x16\n\x0eprimary_output\x18\x01 \x01(\t\x12\r\n\x05label\x18\x02 \x01(\t\x12G\n\rconfiguration\x18\x03 \x01(\x0b\x32\x30.build_event_stream.BuildEventId.ConfigurationId\x1a$\n\x13UnconfiguredLabelId\x12\r\n\x05label\x18\x01 \x01(\t\x1ak\n\x11\x43onfiguredLabelId\x12\r\n\x05label\x18\x01 \x01(\t\x12G\n\rconfiguration\x18\x02 \x01(\x0b\x32\x30.build_event_stream.BuildEventId.ConfigurationId\x1a\x93\x01\n\x0cTestResultId\x12\r\n\x05label\x18\x01 \x01(\t\x12G\n\rconfiguration\x18\x05 \x01(\x0b\x32\x30.build_event_stream.BuildEventId.ConfigurationId\x12\x0b\n\x03run\x18\x02 \x01(\x05\x12\r\n\x05shard\x18\x03 \x01(\x05\x12\x0f\n\x07\x61ttempt\x18\x04 \x01(\x05\x1ag\n\rTestSummaryId\x12\r\n\x05label\x18\x01 \x01(\t\x12G\n\rconfiguration\x18\x02 \x01(\x0b\x32\x30.build_event_stream.BuildEventId.ConfigurationId\x1ai\n\x0fTargetSummaryId\x12\r\n\x05label\x18\x01 \x01(\t\x12G\n\rconfiguration\x18\x02 \x01(\x0b\x32\x30.build_event_stream.BuildEventId.ConfigurationId\x1a\x11\n\x0f\x42uildFinishedId\x1a\x11\n\x0f\x42uildToolLogsId\x1a\x10\n\x0e\x42uildMetricsId\x1a!\n\x1f\x43onvenienceSymlinksIdentifiedIdB\x04\n\x02id\"*\n\x08Progress\x12\x0e\n\x06stdout\x18\x01 \x01(\t\x12\x0e\n\x06stderr\x18\x02 \x01(\t\"\xbf\x02\n\x07\x41\x62orted\x12\x37\n\x06reason\x18\x01 \x01(\x0e\x32\'.build_event_stream.Aborted.AbortReason\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\"\xe5\x01\n\x0b\x41\x62ortReason\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x14\n\x10USER_INTERRUPTED\x10\x01\x12\x0e\n\nNO_ANALYZE\x10\x08\x12\x0c\n\x08NO_BUILD\x10\t\x12\x0c\n\x08TIME_OUT\x10\x02\x12\x1e\n\x1aREMOTE_ENVIRONMENT_FAILURE\x10\x03\x12\x0c\n\x08INTERNAL\x10\x04\x12\x13\n\x0fLOADING_FAILURE\x10\x05\x12\x14\n\x10\x41NALYSIS_FAILURE\x10\x06\x12\x0b\n\x07SKIPPED\x10\x07\x12\x0e\n\nINCOMPLETE\x10\n\x12\x11\n\rOUT_OF_MEMORY\x10\x0b\"\x81\x02\n\x0c\x42uildStarted\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12\x1d\n\x11start_time_millis\x18\x02 \x01(\x03\x42\x02\x18\x01\x12.\n\nstart_time\x18\t \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1a\n\x12\x62uild_tool_version\x18\x03 \x01(\t\x12\x1b\n\x13options_description\x18\x04 \x01(\t\x12\x0f\n\x07\x63ommand\x18\x05 \x01(\t\x12\x19\n\x11working_directory\x18\x06 \x01(\t\x12\x1b\n\x13workspace_directory\x18\x07 \x01(\t\x12\x12\n\nserver_pid\x18\x08 \x01(\x03\"*\n\x0fWorkspaceConfig\x12\x17\n\x0flocal_exec_root\x18\x01 \x01(\t\"\'\n\x17UnstructuredCommandLine\x12\x0c\n\x04\x61rgs\x18\x01 \x03(\t\"\xcf\x01\n\rOptionsParsed\x12\x17\n\x0fstartup_options\x18\x01 \x03(\t\x12 \n\x18\x65xplicit_startup_options\x18\x02 \x03(\t\x12\x10\n\x08\x63md_line\x18\x03 \x03(\t\x12\x19\n\x11\x65xplicit_cmd_line\x18\x04 \x03(\t\x12\x44\n\x11invocation_policy\x18\x05 \x01(\x0b\x32).blaze.invocation_policy.InvocationPolicy\x12\x10\n\x08tool_tag\x18\x06 \x01(\t\"\x18\n\x05\x46\x65tch\x12\x0f\n\x07success\x18\x01 
\x01(\x08\"m\n\x0fWorkspaceStatus\x12\x36\n\x04item\x18\x01 \x03(\x0b\x32(.build_event_stream.WorkspaceStatus.Item\x1a\"\n\x04Item\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\x83\x01\n\rBuildMetadata\x12\x41\n\x08metadata\x18\x01 \x03(\x0b\x32/.build_event_stream.BuildMetadata.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xd7\x01\n\rConfiguration\x12\x10\n\x08mnemonic\x18\x01 \x01(\t\x12\x15\n\rplatform_name\x18\x02 \x01(\t\x12\x0b\n\x03\x63pu\x18\x03 \x01(\t\x12J\n\rmake_variable\x18\x04 \x03(\x0b\x32\x33.build_event_stream.Configuration.MakeVariableEntry\x12\x0f\n\x07is_tool\x18\x05 \x01(\x08\x1a\x33\n\x11MakeVariableEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xa8\x01\n\x0fPatternExpanded\x12U\n\x15test_suite_expansions\x18\x01 \x03(\x0b\x32\x36.build_event_stream.PatternExpanded.TestSuiteExpansion\x1a>\n\x12TestSuiteExpansion\x12\x13\n\x0bsuite_label\x18\x01 \x01(\t\x12\x13\n\x0btest_labels\x18\x02 \x03(\t\"e\n\x10TargetConfigured\x12\x13\n\x0btarget_kind\x18\x01 \x01(\t\x12/\n\ttest_size\x18\x02 \x01(\x0e\x32\x1c.build_event_stream.TestSize\x12\x0b\n\x03tag\x18\x03 \x03(\t\"t\n\x04\x46ile\x12\x13\n\x0bpath_prefix\x18\x04 \x03(\t\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x03uri\x18\x02 \x01(\tH\x00\x12\x12\n\x08\x63ontents\x18\x03 \x01(\x0cH\x00\x12\x0e\n\x06\x64igest\x18\x05 \x01(\t\x12\x0e\n\x06length\x18\x06 \x01(\x03\x42\x06\n\x04\x66ile\"\x81\x01\n\x0fNamedSetOfFiles\x12\'\n\x05\x66iles\x18\x01 \x03(\x0b\x32\x18.build_event_stream.File\x12\x45\n\tfile_sets\x18\x02 \x03(\x0b\x32\x32.build_event_stream.BuildEventId.NamedSetOfFilesId\"\xae\x03\n\x0e\x41\x63tionExecuted\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0c\n\x04type\x18\x08 \x01(\t\x12\x11\n\texit_code\x18\x02 \x01(\x05\x12(\n\x06stdout\x18\x03 \x01(\x0b\x32\x18.build_event_stream.File\x12(\n\x06stderr\x18\x04 \x01(\x0b\x32\x18.build_event_stream.File\x12\x11\n\x05label\x18\x05 \x01(\tB\x02\x18\x01\x12K\n\rconfiguration\x18\x07 \x01(\x0b\x32\x30.build_event_stream.BuildEventId.ConfigurationIdB\x02\x18\x01\x12\x30\n\x0eprimary_output\x18\x06 \x01(\x0b\x32\x18.build_event_stream.File\x12\x14\n\x0c\x63ommand_line\x18\t \x03(\t\x12\x36\n\x14\x61\x63tion_metadata_logs\x18\n \x03(\x0b\x32\x18.build_event_stream.File\x12\x36\n\x0e\x66\x61ilure_detail\x18\x0b \x01(\x0b\x32\x1e.failure_details.FailureDetail\"|\n\x0bOutputGroup\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x45\n\tfile_sets\x18\x03 \x03(\x0b\x32\x32.build_event_stream.BuildEventId.NamedSetOfFilesId\x12\x12\n\nincomplete\x18\x04 \x01(\x08J\x04\x08\x02\x10\x03\"\xaa\x03\n\x0eTargetComplete\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x17\n\x0btarget_kind\x18\x05 \x01(\tB\x02\x18\x01\x12\x33\n\ttest_size\x18\x06 \x01(\x0e\x32\x1c.build_event_stream.TestSizeB\x02\x18\x01\x12\x35\n\x0coutput_group\x18\x02 \x03(\x0b\x32\x1f.build_event_stream.OutputGroup\x12\x36\n\x10important_output\x18\x04 \x03(\x0b\x32\x18.build_event_stream.FileB\x02\x18\x01\x12\x32\n\x10\x64irectory_output\x18\x08 \x03(\x0b\x32\x18.build_event_stream.File\x12\x0b\n\x03tag\x18\x03 \x03(\t\x12 \n\x14test_timeout_seconds\x18\x07 \x01(\x03\x42\x02\x18\x01\x12/\n\x0ctest_timeout\x18\n \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x36\n\x0e\x66\x61ilure_detail\x18\t \x01(\x0b\x32\x1e.failure_details.FailureDetail\"\xd2\x07\n\nTestResult\x12.\n\x06status\x18\x05 \x01(\x0e\x32\x1e.build_event_stream.TestStatus\x12\x16\n\x0estatus_details\x18\t 
\x01(\t\x12\x16\n\x0e\x63\x61\x63hed_locally\x18\x04 \x01(\x08\x12+\n\x1ftest_attempt_start_millis_epoch\x18\x06 \x01(\x03\x42\x02\x18\x01\x12\x36\n\x12test_attempt_start\x18\n \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12(\n\x1ctest_attempt_duration_millis\x18\x03 \x01(\x03\x42\x02\x18\x01\x12\x38\n\x15test_attempt_duration\x18\x0b \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x34\n\x12test_action_output\x18\x02 \x03(\x0b\x32\x18.build_event_stream.File\x12\x0f\n\x07warning\x18\x07 \x03(\t\x12\x44\n\x0e\x65xecution_info\x18\x08 \x01(\x0b\x32,.build_event_stream.TestResult.ExecutionInfo\x1a\x87\x04\n\rExecutionInfo\x12\x1b\n\x0ftimeout_seconds\x18\x01 \x01(\x05\x42\x02\x18\x01\x12\x10\n\x08strategy\x18\x02 \x01(\t\x12\x17\n\x0f\x63\x61\x63hed_remotely\x18\x06 \x01(\x08\x12\x11\n\texit_code\x18\x07 \x01(\x05\x12\x10\n\x08hostname\x18\x03 \x01(\t\x12V\n\x10timing_breakdown\x18\x04 \x01(\x0b\x32<.build_event_stream.TestResult.ExecutionInfo.TimingBreakdown\x12R\n\x0eresource_usage\x18\x05 \x03(\x0b\x32:.build_event_stream.TestResult.ExecutionInfo.ResourceUsage\x1a\xae\x01\n\x0fTimingBreakdown\x12K\n\x05\x63hild\x18\x01 \x03(\x0b\x32<.build_event_stream.TestResult.ExecutionInfo.TimingBreakdown\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x17\n\x0btime_millis\x18\x03 \x01(\x03\x42\x02\x18\x01\x12\'\n\x04time\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x1a,\n\rResourceUsage\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03J\x04\x08\x01\x10\x02\"\x9b\x04\n\x0bTestSummary\x12\x36\n\x0eoverall_status\x18\x05 \x01(\x0e\x32\x1e.build_event_stream.TestStatus\x12\x17\n\x0ftotal_run_count\x18\x01 \x01(\x05\x12\x11\n\trun_count\x18\n \x01(\x05\x12\x15\n\rattempt_count\x18\x0f \x01(\x05\x12\x13\n\x0bshard_count\x18\x0b \x01(\x05\x12(\n\x06passed\x18\x03 \x03(\x0b\x32\x18.build_event_stream.File\x12(\n\x06\x66\x61iled\x18\x04 \x03(\x0b\x32\x18.build_event_stream.File\x12\x18\n\x10total_num_cached\x18\x06 \x01(\x05\x12#\n\x17\x66irst_start_time_millis\x18\x07 \x01(\x03\x42\x02\x18\x01\x12\x34\n\x10\x66irst_start_time\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12!\n\x15last_stop_time_millis\x18\x08 \x01(\x03\x42\x02\x18\x01\x12\x32\n\x0elast_stop_time\x18\x0e \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12%\n\x19total_run_duration_millis\x18\t \x01(\x03\x42\x02\x18\x01\x12\x35\n\x12total_run_duration\x18\x0c \x01(\x0b\x32\x19.google.protobuf.Duration\"k\n\rTargetSummary\x12\x1d\n\x15overall_build_success\x18\x01 \x01(\x08\x12;\n\x13overall_test_status\x18\x02 \x01(\x0e\x32\x1e.build_event_stream.TestStatus\"\xd9\x02\n\rBuildFinished\x12\x1b\n\x0foverall_success\x18\x01 \x01(\x08\x42\x02\x18\x01\x12=\n\texit_code\x18\x03 \x01(\x0b\x32*.build_event_stream.BuildFinished.ExitCode\x12\x1e\n\x12\x66inish_time_millis\x18\x02 \x01(\x03\x42\x02\x18\x01\x12/\n\x0b\x66inish_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12K\n\x0e\x61nomaly_report\x18\x04 \x01(\x0b\x32/.build_event_stream.BuildFinished.AnomalyReportB\x02\x18\x01\x1a&\n\x08\x45xitCode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x63ode\x18\x02 \x01(\x05\x1a&\n\rAnomalyReport\x12\x15\n\rwas_suspended\x18\x01 \x01(\x08\"\xb9\x1a\n\x0c\x42uildMetrics\x12\x46\n\x0e\x61\x63tion_summary\x18\x01 \x01(\x0b\x32..build_event_stream.BuildMetrics.ActionSummary\x12\x46\n\x0ememory_metrics\x18\x02 \x01(\x0b\x32..build_event_stream.BuildMetrics.MemoryMetrics\x12\x46\n\x0etarget_metrics\x18\x03 \x01(\x0b\x32..build_event_stream.BuildMetrics.TargetMetrics\x12H\n\x0fpackage_metrics\x18\x04 
\x01(\x0b\x32/.build_event_stream.BuildMetrics.PackageMetrics\x12\x46\n\x0etiming_metrics\x18\x05 \x01(\x0b\x32..build_event_stream.BuildMetrics.TimingMetrics\x12N\n\x12\x63umulative_metrics\x18\x06 \x01(\x0b\x32\x32.build_event_stream.BuildMetrics.CumulativeMetrics\x12J\n\x10\x61rtifact_metrics\x18\x07 \x01(\x0b\x32\x30.build_event_stream.BuildMetrics.ArtifactMetrics\x12O\n\x13\x62uild_graph_metrics\x18\x08 \x01(\x0b\x32\x32.build_event_stream.BuildMetrics.BuildGraphMetrics\x12\x46\n\x0eworker_metrics\x18\t \x03(\x0b\x32..build_event_stream.BuildMetrics.WorkerMetrics\x12H\n\x0fnetwork_metrics\x18\n \x01(\x0b\x32/.build_event_stream.BuildMetrics.NetworkMetrics\x1a\xc9\x03\n\rActionSummary\x12\x17\n\x0f\x61\x63tions_created\x18\x01 \x01(\x03\x12-\n%actions_created_not_including_aspects\x18\x03 \x01(\x03\x12\x18\n\x10\x61\x63tions_executed\x18\x02 \x01(\x03\x12N\n\x0b\x61\x63tion_data\x18\x04 \x03(\x0b\x32\x39.build_event_stream.BuildMetrics.ActionSummary.ActionData\x12\x1d\n\x11remote_cache_hits\x18\x05 \x01(\x03\x42\x02\x18\x01\x12P\n\x0crunner_count\x18\x06 \x03(\x0b\x32:.build_event_stream.BuildMetrics.ActionSummary.RunnerCount\x1ai\n\nActionData\x12\x10\n\x08mnemonic\x18\x01 \x01(\t\x12\x18\n\x10\x61\x63tions_executed\x18\x02 \x01(\x03\x12\x18\n\x10\x66irst_started_ms\x18\x03 \x01(\x03\x12\x15\n\rlast_ended_ms\x18\x04 \x01(\x03\x1a*\n\x0bRunnerCount\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05\x63ount\x18\x02 \x01(\x05\x1a\x93\x02\n\rMemoryMetrics\x12!\n\x19used_heap_size_post_build\x18\x01 \x01(\x03\x12\x1e\n\x16peak_post_gc_heap_size\x18\x02 \x01(\x03\x12,\n$peak_post_gc_tenured_space_heap_size\x18\x04 \x01(\x03\x12V\n\x0fgarbage_metrics\x18\x03 \x03(\x0b\x32=.build_event_stream.BuildMetrics.MemoryMetrics.GarbageMetrics\x1a\x39\n\x0eGarbageMetrics\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x19\n\x11garbage_collected\x18\x02 \x01(\x03\x1au\n\rTargetMetrics\x12\x16\n\x0etargets_loaded\x18\x01 \x01(\x03\x12\x1a\n\x12targets_configured\x18\x02 \x01(\x03\x12\x30\n(targets_configured_not_including_aspects\x18\x03 \x01(\x03\x1a)\n\x0ePackageMetrics\x12\x17\n\x0fpackages_loaded\x18\x01 \x01(\x03\x1a\x63\n\rTimingMetrics\x12\x16\n\x0e\x63pu_time_in_ms\x18\x01 \x01(\x03\x12\x17\n\x0fwall_time_in_ms\x18\x02 \x01(\x03\x12!\n\x19\x61nalysis_phase_time_in_ms\x18\x03 \x01(\x03\x1a=\n\x11\x43umulativeMetrics\x12\x14\n\x0cnum_analyses\x18\x0b \x01(\x05\x12\x12\n\nnum_builds\x18\x0c \x01(\x05\x1a\xcb\x03\n\x0f\x41rtifactMetrics\x12[\n\x15source_artifacts_read\x18\x02 \x01(\x0b\x32<.build_event_stream.BuildMetrics.ArtifactMetrics.FilesMetric\x12[\n\x15output_artifacts_seen\x18\x03 \x01(\x0b\x32<.build_event_stream.BuildMetrics.ArtifactMetrics.FilesMetric\x12h\n\"output_artifacts_from_action_cache\x18\x04 \x01(\x0b\x32<.build_event_stream.BuildMetrics.ArtifactMetrics.FilesMetric\x12Y\n\x13top_level_artifacts\x18\x05 \x01(\x0b\x32<.build_event_stream.BuildMetrics.ArtifactMetrics.FilesMetric\x1a\x33\n\x0b\x46ilesMetric\x12\x15\n\rsize_in_bytes\x18\x01 \x01(\x03\x12\r\n\x05\x63ount\x18\x02 \x01(\x05J\x04\x08\x01\x10\x02\x1a\xfd\x02\n\x11\x42uildGraphMetrics\x12!\n\x19\x61\x63tion_lookup_value_count\x18\x01 \x01(\x05\x12\x37\n/action_lookup_value_count_not_including_aspects\x18\x05 \x01(\x05\x12\x14\n\x0c\x61\x63tion_count\x18\x02 \x01(\x05\x12*\n\"action_count_not_including_aspects\x18\x06 \x01(\x05\x12*\n\"input_file_configured_target_count\x18\x07 \x01(\x05\x12+\n#output_file_configured_target_count\x18\x08 \x01(\x05\x12%\n\x1dother_configured_target_count\x18\t 
\x01(\x05\x12\x1d\n\x15output_artifact_count\x18\x03 \x01(\x05\x12+\n#post_invocation_skyframe_node_count\x18\x04 \x01(\x05\x1a\xe1\x02\n\rWorkerMetrics\x12\x15\n\tworker_id\x18\x01 \x01(\x05\x42\x02\x18\x01\x12\x12\n\nworker_ids\x18\x08 \x03(\r\x12\x12\n\nprocess_id\x18\x02 \x01(\r\x12\x10\n\x08mnemonic\x18\x03 \x01(\t\x12\x14\n\x0cis_multiplex\x18\x04 \x01(\x08\x12\x12\n\nis_sandbox\x18\x05 \x01(\x08\x12\x15\n\ris_measurable\x18\x06 \x01(\x08\x12P\n\x0cworker_stats\x18\x07 \x03(\x0b\x32:.build_event_stream.BuildMetrics.WorkerMetrics.WorkerStats\x1al\n\x0bWorkerStats\x12\x1a\n\x12\x63ollect_time_in_ms\x18\x01 \x01(\x03\x12\x1b\n\x13worker_memory_in_kb\x18\x02 \x01(\x05\x12$\n\x1clast_action_start_time_in_ms\x18\x03 \x01(\x03\x1a\xe5\x02\n\x0eNetworkMetrics\x12`\n\x14system_network_stats\x18\x01 \x01(\x0b\x32\x42.build_event_stream.BuildMetrics.NetworkMetrics.SystemNetworkStats\x1a\xf0\x01\n\x12SystemNetworkStats\x12\x12\n\nbytes_sent\x18\x01 \x01(\x04\x12\x12\n\nbytes_recv\x18\x02 \x01(\x04\x12\x14\n\x0cpackets_sent\x18\x03 \x01(\x04\x12\x14\n\x0cpackets_recv\x18\x04 \x01(\x04\x12\x1f\n\x17peak_bytes_sent_per_sec\x18\x05 \x01(\x04\x12\x1f\n\x17peak_bytes_recv_per_sec\x18\x06 \x01(\x04\x12!\n\x19peak_packets_sent_per_sec\x18\x07 \x01(\x04\x12!\n\x19peak_packets_recv_per_sec\x18\x08 \x01(\x04\"6\n\rBuildToolLogs\x12%\n\x03log\x18\x01 \x03(\x0b\x32\x18.build_event_stream.File\"e\n\x1d\x43onvenienceSymlinksIdentified\x12\x44\n\x14\x63onvenience_symlinks\x18\x01 \x03(\x0b\x32&.build_event_stream.ConvenienceSymlink\"\xa0\x01\n\x12\x43onvenienceSymlink\x12\x0c\n\x04path\x18\x01 \x01(\t\x12=\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32-.build_event_stream.ConvenienceSymlink.Action\x12\x0e\n\x06target\x18\x03 \x01(\t\"-\n\x06\x41\x63tion\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06\x43REATE\x10\x01\x12\n\n\x06\x44\x45LETE\x10\x02\"\x86\x0c\n\nBuildEvent\x12,\n\x02id\x18\x01 \x01(\x0b\x32 .build_event_stream.BuildEventId\x12\x32\n\x08\x63hildren\x18\x02 \x03(\x0b\x32 .build_event_stream.BuildEventId\x12\x14\n\x0clast_message\x18\x14 \x01(\x08\x12\x30\n\x08progress\x18\x03 \x01(\x0b\x32\x1c.build_event_stream.ProgressH\x00\x12.\n\x07\x61\x62orted\x18\x04 \x01(\x0b\x32\x1b.build_event_stream.AbortedH\x00\x12\x33\n\x07started\x18\x05 \x01(\x0b\x32 .build_event_stream.BuildStartedH\x00\x12P\n\x19unstructured_command_line\x18\x0c \x01(\x0b\x32+.build_event_stream.UnstructuredCommandLineH\x00\x12<\n\x17structured_command_line\x18\x16 \x01(\x0b\x32\x19.command_line.CommandLineH\x00\x12;\n\x0eoptions_parsed\x18\r \x01(\x0b\x32!.build_event_stream.OptionsParsedH\x00\x12?\n\x10workspace_status\x18\x10 \x01(\x0b\x32#.build_event_stream.WorkspaceStatusH\x00\x12*\n\x05\x66\x65tch\x18\x15 \x01(\x0b\x32\x19.build_event_stream.FetchH\x00\x12:\n\rconfiguration\x18\x11 \x01(\x0b\x32!.build_event_stream.ConfigurationH\x00\x12\x37\n\x08\x65xpanded\x18\x06 \x01(\x0b\x32#.build_event_stream.PatternExpandedH\x00\x12:\n\nconfigured\x18\x12 \x01(\x0b\x32$.build_event_stream.TargetConfiguredH\x00\x12\x34\n\x06\x61\x63tion\x18\x07 \x01(\x0b\x32\".build_event_stream.ActionExecutedH\x00\x12\x41\n\x12named_set_of_files\x18\x0f \x01(\x0b\x32#.build_event_stream.NamedSetOfFilesH\x00\x12\x37\n\tcompleted\x18\x08 \x01(\x0b\x32\".build_event_stream.TargetCompleteH\x00\x12\x35\n\x0btest_result\x18\n \x01(\x0b\x32\x1e.build_event_stream.TestResultH\x00\x12\x37\n\x0ctest_summary\x18\t \x01(\x0b\x32\x1f.build_event_stream.TestSummaryH\x00\x12;\n\x0etarget_summary\x18\x1c 
\x01(\x0b\x32!.build_event_stream.TargetSummaryH\x00\x12\x35\n\x08\x66inished\x18\x0e \x01(\x0b\x32!.build_event_stream.BuildFinishedH\x00\x12<\n\x0f\x62uild_tool_logs\x18\x17 \x01(\x0b\x32!.build_event_stream.BuildToolLogsH\x00\x12\x39\n\rbuild_metrics\x18\x18 \x01(\x0b\x32 .build_event_stream.BuildMetricsH\x00\x12=\n\x0eworkspace_info\x18\x19 \x01(\x0b\x32#.build_event_stream.WorkspaceConfigH\x00\x12;\n\x0e\x62uild_metadata\x18\x1a \x01(\x0b\x32!.build_event_stream.BuildMetadataH\x00\x12\\\n\x1f\x63onvenience_symlinks_identified\x18\x1b \x01(\x0b\x32\x31.build_event_stream.ConvenienceSymlinksIdentifiedH\x00\x42\t\n\x07payloadJ\x04\x08\x0b\x10\x0cJ\x04\x08\x13\x10\x14*G\n\x08TestSize\x12\x0b\n\x07UNKNOWN\x10\x00\x12\t\n\x05SMALL\x10\x01\x12\n\n\x06MEDIUM\x10\x02\x12\t\n\x05LARGE\x10\x03\x12\x0c\n\x08\x45NORMOUS\x10\x04*\xa4\x01\n\nTestStatus\x12\r\n\tNO_STATUS\x10\x00\x12\n\n\x06PASSED\x10\x01\x12\t\n\x05\x46LAKY\x10\x02\x12\x0b\n\x07TIMEOUT\x10\x03\x12\n\n\x06\x46\x41ILED\x10\x04\x12\x0e\n\nINCOMPLETE\x10\x05\x12\x12\n\x0eREMOTE_FAILURE\x10\x06\x12\x13\n\x0f\x46\x41ILED_TO_BUILD\x10\x07\x12\x1e\n\x1aTOOL_HALTED_BEFORE_TESTING\x10\x08\x42H\n.com.google.devtools.build.lib.buildeventstreamB\x16\x42uildEventStreamProtosb\x06proto3')
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'src.main.java.com.google.devtools.build.lib.buildeventstream.proto.build_event_stream_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n.com.google.devtools.build.lib.buildeventstreamB\026BuildEventStreamProtos'
_BUILDSTARTED.fields_by_name['start_time_millis']._options = None
_BUILDSTARTED.fields_by_name['start_time_millis']._serialized_options = b'\030\001'
_BUILDMETADATA_METADATAENTRY._options = None
_BUILDMETADATA_METADATAENTRY._serialized_options = b'8\001'
_CONFIGURATION_MAKEVARIABLEENTRY._options = None
_CONFIGURATION_MAKEVARIABLEENTRY._serialized_options = b'8\001'
_ACTIONEXECUTED.fields_by_name['label']._options = None
_ACTIONEXECUTED.fields_by_name['label']._serialized_options = b'\030\001'
_ACTIONEXECUTED.fields_by_name['configuration']._options = None
_ACTIONEXECUTED.fields_by_name['configuration']._serialized_options = b'\030\001'
_TARGETCOMPLETE.fields_by_name['target_kind']._options = None
_TARGETCOMPLETE.fields_by_name['target_kind']._serialized_options = b'\030\001'
_TARGETCOMPLETE.fields_by_name['test_size']._options = None
_TARGETCOMPLETE.fields_by_name['test_size']._serialized_options = b'\030\001'
_TARGETCOMPLETE.fields_by_name['important_output']._options = None
_TARGETCOMPLETE.fields_by_name['important_output']._serialized_options = b'\030\001'
_TARGETCOMPLETE.fields_by_name['test_timeout_seconds']._options = None
_TARGETCOMPLETE.fields_by_name['test_timeout_seconds']._serialized_options = b'\030\001'
_TESTRESULT_EXECUTIONINFO_TIMINGBREAKDOWN.fields_by_name['time_millis']._options = None
_TESTRESULT_EXECUTIONINFO_TIMINGBREAKDOWN.fields_by_name['time_millis']._serialized_options = b'\030\001'
_TESTRESULT_EXECUTIONINFO.fields_by_name['timeout_seconds']._options = None
_TESTRESULT_EXECUTIONINFO.fields_by_name['timeout_seconds']._serialized_options = b'\030\001'
_TESTRESULT.fields_by_name['test_attempt_start_millis_epoch']._options = None
_TESTRESULT.fields_by_name['test_attempt_start_millis_epoch']._serialized_options = b'\030\001'
_TESTRESULT.fields_by_name['test_attempt_duration_millis']._options = None
_TESTRESULT.fields_by_name['test_attempt_duration_millis']._serialized_options = b'\030\001'
_TESTSUMMARY.fields_by_name['first_start_time_millis']._options = None
_TESTSUMMARY.fields_by_name['first_start_time_millis']._serialized_options = b'\030\001'
_TESTSUMMARY.fields_by_name['last_stop_time_millis']._options = None
_TESTSUMMARY.fields_by_name['last_stop_time_millis']._serialized_options = b'\030\001'
_TESTSUMMARY.fields_by_name['total_run_duration_millis']._options = None
_TESTSUMMARY.fields_by_name['total_run_duration_millis']._serialized_options = b'\030\001'
_BUILDFINISHED.fields_by_name['overall_success']._options = None
_BUILDFINISHED.fields_by_name['overall_success']._serialized_options = b'\030\001'
_BUILDFINISHED.fields_by_name['finish_time_millis']._options = None
_BUILDFINISHED.fields_by_name['finish_time_millis']._serialized_options = b'\030\001'
_BUILDFINISHED.fields_by_name['anomaly_report']._options = None
_BUILDFINISHED.fields_by_name['anomaly_report']._serialized_options = b'\030\001'
_BUILDMETRICS_ACTIONSUMMARY.fields_by_name['remote_cache_hits']._options = None
_BUILDMETRICS_ACTIONSUMMARY.fields_by_name['remote_cache_hits']._serialized_options = b'\030\001'
_BUILDMETRICS_WORKERMETRICS.fields_by_name['worker_id']._options = None
_BUILDMETRICS_WORKERMETRICS.fields_by_name['worker_id']._serialized_options = b'\030\001'
_TESTSIZE._serialized_start=13796
_TESTSIZE._serialized_end=13867
_TESTSTATUS._serialized_start=13870
_TESTSTATUS._serialized_end=14034
_BUILDEVENTID._serialized_start=303
_BUILDEVENTID._serialized_end=3637
_BUILDEVENTID_UNKNOWNBUILDEVENTID._serialized_start=2337
_BUILDEVENTID_UNKNOWNBUILDEVENTID._serialized_end=2375
_BUILDEVENTID_PROGRESSID._serialized_start=2377
_BUILDEVENTID_PROGRESSID._serialized_end=2411
_BUILDEVENTID_BUILDSTARTEDID._serialized_start=2413
_BUILDEVENTID_BUILDSTARTEDID._serialized_end=2429
_BUILDEVENTID_UNSTRUCTUREDCOMMANDLINEID._serialized_start=2431
_BUILDEVENTID_UNSTRUCTUREDCOMMANDLINEID._serialized_end=2458
_BUILDEVENTID_STRUCTUREDCOMMANDLINEID._serialized_start=2460
_BUILDEVENTID_STRUCTUREDCOMMANDLINEID._serialized_end=2513
_BUILDEVENTID_WORKSPACESTATUSID._serialized_start=2515
_BUILDEVENTID_WORKSPACESTATUSID._serialized_end=2534
_BUILDEVENTID_OPTIONSPARSEDID._serialized_start=2536
_BUILDEVENTID_OPTIONSPARSEDID._serialized_end=2553
_BUILDEVENTID_FETCHID._serialized_start=2555
_BUILDEVENTID_FETCHID._serialized_end=2577
_BUILDEVENTID_PATTERNEXPANDEDID._serialized_start=2579
_BUILDEVENTID_PATTERNEXPANDEDID._serialized_end=2615
_BUILDEVENTID_WORKSPACECONFIGID._serialized_start=2617
_BUILDEVENTID_WORKSPACECONFIGID._serialized_end=2636
_BUILDEVENTID_BUILDMETADATAID._serialized_start=2638
_BUILDEVENTID_BUILDMETADATAID._serialized_end=2655
_BUILDEVENTID_TARGETCONFIGUREDID._serialized_start=2657
_BUILDEVENTID_TARGETCONFIGUREDID._serialized_end=2708
_BUILDEVENTID_NAMEDSETOFFILESID._serialized_start=2710
_BUILDEVENTID_NAMEDSETOFFILESID._serialized_end=2741
_BUILDEVENTID_CONFIGURATIONID._serialized_start=2743
_BUILDEVENTID_CONFIGURATIONID._serialized_end=2772
_BUILDEVENTID_TARGETCOMPLETEDID._serialized_start=2774
_BUILDEVENTID_TARGETCOMPLETEDID._serialized_end=2897
_BUILDEVENTID_ACTIONCOMPLETEDID._serialized_start=2900
_BUILDEVENTID_ACTIONCOMPLETEDID._serialized_end=3031
_BUILDEVENTID_UNCONFIGUREDLABELID._serialized_start=3033
_BUILDEVENTID_UNCONFIGUREDLABELID._serialized_end=3069
_BUILDEVENTID_CONFIGUREDLABELID._serialized_start=3071
_BUILDEVENTID_CONFIGUREDLABELID._serialized_end=3178
_BUILDEVENTID_TESTRESULTID._serialized_start=3181
_BUILDEVENTID_TESTRESULTID._serialized_end=3328
_BUILDEVENTID_TESTSUMMARYID._serialized_start=3330
_BUILDEVENTID_TESTSUMMARYID._serialized_end=3433
_BUILDEVENTID_TARGETSUMMARYID._serialized_start=3435
_BUILDEVENTID_TARGETSUMMARYID._serialized_end=3540
_BUILDEVENTID_BUILDFINISHEDID._serialized_start=3542
_BUILDEVENTID_BUILDFINISHEDID._serialized_end=3559
_BUILDEVENTID_BUILDTOOLLOGSID._serialized_start=3561
_BUILDEVENTID_BUILDTOOLLOGSID._serialized_end=3578
_BUILDEVENTID_BUILDMETRICSID._serialized_start=3580
_BUILDEVENTID_BUILDMETRICSID._serialized_end=3596
_BUILDEVENTID_CONVENIENCESYMLINKSIDENTIFIEDID._serialized_start=3598
_BUILDEVENTID_CONVENIENCESYMLINKSIDENTIFIEDID._serialized_end=3631
_PROGRESS._serialized_start=3639
_PROGRESS._serialized_end=3681
_ABORTED._serialized_start=3684
_ABORTED._serialized_end=4003
_ABORTED_ABORTREASON._serialized_start=3774
_ABORTED_ABORTREASON._serialized_end=4003
_BUILDSTARTED._serialized_start=4006
_BUILDSTARTED._serialized_end=4263
_WORKSPACECONFIG._serialized_start=4265
_WORKSPACECONFIG._serialized_end=4307
_UNSTRUCTUREDCOMMANDLINE._serialized_start=4309
_UNSTRUCTUREDCOMMANDLINE._serialized_end=4348
_OPTIONSPARSED._serialized_start=4351
_OPTIONSPARSED._serialized_end=4558
_FETCH._serialized_start=4560
_FETCH._serialized_end=4584
_WORKSPACESTATUS._serialized_start=4586
_WORKSPACESTATUS._serialized_end=4695
_WORKSPACESTATUS_ITEM._serialized_start=4661
_WORKSPACESTATUS_ITEM._serialized_end=4695
_BUILDMETADATA._serialized_start=4698
_BUILDMETADATA._serialized_end=4829
_BUILDMETADATA_METADATAENTRY._serialized_start=4782
_BUILDMETADATA_METADATAENTRY._serialized_end=4829
_CONFIGURATION._serialized_start=4832
_CONFIGURATION._serialized_end=5047
_CONFIGURATION_MAKEVARIABLEENTRY._serialized_start=4996
_CONFIGURATION_MAKEVARIABLEENTRY._serialized_end=5047
_PATTERNEXPANDED._serialized_start=5050
_PATTERNEXPANDED._serialized_end=5218
_PATTERNEXPANDED_TESTSUITEEXPANSION._serialized_start=5156
_PATTERNEXPANDED_TESTSUITEEXPANSION._serialized_end=5218
_TARGETCONFIGURED._serialized_start=5220
_TARGETCONFIGURED._serialized_end=5321
_FILE._serialized_start=5323
_FILE._serialized_end=5439
_NAMEDSETOFFILES._serialized_start=5442
_NAMEDSETOFFILES._serialized_end=5571
_ACTIONEXECUTED._serialized_start=5574
_ACTIONEXECUTED._serialized_end=6004
_OUTPUTGROUP._serialized_start=6006
_OUTPUTGROUP._serialized_end=6130
_TARGETCOMPLETE._serialized_start=6133
_TARGETCOMPLETE._serialized_end=6559
_TESTRESULT._serialized_start=6562
_TESTRESULT._serialized_end=7540
_TESTRESULT_EXECUTIONINFO._serialized_start=7015
_TESTRESULT_EXECUTIONINFO._serialized_end=7534
_TESTRESULT_EXECUTIONINFO_TIMINGBREAKDOWN._serialized_start=7314
_TESTRESULT_EXECUTIONINFO_TIMINGBREAKDOWN._serialized_end=7488
_TESTRESULT_EXECUTIONINFO_RESOURCEUSAGE._serialized_start=7490
_TESTRESULT_EXECUTIONINFO_RESOURCEUSAGE._serialized_end=7534
_TESTSUMMARY._serialized_start=7543
_TESTSUMMARY._serialized_end=8082
_TARGETSUMMARY._serialized_start=8084
_TARGETSUMMARY._serialized_end=8191
_BUILDFINISHED._serialized_start=8194
_BUILDFINISHED._serialized_end=8539
_BUILDFINISHED_EXITCODE._serialized_start=8461
_BUILDFINISHED_EXITCODE._serialized_end=8499
_BUILDFINISHED_ANOMALYREPORT._serialized_start=8501
_BUILDFINISHED_ANOMALYREPORT._serialized_end=8539
_BUILDMETRICS._serialized_start=8542
_BUILDMETRICS._serialized_end=11927
_BUILDMETRICS_ACTIONSUMMARY._serialized_start=9304
_BUILDMETRICS_ACTIONSUMMARY._serialized_end=9761
_BUILDMETRICS_ACTIONSUMMARY_ACTIONDATA._serialized_start=9612
_BUILDMETRICS_ACTIONSUMMARY_ACTIONDATA._serialized_end=9717
_BUILDMETRICS_ACTIONSUMMARY_RUNNERCOUNT._serialized_start=9719
_BUILDMETRICS_ACTIONSUMMARY_RUNNERCOUNT._serialized_end=9761
_BUILDMETRICS_MEMORYMETRICS._serialized_start=9764
_BUILDMETRICS_MEMORYMETRICS._serialized_end=10039
_BUILDMETRICS_MEMORYMETRICS_GARBAGEMETRICS._serialized_start=9982
_BUILDMETRICS_MEMORYMETRICS_GARBAGEMETRICS._serialized_end=10039
_BUILDMETRICS_TARGETMETRICS._serialized_start=10041
_BUILDMETRICS_TARGETMETRICS._serialized_end=10158
_BUILDMETRICS_PACKAGEMETRICS._serialized_start=10160
_BUILDMETRICS_PACKAGEMETRICS._serialized_end=10201
_BUILDMETRICS_TIMINGMETRICS._serialized_start=10203
_BUILDMETRICS_TIMINGMETRICS._serialized_end=10302
_BUILDMETRICS_CUMULATIVEMETRICS._serialized_start=10304
_BUILDMETRICS_CUMULATIVEMETRICS._serialized_end=10365
_BUILDMETRICS_ARTIFACTMETRICS._serialized_start=10368
_BUILDMETRICS_ARTIFACTMETRICS._serialized_end=10827
_BUILDMETRICS_ARTIFACTMETRICS_FILESMETRIC._serialized_start=10770
_BUILDMETRICS_ARTIFACTMETRICS_FILESMETRIC._serialized_end=10821
_BUILDMETRICS_BUILDGRAPHMETRICS._serialized_start=10830
_BUILDMETRICS_BUILDGRAPHMETRICS._serialized_end=11211
_BUILDMETRICS_WORKERMETRICS._serialized_start=11214
_BUILDMETRICS_WORKERMETRICS._serialized_end=11567
_BUILDMETRICS_WORKERMETRICS_WORKERSTATS._serialized_start=11459
_BUILDMETRICS_WORKERMETRICS_WORKERSTATS._serialized_end=11567
_BUILDMETRICS_NETWORKMETRICS._serialized_start=11570
_BUILDMETRICS_NETWORKMETRICS._serialized_end=11927
_BUILDMETRICS_NETWORKMETRICS_SYSTEMNETWORKSTATS._serialized_start=11687
_BUILDMETRICS_NETWORKMETRICS_SYSTEMNETWORKSTATS._serialized_end=11927
_BUILDTOOLLOGS._serialized_start=11929
_BUILDTOOLLOGS._serialized_end=11983
_CONVENIENCESYMLINKSIDENTIFIED._serialized_start=11985
_CONVENIENCESYMLINKSIDENTIFIED._serialized_end=12086
_CONVENIENCESYMLINK._serialized_start=12089
_CONVENIENCESYMLINK._serialized_end=12249
_CONVENIENCESYMLINK_ACTION._serialized_start=12204
_CONVENIENCESYMLINK_ACTION._serialized_end=12249
_BUILDEVENT._serialized_start=12252
_BUILDEVENT._serialized_end=13794
# @@protoc_insertion_point(module_scope)
|
xmanager-main
|
xmanager/generated/build_event_stream_pb2.py
|
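A minimal round-trip sketch for the generated module above (not part of the original file): the import path follows the file_path column and may differ in your checkout, and only fields visible in the serialized descriptor (last_message, the payload oneof, finished.overall_success) are used.
from xmanager.generated import build_event_stream_pb2 as bes_pb2

# Build the terminal event of a stream, serialize it, and parse it back.
event = bes_pb2.BuildEvent()
event.last_message = True              # plain bool field
event.finished.overall_success = True  # writing a member selects the `payload` oneof
blob = event.SerializeToString()       # standard protobuf message API
parsed = bes_pb2.BuildEvent.FromString(blob)
assert parsed.WhichOneof("payload") == "finished"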
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pyformat: disable
# Generated by the protocol buffer compiler. DO NOT EDIT!
# pylint: skip-file
# source: src/main/protobuf/invocation_policy.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n)src/main/protobuf/invocation_policy.proto\x12\x17\x62laze.invocation_policy\"N\n\x10InvocationPolicy\x12:\n\rflag_policies\x18\x01 \x03(\x0b\x32#.blaze.invocation_policy.FlagPolicy\"\xb4\x02\n\nFlagPolicy\x12\x11\n\tflag_name\x18\x01 \x01(\t\x12\x10\n\x08\x63ommands\x18\x02 \x03(\t\x12\x36\n\tset_value\x18\x03 \x01(\x0b\x32!.blaze.invocation_policy.SetValueH\x00\x12:\n\x0buse_default\x18\x04 \x01(\x0b\x32#.blaze.invocation_policy.UseDefaultH\x00\x12\x42\n\x0f\x64isallow_values\x18\x05 \x01(\x0b\x32\'.blaze.invocation_policy.DisallowValuesH\x00\x12<\n\x0c\x61llow_values\x18\x06 \x01(\x0b\x32$.blaze.invocation_policy.AllowValuesH\x00\x42\x0b\n\toperation\"\xc6\x01\n\x08SetValue\x12\x12\n\nflag_value\x18\x01 \x03(\t\x12<\n\x08\x62\x65havior\x18\x04 \x01(\x0e\x32*.blaze.invocation_policy.SetValue.Behavior\"\\\n\x08\x42\x65havior\x12\r\n\tUNDEFINED\x10\x00\x12\x13\n\x0f\x41LLOW_OVERRIDES\x10\x01\x12\n\n\x06\x41PPEND\x10\x02\x12 \n\x1c\x46INAL_VALUE_IGNORE_OVERRIDES\x10\x03J\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\"\x0c\n\nUseDefault\"\x97\x01\n\x0e\x44isallowValues\x12\x19\n\x11\x64isallowed_values\x18\x01 \x03(\t\x12\x13\n\tnew_value\x18\x03 \x01(\tH\x00\x12:\n\x0buse_default\x18\x04 \x01(\x0b\x32#.blaze.invocation_policy.UseDefaultH\x00\x42\x13\n\x11replacement_valueJ\x04\x08\x02\x10\x03\"\x91\x01\n\x0b\x41llowValues\x12\x16\n\x0e\x61llowed_values\x18\x01 \x03(\t\x12\x13\n\tnew_value\x18\x03 \x01(\tH\x00\x12:\n\x0buse_default\x18\x04 \x01(\x0b\x32#.blaze.invocation_policy.UseDefaultH\x00\x42\x13\n\x11replacement_valueJ\x04\x08\x02\x10\x03\x42-\n+com.google.devtools.build.lib.runtime.proto')
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'src.main.protobuf.invocation_policy_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n+com.google.devtools.build.lib.runtime.proto'
_INVOCATIONPOLICY._serialized_start=70
_INVOCATIONPOLICY._serialized_end=148
_FLAGPOLICY._serialized_start=151
_FLAGPOLICY._serialized_end=459
_SETVALUE._serialized_start=462
_SETVALUE._serialized_end=660
_SETVALUE_BEHAVIOR._serialized_start=556
_SETVALUE_BEHAVIOR._serialized_end=648
_USEDEFAULT._serialized_start=662
_USEDEFAULT._serialized_end=674
_DISALLOWVALUES._serialized_start=677
_DISALLOWVALUES._serialized_end=828
_ALLOWVALUES._serialized_start=831
_ALLOWVALUES._serialized_end=976
# @@protoc_insertion_point(module_scope)
|
xmanager-main
|
xmanager/generated/invocation_policy_pb2.py
|
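A short sketch composing an InvocationPolicy from the classes above; the flag name and value are placeholders, everything else follows the message definitions in the descriptor.
from xmanager.generated import invocation_policy_pb2 as policy_pb2

# One policy entry that pins a (hypothetical) flag for the `build` command.
policy = policy_pb2.InvocationPolicy()
flag_policy = policy.flag_policies.add()  # repeated FlagPolicy
flag_policy.flag_name = "keep_going"      # placeholder flag name
flag_policy.commands.append("build")
flag_policy.set_value.flag_value.append("true")  # selects the `operation` oneof
flag_policy.set_value.behavior = (
    policy_pb2.SetValue.FINAL_VALUE_IGNORE_OVERRIDES)
serialized = policy.SerializeToString()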
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pyformat: disable
# Generated by the protocol buffer compiler. DO NOT EDIT!
# pylint: skip-file
# source: src/main/protobuf/option_filters.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n&src/main/protobuf/option_filters.proto\x12\x07options*\xea\x02\n\x0fOptionEffectTag\x12\x0b\n\x07UNKNOWN\x10\x00\x12\t\n\x05NO_OP\x10\x01\x12\x1b\n\x17LOSES_INCREMENTAL_STATE\x10\x02\x12\x12\n\x0e\x43HANGES_INPUTS\x10\x03\x12\x13\n\x0f\x41\x46\x46\x45\x43TS_OUTPUTS\x10\x04\x12\x18\n\x14\x42UILD_FILE_SEMANTICS\x10\x05\x12 \n\x1c\x42\x41ZEL_INTERNAL_CONFIGURATION\x10\x06\x12\x18\n\x14LOADING_AND_ANALYSIS\x10\x07\x12\r\n\tEXECUTION\x10\x08\x12\'\n#HOST_MACHINE_RESOURCE_OPTIMIZATIONS\x10\t\x12\x15\n\x11\x45\x41GERNESS_TO_EXIT\x10\n\x12\x14\n\x10\x42\x41ZEL_MONITORING\x10\x0b\x12\x13\n\x0fTERMINAL_OUTPUT\x10\x0c\x12\x18\n\x14\x41\x43TION_COMMAND_LINES\x10\r\x12\x0f\n\x0bTEST_RUNNER\x10\x0e*\xb2\x01\n\x11OptionMetadataTag\x12\x10\n\x0c\x45XPERIMENTAL\x10\x00\x12\x17\n\x13INCOMPATIBLE_CHANGE\x10\x01\x12\x0e\n\nDEPRECATED\x10\x02\x12\n\n\x06HIDDEN\x10\x03\x12\x0c\n\x08INTERNAL\x10\x04\x12\x1b\n\x17\x45XPLICIT_IN_OUTPUT_PATH\x10\x06\"\x04\x08\x05\x10\x05*%TRIGGERED_BY_ALL_INCOMPATIBLE_CHANGESB*\n(com.google.devtools.common.options.protob\x06proto3')
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'src.main.protobuf.option_filters_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n(com.google.devtools.common.options.proto'
_OPTIONEFFECTTAG._serialized_start=52
_OPTIONEFFECTTAG._serialized_end=414
_OPTIONMETADATATAG._serialized_start=417
_OPTIONMETADATATAG._serialized_end=595
# @@protoc_insertion_point(module_scope)
|
xmanager-main
|
xmanager/generated/option_filters_pb2.py
|
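The module above defines only two top-level enums; a sketch of the standard enum-wrapper lookups the generated code supports.
from xmanager.generated import option_filters_pb2 as filters_pb2

# Name <-> number lookups on the generated EnumTypeWrapper.
no_op = filters_pb2.OptionEffectTag.Value("NO_OP")
assert no_op == filters_pb2.NO_OP  # top-level values are also module constants
assert filters_pb2.OptionEffectTag.Name(no_op) == "NO_OP"
# Enumerate every (name, number) pair, e.g. to validate tags read elsewhere.
for name, number in filters_pb2.OptionEffectTag.items():
  print(name, number)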
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pyformat: disable
# Generated by the protocol buffer compiler. DO NOT EDIT!
# pylint: skip-file
# source: xm_local/storage/data.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1bxm_local/storage/data.proto\x12\x08xmanager\"\x8a\x01\n\x03Job\x12#\n\x05local\x18\x01 \x01(\x0b\x32\x12.xmanager.LocalJobH\x00\x12\'\n\x04\x63\x61ip\x18\x02 \x01(\x0b\x32\x17.xmanager.AIPlatformJobH\x00\x12-\n\nkubernetes\x18\x03 \x01(\x0b\x32\x17.xmanager.KubernetesJobH\x00\x42\x06\n\x04kind\"7\n\x08LocalJob\x12\x0b\n\x03pid\x18\x01 \x01(\t\x12\x0b\n\x03\x63md\x18\x02 \x01(\t\x12\x11\n\ttimestamp\x18\x03 \x01(\x03\"&\n\rAIPlatformJob\x12\x15\n\rresource_name\x18\x01 \x01(\t\"4\n\rKubernetesJob\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12\x10\n\x08job_name\x18\x02 \x01(\tb\x06proto3')
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'xm_local.storage.data_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_JOB._serialized_start=42
_JOB._serialized_end=180
_LOCALJOB._serialized_start=182
_LOCALJOB._serialized_end=237
_AIPLATFORMJOB._serialized_start=239
_AIPLATFORMJOB._serialized_end=277
_KUBERNETESJOB._serialized_start=279
_KUBERNETESJOB._serialized_end=331
# @@protoc_insertion_point(module_scope)
|
xmanager-main
|
xmanager/generated/data_pb2.py
|
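A sketch of the Job wrapper above, which stores exactly one of three job kinds in its `kind` oneof. The import path mirrors the file_path column (the proto package itself is xm_local.storage, so the module may also be importable from there), and the namespace and job name are placeholders.
from xmanager.generated import data_pb2

# Record a (hypothetical) Kubernetes job, then round-trip it.
job = data_pb2.Job()
job.kubernetes.namespace = "default"  # placeholder values
job.kubernetes.job_name = "train-0"
assert job.WhichOneof("kind") == "kubernetes"
restored = data_pb2.Job.FromString(job.SerializeToString())
assert restored.kubernetes.job_name == "train-0"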
# Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pyformat: disable
# Generated by the protocol buffer compiler. DO NOT EDIT!
# pylint: skip-file
# source: src/main/protobuf/failure_details.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\'src/main/protobuf/failure_details.proto\x12\x0f\x66\x61ilure_details\x1a google/protobuf/descriptor.proto\"*\n\x15\x46\x61ilureDetailMetadata\x12\x11\n\texit_code\x18\x01 \x01(\r\"\xe1\x1c\n\rFailureDetail\x12\x0f\n\x07message\x18\x01 \x01(\t\x12\x33\n\x0binterrupted\x18\x65 \x01(\x0b\x32\x1c.failure_details.InterruptedH\x00\x12\x42\n\x13\x65xternal_repository\x18g \x01(\x0b\x32#.failure_details.ExternalRepositoryH\x00\x12\x38\n\x0e\x62uild_progress\x18h \x01(\x0b\x32\x1e.failure_details.BuildProgressH\x00\x12\x38\n\x0eremote_options\x18j \x01(\x0b\x32\x1e.failure_details.RemoteOptionsH\x00\x12@\n\x12\x63lient_environment\x18k \x01(\x0b\x32\".failure_details.ClientEnvironmentH\x00\x12\'\n\x05\x63rash\x18l \x01(\x0b\x32\x16.failure_details.CrashH\x00\x12\x38\n\x0esymlink_forest\x18n \x01(\x0b\x32\x1e.failure_details.SymlinkForestH\x00\x12:\n\x0fpackage_options\x18r \x01(\x0b\x32\x1f.failure_details.PackageOptionsH\x00\x12<\n\x10remote_execution\x18s \x01(\x0b\x32 .failure_details.RemoteExecutionH\x00\x12/\n\texecution\x18t \x01(\x0b\x32\x1a.failure_details.ExecutionH\x00\x12\x31\n\nworkspaces\x18u \x01(\x0b\x32\x1b.failure_details.WorkspacesH\x00\x12\x36\n\rcrash_options\x18v \x01(\x0b\x32\x1d.failure_details.CrashOptionsH\x00\x12\x31\n\nfilesystem\x18w \x01(\x0b\x32\x1b.failure_details.FilesystemH\x00\x12>\n\x11\x65xecution_options\x18y \x01(\x0b\x32!.failure_details.ExecutionOptionsH\x00\x12+\n\x07\x63ommand\x18z \x01(\x0b\x32\x18.failure_details.CommandH\x00\x12\'\n\x05spawn\x18{ \x01(\x0b\x32\x16.failure_details.SpawnH\x00\x12\x32\n\x0bgrpc_server\x18| \x01(\x0b\x32\x1b.failure_details.GrpcServerH\x00\x12@\n\x12\x63\x61nonicalize_flags\x18} \x01(\x0b\x32\".failure_details.CanonicalizeFlagsH\x00\x12\x42\n\x13\x62uild_configuration\x18~ \x01(\x0b\x32#.failure_details.BuildConfigurationH\x00\x12\x34\n\x0cinfo_command\x18\x7f \x01(\x0b\x32\x1c.failure_details.InfoCommandH\x00\x12\x39\n\x0ememory_options\x18\x81\x01 \x01(\x0b\x32\x1e.failure_details.MemoryOptionsH\x00\x12(\n\x05query\x18\x82\x01 \x01(\x0b\x32\x16.failure_details.QueryH\x00\x12;\n\x0flocal_execution\x18\x84\x01 \x01(\x0b\x32\x1f.failure_details.LocalExecutionH\x00\x12\x35\n\x0c\x61\x63tion_cache\x18\x86\x01 \x01(\x0b\x32\x1c.failure_details.ActionCacheH\x00\x12\x37\n\rfetch_command\x18\x87\x01 \x01(\x0b\x32\x1d.failure_details.FetchCommandH\x00\x12\x35\n\x0csync_command\x18\x88\x01 \x01(\x0b\x32\x1c.failure_details.SyncCommandH\x00\x12,\n\x07sandbox\x18\x89\x01 \x01(\x0b\x32\x18.failure_details.SandboxH\x00\x12=\n\x10include_scanning\x18\x8b\x01 \x01(\x0b\x32 .failure_details.IncludeScanningH\x00\x12\x35\n\x0ctest_command\x18\x8c\x01 \x01(\x0b\x32\x1c.failure_details.TestCommandH\x00\x12\x35\n\x0c\x61\x63tion_query\x18\x8d\x01 \x01(\x0b\x32\x1c.failure_details.ActionQueryH\x00\x12;\n\x0ftarget_patterns\x18\x8e\x01 \x01(\x0b\x32\x1f.failure_details.TargetPatternsH\x00\x12\x37\n\rclean_command\x18\x90\x01 \x01(\x0b\x32\x1d.failure_details.CleanCommandH\x00\x12\x39\n\x0e\x63onfig_command\x18\x91\x01 \x01(\x0b\x32\x1e.failure_details.ConfigCommandH\x00\x12\x41\n\x12\x63onfigurable_query\x18\x92\x01 \x01(\x0b\x32\".failure_details.ConfigurableQueryH\x00\x12\x35\n\x0c\x64ump_command\x18\x93\x01 \x01(\x0b\x32\x1c.failure_details.DumpCommandH\x00\x12\x35\n\x0chelp_command\x18\x94\x01 \x01(\x0b\x32\x1c.failure_details.HelpCommandH\x00\x12\x39\n\x0emobile_install\x18\x96\x01 \x01(\x0b\x32\x1e.failure_details.MobileInstallH\x00\x12;\n\x0fprofile_command\x18\x97\x01 
\x01(\x0b\x32\x1f.failure_details.ProfileCommandH\x00\x12\x33\n\x0brun_command\x18\x98\x01 \x01(\x0b\x32\x1b.failure_details.RunCommandH\x00\x12;\n\x0fversion_command\x18\x99\x01 \x01(\x0b\x32\x1f.failure_details.VersionCommandH\x00\x12\x44\n\x14print_action_command\x18\x9a\x01 \x01(\x0b\x32#.failure_details.PrintActionCommandH\x00\x12=\n\x10workspace_status\x18\x9e\x01 \x01(\x0b\x32 .failure_details.WorkspaceStatusH\x00\x12\x35\n\x0cjava_compile\x18\x9f\x01 \x01(\x0b\x32\x1c.failure_details.JavaCompileH\x00\x12=\n\x10\x61\x63tion_rewinding\x18\xa0\x01 \x01(\x0b\x32 .failure_details.ActionRewindingH\x00\x12\x33\n\x0b\x63pp_compile\x18\xa1\x01 \x01(\x0b\x32\x1b.failure_details.CppCompileH\x00\x12;\n\x0fstarlark_action\x18\xa2\x01 \x01(\x0b\x32\x1f.failure_details.StarlarkActionH\x00\x12\x35\n\x0cninja_action\x18\xa3\x01 \x01(\x0b\x32\x1c.failure_details.NinjaActionH\x00\x12?\n\x11\x64ynamic_execution\x18\xa4\x01 \x01(\x0b\x32!.failure_details.DynamicExecutionH\x00\x12\x33\n\x0b\x66\x61il_action\x18\xa6\x01 \x01(\x0b\x32\x1b.failure_details.FailActionH\x00\x12\x39\n\x0esymlink_action\x18\xa7\x01 \x01(\x0b\x32\x1e.failure_details.SymlinkActionH\x00\x12-\n\x08\x63pp_link\x18\xa8\x01 \x01(\x0b\x32\x18.failure_details.CppLinkH\x00\x12\x31\n\nlto_action\x18\xa9\x01 \x01(\x0b\x32\x1a.failure_details.LtoActionH\x00\x12\x33\n\x0btest_action\x18\xac\x01 \x01(\x0b\x32\x1b.failure_details.TestActionH\x00\x12*\n\x06worker\x18\xad\x01 \x01(\x0b\x32\x17.failure_details.WorkerH\x00\x12.\n\x08\x61nalysis\x18\xae\x01 \x01(\x0b\x32\x19.failure_details.AnalysisH\x00\x12;\n\x0fpackage_loading\x18\xaf\x01 \x01(\x0b\x32\x1f.failure_details.PackageLoadingH\x00\x12\x30\n\ttoolchain\x18\xb1\x01 \x01(\x0b\x32\x1a.failure_details.ToolchainH\x00\x12=\n\x10starlark_loading\x18\xb3\x01 \x01(\x0b\x32 .failure_details.StarlarkLoadingH\x00\x12\x37\n\rexternal_deps\x18\xb5\x01 \x01(\x0b\x32\x1d.failure_details.ExternalDepsH\x00\x12\x39\n\x0e\x64iff_awareness\x18\xb6\x01 \x01(\x0b\x32\x1e.failure_details.DiffAwarenessH\x00\x12=\n\x10modquery_command\x18\xb7\x01 \x01(\x0b\x32 .failure_details.ModqueryCommandH\x00\x12\x35\n\x0c\x62uild_report\x18\xb8\x01 \x01(\x0b\x32\x1c.failure_details.BuildReportH\x00\x42\n\n\x08\x63\x61tegoryJ\x04\x08\x02\x10\x65J\x04\x08\x66\x10gJ\x04\x08i\x10jJ\x04\x08m\x10nJ\x04\x08o\x10rJ\x04\x08x\x10yJ\x06\x08\x80\x01\x10\x81\x01J\x06\x08\x83\x01\x10\x84\x01J\x06\x08\x85\x01\x10\x86\x01J\x06\x08\x8a\x01\x10\x8b\x01J\x06\x08\x8f\x01\x10\x90\x01J\x06\x08\x95\x01\x10\x96\x01J\x06\x08\x9b\x01\x10\x9e\x01J\x06\x08\xa5\x01\x10\xa6\x01J\x06\x08\xaa\x01\x10\xac\x01J\x06\x08\xb0\x01\x10\xb1\x01J\x06\x08\xb2\x01\x10\xb3\x01J\x06\x08\xb4\x01\x10\xb5\x01\"\xa2\x05\n\x0bInterrupted\x12/\n\x04\x63ode\x18\x01 
\x01(\x0e\x32!.failure_details.Interrupted.Code\"\xe1\x04\n\x04\x43ode\x12\x1e\n\x13INTERRUPTED_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08\x08\x12\x16\n\x0bINTERRUPTED\x10\x1c\x1a\x05\xb2\x43\x02\x08\x08\x12\x1b\n\x10\x44\x45PRECATED_BUILD\x10\x04\x1a\x05\xb2\x43\x02\x08\x08\x12&\n\x1b\x44\x45PRECATED_BUILD_COMPLETION\x10\x05\x1a\x05\xb2\x43\x02\x08\x08\x12*\n\x1f\x44\x45PRECATED_PACKAGE_LOADING_SYNC\x10\x06\x1a\x05\xb2\x43\x02\x08\x08\x12)\n\x1e\x44\x45PRECATED_EXECUTOR_COMPLETION\x10\x07\x1a\x05\xb2\x43\x02\x08\x08\x12&\n\x1b\x44\x45PRECATED_COMMAND_DISPATCH\x10\x08\x1a\x05\xb2\x43\x02\x08\x08\x12\x1f\n\x14\x44\x45PRECATED_INFO_ITEM\x10\t\x1a\x05\xb2\x43\x02\x08\x08\x12!\n\x16\x44\x45PRECATED_AFTER_QUERY\x10\n\x1a\x05\xb2\x43\x02\x08\x08\x12#\n\x18\x44\x45PRECATED_FETCH_COMMAND\x10\x11\x1a\x05\xb2\x43\x02\x08\x08\x12\"\n\x17\x44\x45PRECATED_SYNC_COMMAND\x10\x12\x1a\x05\xb2\x43\x02\x08\x08\x12#\n\x18\x44\x45PRECATED_CLEAN_COMMAND\x10\x14\x1a\x05\xb2\x43\x02\x08\x08\x12,\n!DEPRECATED_MOBILE_INSTALL_COMMAND\x10\x15\x1a\x05\xb2\x43\x02\x08\x08\x12\x1b\n\x10\x44\x45PRECATED_QUERY\x10\x16\x1a\x05\xb2\x43\x02\x08\x08\x12!\n\x16\x44\x45PRECATED_RUN_COMMAND\x10\x17\x1a\x05\xb2\x43\x02\x08\x08\x12%\n\x1a\x44\x45PRECATED_OPTIONS_PARSING\x10\x1b\x1a\x05\xb2\x43\x02\x08\x08\"\x04\x08\x01\x10\x03\"\x04\x08\x0b\x10\x10\"\x04\x08\x13\x10\x13\"\x04\x08\x18\x10\x1a\"\xc7\x04\n\x05Spawn\x12)\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1b.failure_details.Spawn.Code\x12\x14\n\x0c\x63\x61tastrophic\x18\x02 \x01(\x08\x12\x17\n\x0fspawn_exit_code\x18\x03 \x01(\x05\"\xe3\x03\n\x04\x43ode\x12\x18\n\rSPAWN_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x18\n\rNON_ZERO_EXIT\x10\x01\x1a\x05\xb2\x43\x02\x08\x01\x12\x12\n\x07TIMEOUT\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\x12\x18\n\rOUT_OF_MEMORY\x10\x03\x1a\x05\xb2\x43\x02\x08\x01\x12\x1b\n\x10\x45XECUTION_FAILED\x10\x04\x1a\x05\xb2\x43\x02\x08\"\x12\x1b\n\x10\x45XECUTION_DENIED\x10\x05\x1a\x05\xb2\x43\x02\x08\x01\x12\x1e\n\x13REMOTE_CACHE_FAILED\x10\x06\x1a\x05\xb2\x43\x02\x08\"\x12)\n\x1e\x43OMMAND_LINE_EXPANSION_FAILURE\x10\x07\x1a\x05\xb2\x43\x02\x08\x01\x12\x1c\n\x11\x45XEC_IO_EXCEPTION\x10\x08\x1a\x05\xb2\x43\x02\x08$\x12\x1a\n\x0fINVALID_TIMEOUT\x10\t\x1a\x05\xb2\x43\x02\x08\x01\x12.\n#INVALID_REMOTE_EXECUTION_PROPERTIES\x10\n\x1a\x05\xb2\x43\x02\x08\x01\x12#\n\x18NO_USABLE_STRATEGY_FOUND\x10\x0b\x1a\x05\xb2\x43\x02\x08\x01\x12(\n\x1dUNSPECIFIED_EXECUTION_FAILURE\x10\x0c\x1a\x05\xb2\x43\x02\x08\x01\x12\x1a\n\x0f\x46ORBIDDEN_INPUT\x10\r\x1a\x05\xb2\x43\x02\x08\x01\x12\x1f\n\x14REMOTE_CACHE_EVICTED\x10\x0e\x1a\x05\xb2\x43\x02\x08\'\"\x84\x02\n\x12\x45xternalRepository\x12\x36\n\x04\x63ode\x18\x01 \x01(\x0e\x32(.failure_details.ExternalRepository.Code\"\xb5\x01\n\x04\x43ode\x12&\n\x1b\x45XTERNAL_REPOSITORY_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x32\n\'OVERRIDE_DISALLOWED_MANAGED_DIRECTORIES\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\x12 \n\x15\x42\x41\x44_DOWNLOADER_CONFIG\x10\x02\x1a\x05\xb2\x43\x02\x08\x02\x12/\n$REPOSITORY_MAPPING_RESOLUTION_FAILED\x10\x03\x1a\x05\xb2\x43\x02\x08%\"\xa7\x06\n\rBuildProgress\x12\x31\n\x04\x63ode\x18\x01 \x01(\x0e\x32#.failure_details.BuildProgress.Code\"\xe2\x05\n\x04\x43ode\x12!\n\x16\x42UILD_PROGRESS_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12 \n\x15OUTPUT_INITIALIZATION\x10\x03\x1a\x05\xb2\x43\x02\x08$\x12.\n#BES_RUNS_PER_TEST_LIMIT_UNSUPPORTED\x10\x04\x1a\x05\xb2\x43\x02\x08\x02\x12 
\n\x15\x42\x45S_LOCAL_WRITE_ERROR\x10\x05\x1a\x05\xb2\x43\x02\x08$\x12#\n\x18\x42\x45S_INITIALIZATION_ERROR\x10\x06\x1a\x05\xb2\x43\x02\x08$\x12#\n\x18\x42\x45S_UPLOAD_TIMEOUT_ERROR\x10\x07\x1a\x05\xb2\x43\x02\x08&\x12!\n\x16\x42\x45S_FILE_WRITE_TIMEOUT\x10\x08\x1a\x05\xb2\x43\x02\x08&\x12\"\n\x17\x42\x45S_FILE_WRITE_IO_ERROR\x10\t\x1a\x05\xb2\x43\x02\x08&\x12%\n\x1a\x42\x45S_FILE_WRITE_INTERRUPTED\x10\n\x1a\x05\xb2\x43\x02\x08&\x12\"\n\x17\x42\x45S_FILE_WRITE_CANCELED\x10\x0b\x1a\x05\xb2\x43\x02\x08&\x12\'\n\x1c\x42\x45S_FILE_WRITE_UNKNOWN_ERROR\x10\x0c\x1a\x05\xb2\x43\x02\x08&\x12&\n\x1b\x42\x45S_UPLOAD_LOCAL_FILE_ERROR\x10\r\x1a\x05\xb2\x43\x02\x08&\x12*\n\x1f\x42\x45S_STREAM_NOT_RETRYING_FAILURE\x10\x0e\x1a\x05\xb2\x43\x02\x08-\x12\x37\n,BES_STREAM_COMPLETED_WITH_UNACK_EVENTS_ERROR\x10\x0f\x1a\x05\xb2\x43\x02\x08-\x12\x38\n-BES_STREAM_COMPLETED_WITH_UNSENT_EVENTS_ERROR\x10\x10\x1a\x05\xb2\x43\x02\x08-\x12\x31\n&BES_STREAM_COMPLETED_WITH_REMOTE_ERROR\x10\x13\x1a\x05\xb2\x43\x02\x08-\x12\x32\n\'BES_UPLOAD_RETRY_LIMIT_EXCEEDED_FAILURE\x10\x11\x1a\x05\xb2\x43\x02\x08&\"\x04\x08\x01\x10\x01\"\x04\x08\x02\x10\x02\"\x04\x08\x12\x10\x12\"\xc3\x02\n\rRemoteOptions\x12\x31\n\x04\x63ode\x18\x01 \x01(\x0e\x32#.failure_details.RemoteOptions.Code\"\xfe\x01\n\x04\x43ode\x12!\n\x16REMOTE_OPTIONS_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x35\n*REMOTE_DEFAULT_EXEC_PROPERTIES_LOGIC_ERROR\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\x12#\n\x18\x43REDENTIALS_READ_FAILURE\x10\x02\x1a\x05\xb2\x43\x02\x08$\x12$\n\x19\x43REDENTIALS_WRITE_FAILURE\x10\x03\x1a\x05\xb2\x43\x02\x08$\x12(\n\x1d\x44OWNLOADER_WITHOUT_GRPC_CACHE\x10\x04\x1a\x05\xb2\x43\x02\x08\x02\x12\'\n\x1c\x45XECUTION_WITH_INVALID_CACHE\x10\x05\x1a\x05\xb2\x43\x02\x08\x02\"\x9a\x01\n\x11\x43lientEnvironment\x12\x35\n\x04\x63ode\x18\x01 \x01(\x0e\x32\'.failure_details.ClientEnvironment.Code\"N\n\x04\x43ode\x12%\n\x1a\x43LIENT_ENVIRONMENT_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1f\n\x14\x43LIENT_CWD_MALFORMED\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\"\xb5\x01\n\x05\x43rash\x12)\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1b.failure_details.Crash.Code\x12*\n\x06\x63\x61uses\x18\x02 \x03(\x0b\x32\x1a.failure_details.Throwable\x12\x1d\n\x15oom_detector_override\x18\x03 \x01(\x08\"6\n\x04\x43ode\x12\x18\n\rCRASH_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x14\n\tCRASH_OOM\x10\x01\x1a\x05\xb2\x43\x02\x08!\"J\n\tThrowable\x12\x17\n\x0fthrowable_class\x18\x01 \x01(\t\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x13\n\x0bstack_trace\x18\x03 \x03(\t\"\xe7\x01\n\rSymlinkForest\x12\x31\n\x04\x63ode\x18\x01 \x01(\x0e\x32#.failure_details.SymlinkForest.Code\"\xa2\x01\n\x04\x43ode\x12!\n\x16SYMLINK_FOREST_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x30\n%TOPLEVEL_OUTDIR_PACKAGE_PATH_CONFLICT\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\x12)\n\x1eTOPLEVEL_OUTDIR_USED_AS_SOURCE\x10\x02\x1a\x05\xb2\x43\x02\x08\x02\x12\x1a\n\x0f\x43REATION_FAILED\x10\x03\x1a\x05\xb2\x43\x02\x08\x02\"\xc4\x01\n\x0b\x42uildReport\x12/\n\x04\x63ode\x18\x01 \x01(\x0e\x32!.failure_details.BuildReport.Code\"\x83\x01\n\x04\x43ode\x12\x1f\n\x14\x42UILD_REPORT_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x34\n)BUILD_REPORT_UPLOADER_NEEDS_PACKAGE_PATHS\x10\x01\x1a\x05\xb2\x43\x02\x08$\x12$\n\x19\x42UILD_REPORT_WRITE_FAILED\x10\x02\x1a\x05\xb2\x43\x02\x08$\"\x9d\x01\n\x0ePackageOptions\x12\x32\n\x04\x63ode\x18\x01 
\x01(\x0e\x32$.failure_details.PackageOptions.Code\"W\n\x04\x43ode\x12\"\n\x17PACKAGE_OPTIONS_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1f\n\x14PACKAGE_PATH_INVALID\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\"\x04\x08\x02\x10\x02\"\x04\x08\x03\x10\x03\"\xb3\x06\n\x0fRemoteExecution\x12\x33\n\x04\x63ode\x18\x01 \x01(\x0e\x32%.failure_details.RemoteExecution.Code\"\xea\x05\n\x04\x43ode\x12#\n\x18REMOTE_EXECUTION_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12%\n\x1a\x43\x41PABILITIES_QUERY_FAILURE\x10\x01\x1a\x05\xb2\x43\x02\x08\"\x12#\n\x18\x43REDENTIALS_INIT_FAILURE\x10\x02\x1a\x05\xb2\x43\x02\x08\x02\x12\x1d\n\x12\x43\x41\x43HE_INIT_FAILURE\x10\x03\x1a\x05\xb2\x43\x02\x08\x02\x12\x1a\n\x0fRPC_LOG_FAILURE\x10\x04\x1a\x05\xb2\x43\x02\x08\x02\x12$\n\x19\x45XEC_CHANNEL_INIT_FAILURE\x10\x05\x1a\x05\xb2\x43\x02\x08\x02\x12%\n\x1a\x43\x41\x43HE_CHANNEL_INIT_FAILURE\x10\x06\x1a\x05\xb2\x43\x02\x08\x02\x12*\n\x1f\x44OWNLOADER_CHANNEL_INIT_FAILURE\x10\x07\x1a\x05\xb2\x43\x02\x08\x02\x12\"\n\x17LOG_DIR_CLEANUP_FAILURE\x10\x08\x1a\x05\xb2\x43\x02\x08$\x12%\n\x1a\x43LIENT_SERVER_INCOMPATIBLE\x10\t\x1a\x05\xb2\x43\x02\x08\"\x12-\n\"DOWNLOADED_INPUTS_DELETION_FAILURE\x10\n\x1a\x05\xb2\x43\x02\x08\"\x12@\n5REMOTE_DOWNLOAD_OUTPUTS_MINIMAL_WITHOUT_INMEMORY_DOTD\x10\x0b\x1a\x05\xb2\x43\x02\x08\x02\x12\x41\n6REMOTE_DOWNLOAD_OUTPUTS_MINIMAL_WITHOUT_INMEMORY_JDEPS\x10\x0c\x1a\x05\xb2\x43\x02\x08\x02\x12\x35\n*INCOMPLETE_OUTPUT_DOWNLOAD_CLEANUP_FAILURE\x10\r\x1a\x05\xb2\x43\x02\x08$\x12;\n0REMOTE_DEFAULT_PLATFORM_PROPERTIES_PARSE_FAILURE\x10\x0e\x1a\x05\xb2\x43\x02\x08\x01\x12\x19\n\x0eILLEGAL_OUTPUT\x10\x0f\x1a\x05\xb2\x43\x02\x08\x01\x12/\n$INVALID_EXEC_AND_PLATFORM_PROPERTIES\x10\x10\x1a\x05\xb2\x43\x02\x08\x01\"\xd8\r\n\tExecution\x12-\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1f.failure_details.Execution.Code\"\x9b\r\n\x04\x43ode\x12\x1c\n\x11\x45XECUTION_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12/\n$EXECUTION_LOG_INITIALIZATION_FAILURE\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\x12&\n\x1b\x45XECUTION_LOG_WRITE_FAILURE\x10\x02\x1a\x05\xb2\x43\x02\x08$\x12$\n\x19\x45XECROOT_CREATION_FAILURE\x10\x03\x1a\x05\xb2\x43\x02\x08$\x12\x38\n-TEMP_ACTION_OUTPUT_DIRECTORY_DELETION_FAILURE\x10\x04\x1a\x05\xb2\x43\x02\x08$\x12\x38\n-TEMP_ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE\x10\x05\x1a\x05\xb2\x43\x02\x08$\x12>\n3PERSISTENT_ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE\x10\x06\x1a\x05\xb2\x43\x02\x08$\x12\x31\n&LOCAL_OUTPUT_DIRECTORY_SYMLINK_FAILURE\x10\x07\x1a\x05\xb2\x43\x02\x08$\x12+\n LOCAL_TEMPLATE_EXPANSION_FAILURE\x10\t\x1a\x05\xb2\x43\x02\x08$\x12-\n\"INPUT_DIRECTORY_CHECK_IO_EXCEPTION\x10\n\x1a\x05\xb2\x43\x02\x08$\x12/\n$EXTRA_ACTION_OUTPUT_CREATION_FAILURE\x10\x0b\x1a\x05\xb2\x43\x02\x08$\x12#\n\x18TEST_RUNNER_IO_EXCEPTION\x10\x0c\x1a\x05\xb2\x43\x02\x08$\x12\"\n\x17\x46ILE_WRITE_IO_EXCEPTION\x10\r\x1a\x05\xb2\x43\x02\x08$\x12$\n\x19TEST_OUT_ERR_IO_EXCEPTION\x10\x0e\x1a\x05\xb2\x43\x02\x08$\x12\x32\n\'SYMLINK_TREE_MANIFEST_COPY_IO_EXCEPTION\x10\x0f\x1a\x05\xb2\x43\x02\x08$\x12\x32\n\'SYMLINK_TREE_MANIFEST_LINK_IO_EXCEPTION\x10\x10\x1a\x05\xb2\x43\x02\x08$\x12-\n\"SYMLINK_TREE_CREATION_IO_EXCEPTION\x10\x11\x1a\x05\xb2\x43\x02\x08$\x12\x32\n\'SYMLINK_TREE_CREATION_COMMAND_EXCEPTION\x10\x12\x1a\x05\xb2\x43\x02\x08$\x12)\n\x1e\x41\x43TION_INPUT_READ_IO_EXCEPTION\x10\x13\x1a\x05\xb2\x43\x02\x08$\x12 
\n\x15\x41\x43TION_NOT_UP_TO_DATE\x10\x14\x1a\x05\xb2\x43\x02\x08\x01\x12-\n\"PSEUDO_ACTION_EXECUTION_PROHIBITED\x10\x15\x1a\x05\xb2\x43\x02\x08\x01\x12*\n\x1f\x44ISCOVERED_INPUT_DOES_NOT_EXIST\x10\x16\x1a\x05\xb2\x43\x02\x08$\x12*\n\x1f\x41\x43TION_OUTPUTS_DELETION_FAILURE\x10\x17\x1a\x05\xb2\x43\x02\x08\x01\x12%\n\x1a\x41\x43TION_OUTPUTS_NOT_CREATED\x10\x18\x1a\x05\xb2\x43\x02\x08\x01\x12&\n\x1b\x41\x43TION_FINALIZATION_FAILURE\x10\x19\x1a\x05\xb2\x43\x02\x08\x01\x12\x1c\n\x11\x41\x43TION_INPUT_LOST\x10\x1a\x1a\x05\xb2\x43\x02\x08\x01\x12,\n!FILESYSTEM_CONTEXT_UPDATE_FAILURE\x10\x1b\x1a\x05\xb2\x43\x02\x08\x01\x12&\n\x1b\x41\x43TION_OUTPUT_CLOSE_FAILURE\x10\x1c\x1a\x05\xb2\x43\x02\x08\x01\x12\'\n\x1cINPUT_DISCOVERY_IO_EXCEPTION\x10\x1d\x1a\x05\xb2\x43\x02\x08\x01\x12\x33\n(TREE_ARTIFACT_DIRECTORY_CREATION_FAILURE\x10\x1e\x1a\x05\xb2\x43\x02\x08\x01\x12\x33\n(ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE\x10\x1f\x1a\x05\xb2\x43\x02\x08\x01\x12\x36\n+ACTION_FS_OUTPUT_DIRECTORY_CREATION_FAILURE\x10 \x1a\x05\xb2\x43\x02\x08\x01\x12\x37\n,ACTION_FS_OUT_ERR_DIRECTORY_CREATION_FAILURE\x10!\x1a\x05\xb2\x43\x02\x08\x01\x12\'\n\x1cNON_ACTION_EXECUTION_FAILURE\x10\"\x1a\x05\xb2\x43\x02\x08\x01\x12\x10\n\x05\x43YCLE\x10#\x1a\x05\xb2\x43\x02\x08\x01\x12\x1f\n\x14SOURCE_INPUT_MISSING\x10$\x1a\x05\xb2\x43\x02\x08\x01\x12\x1f\n\x14UNEXPECTED_EXCEPTION\x10%\x1a\x05\xb2\x43\x02\x08\x01\x12$\n\x19SOURCE_INPUT_IO_EXCEPTION\x10\'\x1a\x05\xb2\x43\x02\x08\x01\"\x04\x08\x08\x10\x08\"\x04\x08&\x10&\"\xc4\x02\n\nWorkspaces\x12.\n\x04\x63ode\x18\x01 \x01(\x0e\x32 .failure_details.Workspaces.Code\"\x85\x02\n\x04\x43ode\x12\x1d\n\x12WORKSPACES_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x30\n%WORKSPACES_LOG_INITIALIZATION_FAILURE\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\x12\'\n\x1cWORKSPACES_LOG_WRITE_FAILURE\x10\x02\x1a\x05\xb2\x43\x02\x08$\x12\x42\n7ILLEGAL_WORKSPACE_FILE_SYMLINK_WITH_MANAGED_DIRECTORIES\x10\x03\x1a\x05\xb2\x43\x02\x08\x01\x12?\n4WORKSPACE_FILE_READ_FAILURE_WITH_MANAGED_DIRECTORIES\x10\x04\x1a\x05\xb2\x43\x02\x08\x01\"p\n\x0c\x43rashOptions\x12\x30\n\x04\x63ode\x18\x01 \x01(\x0e\x32\".failure_details.CrashOptions.Code\".\n\x04\x43ode\x12 \n\x15\x43RASH_OPTIONS_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\"\x04\x08\x01\x10\x01\"\xb2\x02\n\nFilesystem\x12.\n\x04\x63ode\x18\x01 \x01(\x0e\x32 .failure_details.Filesystem.Code\"\xf3\x01\n\x04\x43ode\x12\x1d\n\x12\x46ILESYSTEM_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x30\n%EMBEDDED_BINARIES_ENUMERATION_FAILURE\x10\x03\x1a\x05\xb2\x43\x02\x08$\x12+\n SERVER_PID_TXT_FILE_READ_FAILURE\x10\x04\x1a\x05\xb2\x43\x02\x08$\x12$\n\x19SERVER_FILE_WRITE_FAILURE\x10\x05\x1a\x05\xb2\x43\x02\x08$\x12\x35\n*DEFAULT_DIGEST_HASH_FUNCTION_INVALID_VALUE\x10\x06\x1a\x05\xb2\x43\x02\x08\x02\"\x04\x08\x01\x10\x01\"\x04\x08\x02\x10\x02\"\x04\x08\x07\x10\x07\"\xea\x03\n\x10\x45xecutionOptions\x12\x34\n\x04\x63ode\x18\x01 
\x01(\x0e\x32&.failure_details.ExecutionOptions.Code\"\x9f\x03\n\x04\x43ode\x12$\n\x19\x45XECUTION_OPTIONS_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1b\n\x10INVALID_STRATEGY\x10\x03\x1a\x05\xb2\x43\x02\x08\x02\x12:\n/REQUESTED_STRATEGY_INCOMPATIBLE_WITH_SANDBOXING\x10\x04\x1a\x05\xb2\x43\x02\x08$\x12*\n\x1f\x44\x45PRECATED_LOCAL_RESOURCES_USED\x10\x05\x1a\x05\xb2\x43\x02\x08$\x12*\n\x1fINVALID_CYCLIC_DYNAMIC_STRATEGY\x10\x06\x1a\x05\xb2\x43\x02\x08$\x12\x32\n\'RESTRICTION_UNMATCHED_TO_ACTION_CONTEXT\x10\x07\x1a\x05\xb2\x43\x02\x08\x02\x12\x36\n+REMOTE_FALLBACK_STRATEGY_NOT_ABSTRACT_SPAWN\x10\x08\x1a\x05\xb2\x43\x02\x08\x02\x12\x1d\n\x12STRATEGY_NOT_FOUND\x10\t\x1a\x05\xb2\x43\x02\x08\x02\x12)\n\x1e\x44YNAMIC_STRATEGY_NOT_SANDBOXED\x10\n\x1a\x05\xb2\x43\x02\x08\x02\"\x04\x08\x01\x10\x01\"\x04\x08\x02\x10\x02\"\x96\x05\n\x07\x43ommand\x12+\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1d.failure_details.Command.Code\"\xdd\x04\n\x04\x43ode\x12\"\n\x17\x43OMMAND_FAILURE_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1c\n\x11\x43OMMAND_NOT_FOUND\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\x12\"\n\x17\x41NOTHER_COMMAND_RUNNING\x10\x02\x1a\x05\xb2\x43\x02\x08\t\x12\x1e\n\x13PREVIOUSLY_SHUTDOWN\x10\x03\x1a\x05\xb2\x43\x02\x08$\x12;\n0STARLARK_CPU_PROFILE_FILE_INITIALIZATION_FAILURE\x10\x04\x1a\x05\xb2\x43\x02\x08$\x12\x38\n-STARLARK_CPU_PROFILING_INITIALIZATION_FAILURE\x10\x05\x1a\x05\xb2\x43\x02\x08$\x12\x32\n\'STARLARK_CPU_PROFILE_FILE_WRITE_FAILURE\x10\x06\x1a\x05\xb2\x43\x02\x08$\x12*\n\x1fINVOCATION_POLICY_PARSE_FAILURE\x10\x07\x1a\x05\xb2\x43\x02\x08\x02\x12$\n\x19INVOCATION_POLICY_INVALID\x10\x08\x1a\x05\xb2\x43\x02\x08\x02\x12 \n\x15OPTIONS_PARSE_FAILURE\x10\t\x1a\x05\xb2\x43\x02\x08\x02\x12)\n\x1eSTARLARK_OPTIONS_PARSE_FAILURE\x10\n\x1a\x05\xb2\x43\x02\x08\x02\x12#\n\x18\x41RGUMENTS_NOT_RECOGNIZED\x10\x0b\x1a\x05\xb2\x43\x02\x08\x02\x12\x1b\n\x10NOT_IN_WORKSPACE\x10\x0c\x1a\x05\xb2\x43\x02\x08\x02\x12#\n\x18SPACES_IN_WORKSPACE_PATH\x10\r\x1a\x05\xb2\x43\x02\x08$\x12\x1e\n\x13IN_OUTPUT_DIRECTORY\x10\x0e\x1a\x05\xb2\x43\x02\x08\x02\"\xec\x01\n\nGrpcServer\x12.\n\x04\x63ode\x18\x01 \x01(\x0e\x32 .failure_details.GrpcServer.Code\"\xad\x01\n\x04\x43ode\x12\x1e\n\x13GRPC_SERVER_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12&\n\x1bGRPC_SERVER_NOT_COMPILED_IN\x10\x01\x1a\x05\xb2\x43\x02\x08%\x12\x1e\n\x13SERVER_BIND_FAILURE\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\x12\x15\n\nBAD_COOKIE\x10\x03\x1a\x05\xb2\x43\x02\x08$\x12 \n\x15NO_CLIENT_DESCRIPTION\x10\x04\x1a\x05\xb2\x43\x02\x08$\"\x04\x08\x05\x10\x05\"\x99\x01\n\x11\x43\x61nonicalizeFlags\x12\x35\n\x04\x63ode\x18\x01 \x01(\x0e\x32\'.failure_details.CanonicalizeFlags.Code\"M\n\x04\x43ode\x12%\n\x1a\x43\x41NONICALIZE_FLAGS_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1e\n\x13\x46OR_COMMAND_INVALID\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\"\xc2\x04\n\x12\x42uildConfiguration\x12\x36\n\x04\x63ode\x18\x01 \x01(\x0e\x32(.failure_details.BuildConfiguration.Code\"\xf3\x03\n\x04\x43ode\x12&\n\x1b\x42UILD_CONFIGURATION_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12.\n#PLATFORM_MAPPING_EVALUATION_FAILURE\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\x12.\n#PLATFORM_MAPPINGS_FILE_IS_DIRECTORY\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\x12+\n PLATFORM_MAPPINGS_FILE_NOT_FOUND\x10\x03\x1a\x05\xb2\x43\x02\x08\x01\x12\x33\n(TOP_LEVEL_CONFIGURATION_CREATION_FAILURE\x10\x04\x1a\x05\xb2\x43\x02\x08\x01\x12 \n\x15INVALID_CONFIGURATION\x10\x05\x1a\x05\xb2\x43\x02\x08\x02\x12 
\n\x15INVALID_BUILD_OPTIONS\x10\x06\x1a\x05\xb2\x43\x02\x08\x02\x12!\n\x16MULTI_CPU_PREREQ_UNMET\x10\x07\x1a\x05\xb2\x43\x02\x08\x02\x12\x33\n(HEURISTIC_INSTRUMENTATION_FILTER_INVALID\x10\x08\x1a\x05\xb2\x43\x02\x08\x02\x12\x10\n\x05\x43YCLE\x10\t\x1a\x05\xb2\x43\x02\x08\x02\x12%\n\x1a\x43ONFLICTING_CONFIGURATIONS\x10\n\x1a\x05\xb2\x43\x02\x08\x02\x12,\n!INVALID_OUTPUT_DIRECTORY_MNEMONIC\x10\x0b\x1a\x05\xb2\x43\x02\x08\x02\"\xe9\x01\n\x0bInfoCommand\x12/\n\x04\x63ode\x18\x01 \x01(\x0e\x32!.failure_details.InfoCommand.Code\"\xa8\x01\n\x04\x43ode\x12\x1f\n\x14INFO_COMMAND_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x18\n\rTOO_MANY_KEYS\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\x12\x1d\n\x12KEY_NOT_RECOGNIZED\x10\x02\x1a\x05\xb2\x43\x02\x08\x02\x12#\n\x18INFO_BLOCK_WRITE_FAILURE\x10\x03\x1a\x05\xb2\x43\x02\x08\x07\x12!\n\x16\x41LL_INFO_WRITE_FAILURE\x10\x04\x1a\x05\xb2\x43\x02\x08$\"\x83\x02\n\rMemoryOptions\x12\x31\n\x04\x63ode\x18\x01 \x01(\x0e\x32#.failure_details.MemoryOptions.Code\"\xbe\x01\n\x04\x43ode\x12!\n\x16MEMORY_OPTIONS_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12@\n5EXPERIMENTAL_OOM_MORE_EAGERLY_THRESHOLD_INVALID_VALUE\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\x12Q\nDDEPRECATED_EXPERIMENTAL_OOM_MORE_EAGERLY_NO_TENURED_COLLECTORS_FOUND\x10\x02\x1a\x07\x08\x01\xb2\x43\x02\x08\x02\"\xf2\x0b\n\x05Query\x12)\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1b.failure_details.Query.Code\"\xbd\x0b\n\x04\x43ode\x12\x18\n\rQUERY_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x32\n\'QUERY_FILE_WITH_COMMAND_LINE_EXPRESSION\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\x12\"\n\x17QUERY_FILE_READ_FAILURE\x10\x02\x1a\x05\xb2\x43\x02\x08\x02\x12*\n\x1f\x43OMMAND_LINE_EXPRESSION_MISSING\x10\x03\x1a\x05\xb2\x43\x02\x08\x02\x12 \n\x15OUTPUT_FORMAT_INVALID\x10\x04\x1a\x05\xb2\x43\x02\x08\x02\x12!\n\x16GRAPHLESS_PREREQ_UNMET\x10\x05\x1a\x05\xb2\x43\x02\x08\x02\x12%\n\x1aQUERY_OUTPUT_WRITE_FAILURE\x10\x06\x1a\x05\xb2\x43\x02\x08$\x12%\n\x1aQUERY_STDOUT_FLUSH_FAILURE\x10\r\x1a\x05\xb2\x43\x02\x08$\x12&\n\x1b\x41NALYSIS_QUERY_PREREQ_UNMET\x10\x0e\x1a\x05\xb2\x43\x02\x08\x02\x12&\n\x1bQUERY_RESULTS_FLUSH_FAILURE\x10\x0f\x1a\x05\xb2\x43\x02\x08$\x12\x39\n.DEPRECATED_UNCLOSED_QUOTATION_EXPRESSION_ERROR\x10\x10\x1a\x05\xb2\x43\x02\x08\x02\x12 \n\x15VARIABLE_NAME_INVALID\x10\x11\x1a\x05\xb2\x43\x02\x08\x07\x12\x1d\n\x12VARIABLE_UNDEFINED\x10\x12\x1a\x05\xb2\x43\x02\x08\x07\x12\x44\n9BUILDFILES_AND_LOADFILES_CANNOT_USE_OUTPUT_LOCATION_ERROR\x10\x13\x1a\x05\xb2\x43\x02\x08\x02\x12\x1b\n\x10\x42UILD_FILE_ERROR\x10\x14\x1a\x05\xb2\x43\x02\x08\x07\x12\x10\n\x05\x43YCLE\x10\x15\x1a\x05\xb2\x43\x02\x08\x07\x12+\n UNIQUE_SKYKEY_THRESHOLD_EXCEEDED\x10\x16\x1a\x05\xb2\x43\x02\x08\x07\x12\'\n\x1cTARGET_NOT_IN_UNIVERSE_SCOPE\x10\x17\x1a\x05\xb2\x43\x02\x08\x02\x12+\n INVALID_FULL_UNIVERSE_EXPRESSION\x10\x18\x1a\x05\xb2\x43\x02\x08\x07\x12(\n\x1dUNIVERSE_SCOPE_LIMIT_EXCEEDED\x10\x19\x1a\x05\xb2\x43\x02\x08\x07\x12&\n\x1bINVALIDATION_LIMIT_EXCEEDED\x10\x1a\x1a\x05\xb2\x43\x02\x08\x07\x12%\n\x1aOUTPUT_FORMAT_PREREQ_UNMET\x10\x1b\x1a\x05\xb2\x43\x02\x08\x02\x12\x1c\n\x11\x41RGUMENTS_MISSING\x10\x1c\x1a\x05\xb2\x43\x02\x08\x07\x12\x31\n&RBUILDFILES_FUNCTION_REQUIRES_SKYQUERY\x10\x1d\x1a\x05\xb2\x43\x02\x08\x07\x12%\n\x1a\x46ULL_TARGETS_NOT_SUPPORTED\x10\x1e\x1a\x05\xb2\x43\x02\x08\x07\x12,\n!DEPRECATED_UNEXPECTED_TOKEN_ERROR\x10\x1f\x1a\x05\xb2\x43\x02\x08\x02\x12-\n\"DEPRECATED_INTEGER_LITERAL_MISSING\x10 
\x1a\x05\xb2\x43\x02\x08\x02\x12\x36\n+DEPRECATED_INVALID_STARTING_CHARACTER_ERROR\x10!\x1a\x05\xb2\x43\x02\x08\x02\x12\x32\n\'DEPRECATED_PREMATURE_END_OF_INPUT_ERROR\x10\"\x1a\x05\xb2\x43\x02\x08\x02\x12\x17\n\x0cSYNTAX_ERROR\x10#\x1a\x05\xb2\x43\x02\x08\x02\x12(\n\x1dOUTPUT_FORMATTER_IO_EXCEPTION\x10$\x1a\x05\xb2\x43\x02\x08$\x12+\n SKYQUERY_TRANSITIVE_TARGET_ERROR\x10%\x1a\x05\xb2\x43\x02\x08\x07\x12$\n\x19SKYQUERY_TARGET_EXCEPTION\x10&\x1a\x05\xb2\x43\x02\x08\x07\x12&\n\x1bINVALID_LABEL_IN_TEST_SUITE\x10\'\x1a\x05\xb2\x43\x02\x08\x07\x12#\n\x18ILLEGAL_FLAG_COMBINATION\x10(\x1a\x05\xb2\x43\x02\x08\x02\x12\x1d\n\x12NON_DETAILED_ERROR\x10)\x1a\x05\xb2\x43\x02\x08\x01\"\x04\x08\x07\x10\x0c\"\x99\x01\n\x0eLocalExecution\x12\x32\n\x04\x63ode\x18\x01 \x01(\x0e\x32$.failure_details.LocalExecution.Code\"S\n\x04\x43ode\x12\"\n\x17LOCAL_EXECUTION_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\'\n\x1cLOCKFREE_OUTPUT_PREREQ_UNMET\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\"\x8a\x01\n\x0b\x41\x63tionCache\x12/\n\x04\x63ode\x18\x01 \x01(\x0e\x32!.failure_details.ActionCache.Code\"J\n\x04\x43ode\x12\x1f\n\x14\x41\x43TION_CACHE_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12!\n\x16INITIALIZATION_FAILURE\x10\x01\x1a\x05\xb2\x43\x02\x08$\"\xe7\x01\n\x0c\x46\x65tchCommand\x12\x30\n\x04\x63ode\x18\x01 \x01(\x0e\x32\".failure_details.FetchCommand.Code\"\xa4\x01\n\x04\x43ode\x12 \n\x15\x46\x45TCH_COMMAND_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1d\n\x12\x45XPRESSION_MISSING\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\x12\x1a\n\x0fOPTIONS_INVALID\x10\x02\x1a\x05\xb2\x43\x02\x08\x02\x12\x1c\n\x11QUERY_PARSE_ERROR\x10\x03\x1a\x05\xb2\x43\x02\x08\x02\x12!\n\x16QUERY_EVALUATION_ERROR\x10\x04\x1a\x05\xb2\x43\x02\x08\x02\"\xf8\x01\n\x0bSyncCommand\x12/\n\x04\x63ode\x18\x01 \x01(\x0e\x32!.failure_details.SyncCommand.Code\"\xb7\x01\n\x04\x43ode\x12\x1f\n\x14SYNC_COMMAND_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1f\n\x14PACKAGE_LOOKUP_ERROR\x10\x01\x1a\x05\xb2\x43\x02\x08\x07\x12%\n\x1aWORKSPACE_EVALUATION_ERROR\x10\x02\x1a\x05\xb2\x43\x02\x08\x07\x12\"\n\x17REPOSITORY_FETCH_ERRORS\x10\x03\x1a\x05\xb2\x43\x02\x08\x07\x12\"\n\x17REPOSITORY_NAME_INVALID\x10\x04\x1a\x05\xb2\x43\x02\x08\x07\"\xfb\x03\n\x07Sandbox\x12+\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1d.failure_details.Sandbox.Code\"\xc2\x03\n\x04\x43ode\x12\"\n\x17SANDBOX_FAILURE_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12!\n\x16INITIALIZATION_FAILURE\x10\x01\x1a\x05\xb2\x43\x02\x08$\x12!\n\x16\x45XECUTION_IO_EXCEPTION\x10\x02\x1a\x05\xb2\x43\x02\x08$\x12!\n\x16\x44OCKER_COMMAND_FAILURE\x10\x03\x1a\x05\xb2\x43\x02\x08\x01\x12\x1a\n\x0fNO_DOCKER_IMAGE\x10\x04\x1a\x05\xb2\x43\x02\x08\x01\x12+\n DOCKER_IMAGE_PREPARATION_FAILURE\x10\x05\x1a\x05\xb2\x43\x02\x08\x01\x12&\n\x1b\x42IND_MOUNT_ANALYSIS_FAILURE\x10\x06\x1a\x05\xb2\x43\x02\x08\x01\x12&\n\x1bMOUNT_SOURCE_DOES_NOT_EXIST\x10\x07\x1a\x05\xb2\x43\x02\x08\x01\x12,\n!MOUNT_SOURCE_TARGET_TYPE_MISMATCH\x10\x08\x1a\x05\xb2\x43\x02\x08\x01\x12&\n\x1bMOUNT_TARGET_DOES_NOT_EXIST\x10\t\x1a\x05\xb2\x43\x02\x08\x01\x12\"\n\x17SUBPROCESS_START_FAILED\x10\n\x1a\x05\xb2\x43\x02\x08$\x12\x1a\n\x0f\x46ORBIDDEN_INPUT\x10\x0b\x1a\x05\xb2\x43\x02\x08\x01\"\x9d\x04\n\x0fIncludeScanning\x12\x33\n\x04\x63ode\x18\x01 \x01(\x0e\x32%.failure_details.IncludeScanning.Code\x12\x42\n\x14package_loading_code\x18\x02 \x01(\x0e\x32$.failure_details.PackageLoading.Code\"\x90\x03\n\x04\x43ode\x12#\n\x18INCLUDE_SCANNING_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12)\n\x1eINITIALIZE_INCLUDE_HINTS_ERROR\x10\x01\x1a\x05\xb2\x43\x02\x08$\x12 
\n\x15SCANNING_IO_EXCEPTION\x10\x02\x1a\x05\xb2\x43\x02\x08$\x12,\n!INCLUDE_HINTS_FILE_NOT_IN_PACKAGE\x10\x03\x1a\x05\xb2\x43\x02\x08$\x12%\n\x1aINCLUDE_HINTS_READ_FAILURE\x10\x04\x1a\x05\xb2\x43\x02\x08$\x12 \n\x15ILLEGAL_ABSOLUTE_PATH\x10\x05\x1a\x05\xb2\x43\x02\x08\x01\x12\x1f\n\x14PACKAGE_LOAD_FAILURE\x10\x06\x1a\x05\xb2\x43\x02\x08\x01\x12$\n\x19USER_PACKAGE_LOAD_FAILURE\x10\x07\x1a\x05\xb2\x43\x02\x08\x01\x12&\n\x1bSYSTEM_PACKAGE_LOAD_FAILURE\x10\x08\x1a\x05\xb2\x43\x02\x08$\x12\x30\n%UNDIFFERENTIATED_PACKAGE_LOAD_FAILURE\x10\t\x1a\x05\xb2\x43\x02\x08\x01\"\xbc\x01\n\x0bTestCommand\x12/\n\x04\x63ode\x18\x01 \x01(\x0e\x32!.failure_details.TestCommand.Code\"|\n\x04\x43ode\x12\x1f\n\x14TEST_COMMAND_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1a\n\x0fNO_TEST_TARGETS\x10\x01\x1a\x05\xb2\x43\x02\x08\x04\x12\x1e\n\x13TEST_WITH_NOANALYZE\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\x12\x17\n\x0cTESTS_FAILED\x10\x03\x1a\x05\xb2\x43\x02\x08\x03\"\xa6\x05\n\x0b\x41\x63tionQuery\x12/\n\x04\x63ode\x18\x01 \x01(\x0e\x32!.failure_details.ActionQuery.Code\"\xe5\x04\n\x04\x43ode\x12\x1f\n\x14\x41\x43TION_QUERY_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12)\n\x1e\x43OMMAND_LINE_EXPANSION_FAILURE\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\x12\x19\n\x0eOUTPUT_FAILURE\x10\x02\x1a\x05\xb2\x43\x02\x08\x06\x12*\n\x1f\x43OMMAND_LINE_EXPRESSION_MISSING\x10\x03\x1a\x05\xb2\x43\x02\x08\x02\x12#\n\x18\x45XPRESSION_PARSE_FAILURE\x10\x04\x1a\x05\xb2\x43\x02\x08\x02\x12\x36\n+SKYFRAME_STATE_WITH_COMMAND_LINE_EXPRESSION\x10\x05\x1a\x05\xb2\x43\x02\x08\x02\x12$\n\x19INVALID_AQUERY_EXPRESSION\x10\x06\x1a\x05\xb2\x43\x02\x08\x01\x12&\n\x1bSKYFRAME_STATE_PREREQ_UNMET\x10\x07\x1a\x05\xb2\x43\x02\x08\x02\x12 \n\x15\x41QUERY_OUTPUT_TOO_BIG\x10\x08\x1a\x05\xb2\x43\x02\x08\x07\x12!\n\x16ILLEGAL_PATTERN_SYNTAX\x10\t\x1a\x05\xb2\x43\x02\x08\x02\x12\x1e\n\x13INCORRECT_ARGUMENTS\x10\n\x1a\x05\xb2\x43\x02\x08\x02\x12>\n3TOP_LEVEL_TARGETS_WITH_SKYFRAME_STATE_NOT_SUPPORTED\x10\x0b\x1a\x05\xb2\x43\x02\x08\x02\x12)\n\x1eSKYFRAME_STATE_AFTER_EXECUTION\x10\x0c\x1a\x05\xb2\x43\x02\x08\x01\x12(\n\x1dLABELS_FUNCTION_NOT_SUPPORTED\x10\r\x1a\x05\xb2\x43\x02\x08\x02\x12%\n\x1aTEMPLATE_EXPANSION_FAILURE\x10\x0e\x1a\x05\xb2\x43\x02\x08\x02\"\xd7\x06\n\x0eTargetPatterns\x12\x32\n\x04\x63ode\x18\x01 \x01(\x0e\x32$.failure_details.TargetPatterns.Code\"\x90\x06\n\x04\x43ode\x12\"\n\x17TARGET_PATTERNS_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x38\n-TARGET_PATTERN_FILE_WITH_COMMAND_LINE_PATTERN\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\x12+\n TARGET_PATTERN_FILE_READ_FAILURE\x10\x02\x1a\x05\xb2\x43\x02\x08\x02\x12\'\n\x1cTARGET_PATTERN_PARSE_FAILURE\x10\x03\x1a\x05\xb2\x43\x02\x08\x01\x12\x1c\n\x11PACKAGE_NOT_FOUND\x10\x04\x1a\x05\xb2\x43\x02\x08\x01\x12 \n\x15TARGET_FORMAT_INVALID\x10\x05\x1a\x05\xb2\x43\x02\x08\x01\x12*\n\x1f\x41\x42SOLUTE_TARGET_PATTERN_INVALID\x10\x06\x1a\x05\xb2\x43\x02\x08\x01\x12\x30\n%CANNOT_DETERMINE_TARGET_FROM_FILENAME\x10\x07\x1a\x05\xb2\x43\x02\x08\x01\x12\x1d\n\x12LABEL_SYNTAX_ERROR\x10\x08\x1a\x05\xb2\x43\x02\x08\x01\x12(\n\x1dTARGET_CANNOT_BE_EMPTY_STRING\x10\t\x1a\x05\xb2\x43\x02\x08\x01\x12+\n PACKAGE_PART_CANNOT_END_IN_SLASH\x10\n\x1a\x05\xb2\x43\x02\x08\x01\x12\x10\n\x05\x43YCLE\x10\x0b\x1a\x05\xb2\x43\x02\x08\x01\x12 
\n\x15\x43\x41NNOT_PRELOAD_TARGET\x10\x0c\x1a\x05\xb2\x43\x02\x08\x01\x12\x1a\n\x0fTARGETS_MISSING\x10\r\x1a\x05\xb2\x43\x02\x08\x01\x12\x30\n%RECURSIVE_TARGET_PATTERNS_NOT_ALLOWED\x10\x0e\x1a\x05\xb2\x43\x02\x08\x01\x12*\n\x1fUP_LEVEL_REFERENCES_NOT_ALLOWED\x10\x0f\x1a\x05\xb2\x43\x02\x08\x01\x12.\n#NEGATIVE_TARGET_PATTERN_NOT_ALLOWED\x10\x10\x1a\x05\xb2\x43\x02\x08\x01\x12 \n\x15TARGET_MUST_BE_A_FILE\x10\x11\x1a\x05\xb2\x43\x02\x08\x01\x12\x1f\n\x14\x44\x45PENDENCY_NOT_FOUND\x10\x12\x1a\x05\xb2\x43\x02\x08\x01\x12\x1f\n\x14PACKAGE_NAME_INVALID\x10\x13\x1a\x05\xb2\x43\x02\x08\x01\"\xf5\x03\n\x0c\x43leanCommand\x12\x30\n\x04\x63ode\x18\x01 \x01(\x0e\x32\".failure_details.CleanCommand.Code\"\xb2\x03\n\x04\x43ode\x12 \n\x15\x43LEAN_COMMAND_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\'\n\x1cOUTPUT_SERVICE_CLEAN_FAILURE\x10\x01\x1a\x05\xb2\x43\x02\x08\x06\x12%\n\x1a\x41\x43TION_CACHE_CLEAN_FAILURE\x10\x02\x1a\x05\xb2\x43\x02\x08$\x12 \n\x15OUT_ERR_CLOSE_FAILURE\x10\x03\x1a\x05\xb2\x43\x02\x08$\x12%\n\x1aOUTPUT_BASE_DELETE_FAILURE\x10\x04\x1a\x05\xb2\x43\x02\x08$\x12(\n\x1dOUTPUT_BASE_TEMP_MOVE_FAILURE\x10\x05\x1a\x05\xb2\x43\x02\x08$\x12+\n ASYNC_OUTPUT_BASE_DELETE_FAILURE\x10\x06\x1a\x05\xb2\x43\x02\x08\x06\x12\"\n\x17\x45XECROOT_DELETE_FAILURE\x10\x07\x1a\x05\xb2\x43\x02\x08$\x12%\n\x1a\x45XECROOT_TEMP_MOVE_FAILURE\x10\x08\x1a\x05\xb2\x43\x02\x08$\x12(\n\x1d\x41SYNC_EXECROOT_DELETE_FAILURE\x10\t\x1a\x05\xb2\x43\x02\x08\x06\x12#\n\x18\x41RGUMENTS_NOT_RECOGNIZED\x10\n\x1a\x05\xb2\x43\x02\x08\x02\"\xb1\x01\n\rConfigCommand\x12\x31\n\x04\x63ode\x18\x01 \x01(\x0e\x32#.failure_details.ConfigCommand.Code\"m\n\x04\x43ode\x12!\n\x16\x43ONFIG_COMMAND_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1e\n\x13TOO_MANY_CONFIG_IDS\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\x12\"\n\x17\x43ONFIGURATION_NOT_FOUND\x10\x02\x1a\x05\xb2\x43\x02\x08\x02\"\xbb\x04\n\x11\x43onfigurableQuery\x12\x35\n\x04\x63ode\x18\x01 \x01(\x0e\x32\'.failure_details.ConfigurableQuery.Code\"\xee\x03\n\x04\x43ode\x12%\n\x1a\x43ONFIGURABLE_QUERY_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12*\n\x1f\x43OMMAND_LINE_EXPRESSION_MISSING\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\x12#\n\x18\x45XPRESSION_PARSE_FAILURE\x10\x02\x1a\x05\xb2\x43\x02\x08\x02\x12 \n\x15\x46ILTERS_NOT_SUPPORTED\x10\x03\x1a\x05\xb2\x43\x02\x08\x02\x12,\n!BUILDFILES_FUNCTION_NOT_SUPPORTED\x10\x04\x1a\x05\xb2\x43\x02\x08\x02\x12*\n\x1fSIBLINGS_FUNCTION_NOT_SUPPORTED\x10\x05\x1a\x05\xb2\x43\x02\x08\x02\x12)\n\x1eVISIBLE_FUNCTION_NOT_SUPPORTED\x10\x06\x1a\x05\xb2\x43\x02\x08\x02\x12\x1c\n\x11\x41TTRIBUTE_MISSING\x10\x07\x1a\x05\xb2\x43\x02\x08\x02\x12*\n\x1fINCORRECT_CONFIG_ARGUMENT_ERROR\x10\x08\x1a\x05\xb2\x43\x02\x08\x02\x12\x19\n\x0eTARGET_MISSING\x10\t\x1a\x05\xb2\x43\x02\x08\x02\x12 \n\x15STARLARK_SYNTAX_ERROR\x10\n\x1a\x05\xb2\x43\x02\x08\x02\x12\x1e\n\x13STARLARK_EVAL_ERROR\x10\x0b\x1a\x05\xb2\x43\x02\x08\x02\x12 \n\x15\x46ORMAT_FUNCTION_ERROR\x10\x0c\x1a\x05\xb2\x43\x02\x08\x02\"\xa9\x02\n\x0b\x44umpCommand\x12/\n\x04\x63ode\x18\x01 
\x01(\x0e\x32!.failure_details.DumpCommand.Code\"\xe8\x01\n\x04\x43ode\x12\x1f\n\x14\x44UMP_COMMAND_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1e\n\x13NO_OUTPUT_SPECIFIED\x10\x01\x1a\x05\xb2\x43\x02\x08\x07\x12#\n\x18\x41\x43TION_CACHE_DUMP_FAILED\x10\x02\x1a\x05\xb2\x43\x02\x08\x07\x12)\n\x1e\x43OMMAND_LINE_EXPANSION_FAILURE\x10\x03\x1a\x05\xb2\x43\x02\x08\x07\x12#\n\x18\x41\x43TION_GRAPH_DUMP_FAILED\x10\x04\x1a\x05\xb2\x43\x02\x08\x07\x12$\n\x19STARLARK_HEAP_DUMP_FAILED\x10\x05\x1a\x05\xb2\x43\x02\x08\x08\"\x04\x08\x06\x10\x06\"\xa2\x01\n\x0bHelpCommand\x12/\n\x04\x63ode\x18\x01 \x01(\x0e\x32!.failure_details.HelpCommand.Code\"b\n\x04\x43ode\x12\x1f\n\x14HELP_COMMAND_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1b\n\x10MISSING_ARGUMENT\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\x12\x1c\n\x11\x43OMMAND_NOT_FOUND\x10\x02\x1a\x05\xb2\x43\x02\x08\x02\"\xb1\x02\n\rMobileInstall\x12\x31\n\x04\x63ode\x18\x01 \x01(\x0e\x32#.failure_details.MobileInstall.Code\"\xec\x01\n\x04\x43ode\x12!\n\x16MOBILE_INSTALL_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1e\n\x13\x43LASSIC_UNSUPPORTED\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\x12\x1e\n\x13NO_TARGET_SPECIFIED\x10\x02\x1a\x05\xb2\x43\x02\x08\x02\x12%\n\x1aMULTIPLE_TARGETS_SPECIFIED\x10\x03\x1a\x05\xb2\x43\x02\x08\x02\x12\x1e\n\x13TARGET_TYPE_INVALID\x10\x04\x1a\x05\xb2\x43\x02\x08\x06\x12\x18\n\rNON_ZERO_EXIT\x10\x05\x1a\x05\xb2\x43\x02\x08\x06\x12 \n\x15\x45RROR_RUNNING_PROGRAM\x10\x06\x1a\x05\xb2\x43\x02\x08\x06\"\xb8\x01\n\x0eProfileCommand\x12\x32\n\x04\x63ode\x18\x01 \x01(\x0e\x32$.failure_details.ProfileCommand.Code\"r\n\x04\x43ode\x12\"\n\x17PROFILE_COMMAND_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12(\n\x1dOLD_BINARY_FORMAT_UNSUPPORTED\x10\x01\x1a\x05\xb2\x43\x02\x08\x01\x12\x1c\n\x11\x46ILE_READ_FAILURE\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\"\xc6\x05\n\nRunCommand\x12.\n\x04\x63ode\x18\x01 \x01(\x0e\x32 .failure_details.RunCommand.Code\"\x87\x05\n\x04\x43ode\x12\x1e\n\x13RUN_COMMAND_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1e\n\x13NO_TARGET_SPECIFIED\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\x12%\n\x1aTOO_MANY_TARGETS_SPECIFIED\x10\x02\x1a\x05\xb2\x43\x02\x08\x02\x12 \n\x15TARGET_NOT_EXECUTABLE\x10\x03\x1a\x05\xb2\x43\x02\x08\x02\x12/\n$TARGET_BUILT_BUT_PATH_NOT_EXECUTABLE\x10\x04\x1a\x05\xb2\x43\x02\x08\x01\x12\x32\n\'TARGET_BUILT_BUT_PATH_VALIDATION_FAILED\x10\x05\x1a\x05\xb2\x43\x02\x08$\x12%\n\x1aRUN_UNDER_TARGET_NOT_BUILT\x10\x06\x1a\x05\xb2\x43\x02\x08\x02\x12\x1b\n\x10RUN_PREREQ_UNMET\x10\x07\x1a\x05\xb2\x43\x02\x08\x02\x12\'\n\x1cTOO_MANY_TEST_SHARDS_OR_RUNS\x10\x08\x1a\x05\xb2\x43\x02\x08\x02\x12)\n\x1eTEST_ENVIRONMENT_SETUP_FAILURE\x10\t\x1a\x05\xb2\x43\x02\x08$\x12)\n\x1e\x43OMMAND_LINE_EXPANSION_FAILURE\x10\n\x1a\x05\xb2\x43\x02\x08$\x12\x1d\n\x12NO_SHELL_SPECIFIED\x10\x0b\x1a\x05\xb2\x43\x02\x08\x02\x12\x1f\n\x14SCRIPT_WRITE_FAILURE\x10\x0c\x1a\x05\xb2\x43\x02\x08\x06\x12\x30\n%RUNFILES_DIRECTORIES_CREATION_FAILURE\x10\r\x1a\x05\xb2\x43\x02\x08$\x12-\n\"RUNFILES_SYMLINKS_CREATION_FAILURE\x10\x0e\x1a\x05\xb2\x43\x02\x08$\x12-\n\"TEST_ENVIRONMENT_SETUP_INTERRUPTED\x10\x0f\x1a\x05\xb2\x43\x02\x08\x08\"\x8a\x01\n\x0eVersionCommand\x12\x32\n\x04\x63ode\x18\x01 \x01(\x0e\x32$.failure_details.VersionCommand.Code\"D\n\x04\x43ode\x12\"\n\x17VERSION_COMMAND_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x18\n\rNOT_AVAILABLE\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\"\x88\x02\n\x12PrintActionCommand\x12\x36\n\x04\x63ode\x18\x01 
\x01(\x0e\x32(.failure_details.PrintActionCommand.Code\"\xb9\x01\n\x04\x43ode\x12\'\n\x1cPRINT_ACTION_COMMAND_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1b\n\x10TARGET_NOT_FOUND\x10\x01\x1a\x05\xb2\x43\x02\x08\x01\x12)\n\x1e\x43OMMAND_LINE_EXPANSION_FAILURE\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\x12\"\n\x17TARGET_KIND_UNSUPPORTED\x10\x03\x1a\x05\xb2\x43\x02\x08\x01\x12\x1c\n\x11\x41\x43TIONS_NOT_FOUND\x10\x04\x1a\x05\xb2\x43\x02\x08\x01\"\xc8\x02\n\x0fWorkspaceStatus\x12\x33\n\x04\x63ode\x18\x01 \x01(\x0e\x32%.failure_details.WorkspaceStatus.Code\"\xff\x01\n\x04\x43ode\x12#\n\x18WORKSPACE_STATUS_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x18\n\rNON_ZERO_EXIT\x10\x01\x1a\x05\xb2\x43\x02\x08\x01\x12\x1f\n\x14\x41\x42NORMAL_TERMINATION\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\x12\x16\n\x0b\x45XEC_FAILED\x10\x03\x1a\x05\xb2\x43\x02\x08\x01\x12\x18\n\rPARSE_FAILURE\x10\x04\x1a\x05\xb2\x43\x02\x08$\x12\x1d\n\x12VALIDATION_FAILURE\x10\x05\x1a\x05\xb2\x43\x02\x08\x01\x12&\n\x1b\x43ONTENT_UPDATE_IO_EXCEPTION\x10\x06\x1a\x05\xb2\x43\x02\x08\x01\x12\x1e\n\x13STDERR_IO_EXCEPTION\x10\x07\x1a\x05\xb2\x43\x02\x08\x01\"\x94\x02\n\x0bJavaCompile\x12/\n\x04\x63ode\x18\x01 \x01(\x0e\x32!.failure_details.JavaCompile.Code\"\xd3\x01\n\x04\x43ode\x12\x1f\n\x14JAVA_COMPILE_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12$\n\x19REDUCED_CLASSPATH_FAILURE\x10\x01\x1a\x05\xb2\x43\x02\x08\x01\x12)\n\x1e\x43OMMAND_LINE_EXPANSION_FAILURE\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\x12\"\n\x17JDEPS_READ_IO_EXCEPTION\x10\x03\x1a\x05\xb2\x43\x02\x08$\x12\x35\n*REDUCED_CLASSPATH_FALLBACK_CLEANUP_FAILURE\x10\x04\x1a\x05\xb2\x43\x02\x08$\"\xba\x01\n\x0f\x41\x63tionRewinding\x12\x33\n\x04\x63ode\x18\x01 \x01(\x0e\x32%.failure_details.ActionRewinding.Code\"r\n\x04\x43ode\x12#\n\x18\x41\x43TION_REWINDING_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12$\n\x19LOST_INPUT_TOO_MANY_TIMES\x10\x01\x1a\x05\xb2\x43\x02\x08\x01\x12\x1f\n\x14LOST_INPUT_IS_SOURCE\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\"\x8d\x04\n\nCppCompile\x12.\n\x04\x63ode\x18\x01 \x01(\x0e\x32 .failure_details.CppCompile.Code\"\xce\x03\n\x04\x43ode\x12\x1e\n\x13\x43PP_COMPILE_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12)\n\x1e\x46IND_USED_HEADERS_IO_EXCEPTION\x10\x01\x1a\x05\xb2\x43\x02\x08$\x12\x1f\n\x14\x43OPY_OUT_ERR_FAILURE\x10\x02\x1a\x05\xb2\x43\x02\x08$\x12\x1e\n\x13\x44_FILE_READ_FAILURE\x10\x03\x1a\x05\xb2\x43\x02\x08$\x12%\n\x1a\x43OMMAND_GENERATION_FAILURE\x10\x04\x1a\x05\xb2\x43\x02\x08\x01\x12#\n\x18MODULE_EXPANSION_TIMEOUT\x10\x05\x1a\x05\xb2\x43\x02\x08\x01\x12)\n\x1eINCLUDE_PATH_OUTSIDE_EXEC_ROOT\x10\x06\x1a\x05\xb2\x43\x02\x08\x01\x12*\n\x1f\x46\x41KE_COMMAND_GENERATION_FAILURE\x10\x07\x1a\x05\xb2\x43\x02\x08\x01\x12 \n\x15UNDECLARED_INCLUSIONS\x10\x08\x1a\x05\xb2\x43\x02\x08\x01\x12\x1f\n\x14\x44_FILE_PARSE_FAILURE\x10\t\x1a\x05\xb2\x43\x02\x08\x01\x12*\n\x1f\x43OVERAGE_NOTES_CREATION_FAILURE\x10\n\x1a\x05\xb2\x43\x02\x08\x01\x12(\n\x1dMODULE_EXPANSION_MISSING_DATA\x10\x0b\x1a\x05\xb2\x43\x02\x08\x01\"\xc9\x01\n\x0eStarlarkAction\x12\x32\n\x04\x63ode\x18\x01 \x01(\x0e\x32$.failure_details.StarlarkAction.Code\"\x82\x01\n\x04\x43ode\x12\"\n\x17STARLARK_ACTION_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12)\n\x1eUNUSED_INPUT_LIST_READ_FAILURE\x10\x01\x1a\x05\xb2\x43\x02\x08$\x12+\n UNUSED_INPUT_LIST_FILE_NOT_FOUND\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\"\xb8\x01\n\x0bNinjaAction\x12/\n\x04\x63ode\x18\x01 
\x01(\x0e\x32!.failure_details.NinjaAction.Code\"x\n\x04\x43ode\x12\x1f\n\x14NINJA_ACTION_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12.\n#INVALID_DEPFILE_DECLARED_DEPENDENCY\x10\x01\x1a\x05\xb2\x43\x02\x08$\x12\x1f\n\x14\x44_FILE_PARSE_FAILURE\x10\x02\x1a\x05\xb2\x43\x02\x08$\"\xff\x01\n\x10\x44ynamicExecution\x12\x34\n\x04\x63ode\x18\x01 \x01(\x0e\x32&.failure_details.DynamicExecution.Code\"\xb4\x01\n\x04\x43ode\x12$\n\x19\x44YNAMIC_EXECUTION_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12%\n\x1aXCODE_RELATED_PREREQ_UNMET\x10\x01\x1a\x05\xb2\x43\x02\x08$\x12\"\n\x17\x41\x43TION_LOG_MOVE_FAILURE\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\x12\x16\n\x0bRUN_FAILURE\x10\x03\x1a\x05\xb2\x43\x02\x08\x01\x12#\n\x18NO_USABLE_STRATEGY_FOUND\x10\x04\x1a\x05\xb2\x43\x02\x08\x02\"\x92\x03\n\nFailAction\x12.\n\x04\x63ode\x18\x01 \x01(\x0e\x32 .failure_details.FailAction.Code\"\xd3\x02\n\x04\x43ode\x12\x1e\n\x13\x46\x41IL_ACTION_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1e\n\x13INTENTIONAL_FAILURE\x10\x01\x1a\x05\xb2\x43\x02\x08\x01\x12#\n\x18INCORRECT_PYTHON_VERSION\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\x12!\n\x16PROGUARD_SPECS_MISSING\x10\x03\x1a\x05\xb2\x43\x02\x08\x01\x12(\n\x1d\x44YNAMIC_LINKING_NOT_SUPPORTED\x10\x04\x1a\x05\xb2\x43\x02\x08\x01\x12\x1f\n\x14SOURCE_FILES_MISSING\x10\x05\x1a\x05\xb2\x43\x02\x08\x01\x12\x1e\n\x13INCORRECT_TOOLCHAIN\x10\x06\x1a\x05\xb2\x43\x02\x08\x01\x12!\n\x16\x46RAGMENT_CLASS_MISSING\x10\x07\x1a\x05\xb2\x43\x02\x08\x01\x12)\n\x1e\x43\x41NT_BUILD_INCOMPATIBLE_TARGET\x10\n\x1a\x05\xb2\x43\x02\x08\x01\"\x04\x08\x08\x10\x08\"\x04\x08\t\x10\t\"\xb3\x02\n\rSymlinkAction\x12\x31\n\x04\x63ode\x18\x01 \x01(\x0e\x32#.failure_details.SymlinkAction.Code\"\xee\x01\n\x04\x43ode\x12!\n\x16SYMLINK_ACTION_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12$\n\x19\x45XECUTABLE_INPUT_NOT_FILE\x10\x01\x1a\x05\xb2\x43\x02\x08\x01\x12\"\n\x17\x45XECUTABLE_INPUT_IS_NOT\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\x12.\n#EXECUTABLE_INPUT_CHECK_IO_EXCEPTION\x10\x03\x1a\x05\xb2\x43\x02\x08\x01\x12%\n\x1aLINK_CREATION_IO_EXCEPTION\x10\x04\x1a\x05\xb2\x43\x02\x08\x01\x12\"\n\x17LINK_TOUCH_IO_EXCEPTION\x10\x05\x1a\x05\xb2\x43\x02\x08\x01\"\xae\x01\n\x07\x43ppLink\x12+\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1d.failure_details.CppLink.Code\"v\n\x04\x43ode\x12\x1b\n\x10\x43PP_LINK_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12%\n\x1a\x43OMMAND_GENERATION_FAILURE\x10\x01\x1a\x05\xb2\x43\x02\x08\x01\x12*\n\x1f\x46\x41KE_COMMAND_GENERATION_FAILURE\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\"\xd7\x01\n\tLtoAction\x12-\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1f.failure_details.LtoAction.Code\"\x9a\x01\n\x04\x43ode\x12\x1d\n\x12LTO_ACTION_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12+\n INVALID_ABSOLUTE_PATH_IN_IMPORTS\x10\x01\x1a\x05\xb2\x43\x02\x08\x01\x12 \n\x15MISSING_BITCODE_FILES\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\x12$\n\x19IMPORTS_READ_IO_EXCEPTION\x10\x03\x1a\x05\xb2\x43\x02\x08\x01\"\x96\x02\n\nTestAction\x12.\n\x04\x63ode\x18\x01 \x01(\x0e\x32 .failure_details.TestAction.Code\"\xd7\x01\n\x04\x43ode\x12\x1e\n\x13TEST_ACTION_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12%\n\x1aNO_KEEP_GOING_TEST_FAILURE\x10\x01\x1a\x05\xb2\x43\x02\x08\x01\x12\"\n\x17LOCAL_TEST_PREREQ_UNMET\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\x12)\n\x1e\x43OMMAND_LINE_EXPANSION_FAILURE\x10\x03\x1a\x05\xb2\x43\x02\x08\x01\x12\x1d\n\x12\x44UPLICATE_CPU_TAGS\x10\x04\x1a\x05\xb2\x43\x02\x08\x01\x12\x1a\n\x0fINVALID_CPU_TAG\x10\x05\x1a\x05\xb2\x43\x02\x08\x01\"\xf1\x03\n\x06Worker\x12*\n\x04\x63ode\x18\x01 
\x01(\x0e\x32\x1c.failure_details.Worker.Code\"\xba\x03\n\x04\x43ode\x12\x19\n\x0eWORKER_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12/\n$MULTIPLEXER_INSTANCE_REMOVAL_FAILURE\x10\x01\x1a\x05\xb2\x43\x02\x08\x01\x12%\n\x1aMULTIPLEXER_DOES_NOT_EXIST\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\x12\x13\n\x08NO_TOOLS\x10\x03\x1a\x05\xb2\x43\x02\x08\x01\x12\x16\n\x0bNO_FLAGFILE\x10\x04\x1a\x05\xb2\x43\x02\x08\x01\x12\x30\n%VIRTUAL_INPUT_MATERIALIZATION_FAILURE\x10\x05\x1a\x05\xb2\x43\x02\x08\x01\x12\x19\n\x0e\x42ORROW_FAILURE\x10\x06\x1a\x05\xb2\x43\x02\x08\x01\x12\x1b\n\x10PREFETCH_FAILURE\x10\x07\x1a\x05\xb2\x43\x02\x08$\x12\x1a\n\x0fPREPARE_FAILURE\x10\x08\x1a\x05\xb2\x43\x02\x08\x01\x12\x1a\n\x0fREQUEST_FAILURE\x10\t\x1a\x05\xb2\x43\x02\x08\x01\x12!\n\x16PARSE_RESPONSE_FAILURE\x10\n\x1a\x05\xb2\x43\x02\x08\x01\x12\x16\n\x0bNO_RESPONSE\x10\x0b\x1a\x05\xb2\x43\x02\x08\x01\x12\x19\n\x0e\x46INISH_FAILURE\x10\x0c\x1a\x05\xb2\x43\x02\x08\x01\x12\x1a\n\x0f\x46ORBIDDEN_INPUT\x10\r\x1a\x05\xb2\x43\x02\x08\x01\"\xbe\x06\n\x08\x41nalysis\x12,\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1e.failure_details.Analysis.Code\"\x83\x06\n\x04\x43ode\x12\x1b\n\x10\x41NALYSIS_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x17\n\x0cLOAD_FAILURE\x10\x01\x1a\x05\xb2\x43\x02\x08\x01\x12(\n\x1dGENERIC_LOADING_PHASE_FAILURE\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\x12#\n\x18NOT_ALL_TARGETS_ANALYZED\x10\x03\x1a\x05\xb2\x43\x02\x08\x01\x12\x10\n\x05\x43YCLE\x10\x04\x1a\x05\xb2\x43\x02\x08\x01\x12\x31\n&PARAMETERIZED_TOP_LEVEL_ASPECT_INVALID\x10\x05\x1a\x05\xb2\x43\x02\x08\x01\x12$\n\x19\x41SPECT_LABEL_SYNTAX_ERROR\x10\x06\x1a\x05\xb2\x43\x02\x08\x01\x12\x1e\n\x13\x41SPECT_PREREQ_UNMET\x10\x07\x1a\x05\xb2\x43\x02\x08\x01\x12\x1b\n\x10\x41SPECT_NOT_FOUND\x10\x08\x1a\x05\xb2\x43\x02\x08\x01\x12\x1a\n\x0f\x41\x43TION_CONFLICT\x10\t\x1a\x05\xb2\x43\x02\x08\x01\x12#\n\x18\x41RTIFACT_PREFIX_CONFLICT\x10\n\x1a\x05\xb2\x43\x02\x08\x01\x12(\n\x1dUNEXPECTED_ANALYSIS_EXCEPTION\x10\x0b\x1a\x05\xb2\x43\x02\x08\x01\x12\'\n\x1cTARGETS_MISSING_ENVIRONMENTS\x10\x0c\x1a\x05\xb2\x43\x02\x08\x01\x12\x1e\n\x13INVALID_ENVIRONMENT\x10\r\x1a\x05\xb2\x43\x02\x08\x01\x12*\n\x1f\x45NVIRONMENT_MISSING_FROM_GROUPS\x10\x0e\x1a\x05\xb2\x43\x02\x08\x01\x12\x1d\n\x12\x45XEC_GROUP_MISSING\x10\x0f\x1a\x05\xb2\x43\x02\x08\x01\x12%\n\x1aINVALID_EXECUTION_PLATFORM\x10\x10\x1a\x05\xb2\x43\x02\x08\x01\x12!\n\x16\x41SPECT_CREATION_FAILED\x10\x11\x1a\x05\xb2\x43\x02\x08\x01\x12+\n CONFIGURED_VALUE_CREATION_FAILED\x10\x12\x1a\x05\xb2\x43\x02\x08\x01\x12(\n\x1dINCOMPATIBLE_TARGET_REQUESTED\x10\x13\x1a\x05\xb2\x43\x02\x08\x01\x12.\n#ANALYSIS_FAILURE_PROPAGATION_FAILED\x10\x14\x1a\x05\xb2\x43\x02\x08\x01\"\xb8\t\n\x0ePackageLoading\x12\x32\n\x04\x63ode\x18\x01 
\x01(\x0e\x32$.failure_details.PackageLoading.Code\"\xf1\x08\n\x04\x43ode\x12\"\n\x17PACKAGE_LOADING_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1f\n\x14WORKSPACE_FILE_ERROR\x10\x01\x1a\x05\xb2\x43\x02\x08\x01\x12)\n\x1eMAX_COMPUTATION_STEPS_EXCEEDED\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\x12\x1d\n\x12\x42UILD_FILE_MISSING\x10\x03\x1a\x05\xb2\x43\x02\x08\x01\x12\x1d\n\x12REPOSITORY_MISSING\x10\x04\x1a\x05\xb2\x43\x02\x08\x01\x12\x33\n(PERSISTENT_INCONSISTENT_FILESYSTEM_ERROR\x10\x05\x1a\x05\xb2\x43\x02\x08$\x12\x32\n\'TRANSIENT_INCONSISTENT_FILESYSTEM_ERROR\x10\x06\x1a\x05\xb2\x43\x02\x08$\x12\x17\n\x0cINVALID_NAME\x10\x07\x1a\x05\xb2\x43\x02\x08\x01\x12#\n\x18\x45VAL_GLOBS_SYMLINK_ERROR\x10\t\x1a\x05\xb2\x43\x02\x08\x01\x12%\n\x1aIMPORT_STARLARK_FILE_ERROR\x10\n\x1a\x05\xb2\x43\x02\x08\x01\x12\x1a\n\x0fPACKAGE_MISSING\x10\x0b\x1a\x05\xb2\x43\x02\x08\x01\x12\x19\n\x0eTARGET_MISSING\x10\x0c\x1a\x05\xb2\x43\x02\x08\x01\x12\x18\n\rNO_SUCH_THING\x10\r\x1a\x05\xb2\x43\x02\x08\x01\x12\x1c\n\x11GLOB_IO_EXCEPTION\x10\x0e\x1a\x05\xb2\x43\x02\x08$\x12\x1a\n\x0f\x44UPLICATE_LABEL\x10\x0f\x1a\x05\xb2\x43\x02\x08\x01\x12(\n\x1dINVALID_PACKAGE_SPECIFICATION\x10\x10\x1a\x05\xb2\x43\x02\x08\x01\x12\x17\n\x0cSYNTAX_ERROR\x10\x11\x1a\x05\xb2\x43\x02\x08\x01\x12+\n ENVIRONMENT_IN_DIFFERENT_PACKAGE\x10\x12\x1a\x05\xb2\x43\x02\x08\x01\x12)\n\x1e\x44\x45\x46\x41ULT_ENVIRONMENT_UNDECLARED\x10\x13\x1a\x05\xb2\x43\x02\x08\x01\x12)\n\x1e\x45NVIRONMENT_IN_MULTIPLE_GROUPS\x10\x14\x1a\x05\xb2\x43\x02\x08\x01\x12%\n\x1a\x45NVIRONMENT_DOES_NOT_EXIST\x10\x15\x1a\x05\xb2\x43\x02\x08\x01\x12\x1e\n\x13\x45NVIRONMENT_INVALID\x10\x16\x1a\x05\xb2\x43\x02\x08\x01\x12#\n\x18\x45NVIRONMENT_NOT_IN_GROUP\x10\x17\x1a\x05\xb2\x43\x02\x08\x01\x12\x1f\n\x14PACKAGE_NAME_INVALID\x10\x18\x1a\x05\xb2\x43\x02\x08\x01\x12\x1e\n\x13STARLARK_EVAL_ERROR\x10\x19\x1a\x05\xb2\x43\x02\x08\x01\x12 \n\x15LICENSE_PARSE_FAILURE\x10\x1a\x1a\x05\xb2\x43\x02\x08\x01\x12&\n\x1b\x44ISTRIBUTIONS_PARSE_FAILURE\x10\x1b\x1a\x05\xb2\x43\x02\x08\x01\x12)\n\x1eLABEL_CROSSES_PACKAGE_BOUNDARY\x10\x1c\x1a\x05\xb2\x43\x02\x08\x01\x12%\n\x1a\x42UILTINS_INJECTION_FAILURE\x10\x1d\x1a\x05\xb2\x43\x02\x08\x01\x12.\n#SYMLINK_CYCLE_OR_INFINITE_EXPANSION\x10\x1e\x1a\x05\xb2\x43\x02\x08\x01\x12\x1d\n\x12OTHER_IO_EXCEPTION\x10\x1f\x1a\x05\xb2\x43\x02\x08$\"\x04\x08\x08\x10\x08\"\xd4\x02\n\tToolchain\x12-\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1f.failure_details.Toolchain.Code\"\x97\x02\n\x04\x43ode\x12\x1c\n\x11TOOLCHAIN_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1b\n\x10MISSING_PROVIDER\x10\x01\x1a\x05\xb2\x43\x02\x08\x01\x12#\n\x18INVALID_CONSTRAINT_VALUE\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\x12!\n\x16INVALID_PLATFORM_VALUE\x10\x03\x1a\x05\xb2\x43\x02\x08\x01\x12\x1c\n\x11INVALID_TOOLCHAIN\x10\x04\x1a\x05\xb2\x43\x02\x08\x01\x12)\n\x1eNO_MATCHING_EXECUTION_PLATFORM\x10\x05\x1a\x05\xb2\x43\x02\x08\x01\x12 \n\x15NO_MATCHING_TOOLCHAIN\x10\x06\x1a\x05\xb2\x43\x02\x08\x01\x12!\n\x16INVALID_TOOLCHAIN_TYPE\x10\x07\x1a\x05\xb2\x43\x02\x08\x01\"\x8e\x03\n\x0fStarlarkLoading\x12\x33\n\x04\x63ode\x18\x01 
\x01(\x0e\x32%.failure_details.StarlarkLoading.Code\"\xc5\x02\n\x04\x43ode\x12#\n\x18STARLARK_LOADING_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x10\n\x05\x43YCLE\x10\x01\x1a\x05\xb2\x43\x02\x08\x01\x12\x18\n\rCOMPILE_ERROR\x10\x02\x1a\x05\xb2\x43\x02\x08\x01\x12\x16\n\x0bPARSE_ERROR\x10\x03\x1a\x05\xb2\x43\x02\x08\x01\x12\x15\n\nEVAL_ERROR\x10\x04\x1a\x05\xb2\x43\x02\x08\x01\x12\'\n\x1c\x43ONTAINING_PACKAGE_NOT_FOUND\x10\x05\x1a\x05\xb2\x43\x02\x08\x01\x12\x1c\n\x11PACKAGE_NOT_FOUND\x10\x06\x1a\x05\xb2\x43\x02\x08\x01\x12\x13\n\x08IO_ERROR\x10\x07\x1a\x05\xb2\x43\x02\x08\x01\x12)\n\x1eLABEL_CROSSES_PACKAGE_BOUNDARY\x10\x08\x1a\x05\xb2\x43\x02\x08\x01\x12\x19\n\x0e\x42UILTINS_ERROR\x10\t\x1a\x05\xb2\x43\x02\x08\x01\x12\x1b\n\x10VISIBILITY_ERROR\x10\n\x1a\x05\xb2\x43\x02\x08\x01\"\x8a\x02\n\x0c\x45xternalDeps\x12\x30\n\x04\x63ode\x18\x01 \x01(\x0e\x32\".failure_details.ExternalDeps.Code\"\xc7\x01\n\x04\x43ode\x12 \n\x15\x45XTERNAL_DEPS_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1b\n\x10MODULE_NOT_FOUND\x10\x01\x1a\x05\xb2\x43\x02\x08\x30\x12\x15\n\nBAD_MODULE\x10\x02\x1a\x05\xb2\x43\x02\x08\x30\x12#\n\x18VERSION_RESOLUTION_ERROR\x10\x03\x1a\x05\xb2\x43\x02\x08\x30\x12\x1f\n\x14INVALID_REGISTRY_URL\x10\x04\x1a\x05\xb2\x43\x02\x08\x30\x12#\n\x18\x45RROR_ACCESSING_REGISTRY\x10\x05\x1a\x05\xb2\x43\x02\x08 \"\x8a\x01\n\rDiffAwareness\x12\x31\n\x04\x63ode\x18\x01 \x01(\x0e\x32#.failure_details.DiffAwareness.Code\"F\n\x04\x43ode\x12!\n\x16\x44IFF_AWARENESS_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1b\n\x10\x44IFF_STAT_FAILED\x10\x01\x1a\x05\xb2\x43\x02\x08$\"\xcf\x01\n\x0fModqueryCommand\x12\x33\n\x04\x63ode\x18\x01 \x01(\x0e\x32%.failure_details.ModqueryCommand.Code\"\x86\x01\n\x04\x43ode\x12#\n\x18MODQUERY_COMMAND_UNKNOWN\x10\x00\x1a\x05\xb2\x43\x02\x08%\x12\x1c\n\x11MISSING_ARGUMENTS\x10\x01\x1a\x05\xb2\x43\x02\x08\x02\x12\x1d\n\x12TOO_MANY_ARGUMENTS\x10\x02\x1a\x05\xb2\x43\x02\x08\x02\x12\x1c\n\x11INVALID_ARGUMENTS\x10\x03\x1a\x05\xb2\x43\x02\x08\x02:\\\n\x08metadata\x12!.google.protobuf.EnumValueOptions\x18\xb6\x08 \x01(\x0b\x32&.failure_details.FailureDetailMetadataB&\n$com.google.devtools.build.lib.serverb\x06proto3')
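# The bytes literal above is the serialized FileDescriptorProto for
# failure_details.proto. The builder calls below construct the generated
# message and enum classes from it and register them under the module name
# 'src.main.protobuf.failure_details_pb2'.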
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'src.main.protobuf.failure_details_pb2', globals())
if not _descriptor._USE_C_DESCRIPTORS:
google_dot_protobuf_dot_descriptor__pb2.EnumValueOptions.RegisterExtension(metadata)
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n$com.google.devtools.build.lib.server'
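# Each stanza below clears the cached Python-level options for one enum value
# and pins its serialized EnumValueOptions bytes. The payload is the `metadata`
# extension (field 1078, registered above) wrapping a FailureDetailMetadata
# message whose `exit_code` field carries the Bazel exit code for that failure
# code: b'\262C\002\010\010' decodes to exit_code = 8 and b'\262C\002\010%' to
# exit_code = 37 (these appear to be Bazel's "interrupted" and "internal
# error" exit codes, respectively).
#
# Illustrative read-back (an assumption, not generated code), supposing this
# module is importable as failure_details_pb2:
#   opts = failure_details_pb2.Spawn.Code.DESCRIPTOR.values_by_name[
#       "TIMEOUT"].GetOptions()
#   opts.Extensions[failure_details_pb2.metadata].exit_code  # -> 1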
_INTERRUPTED_CODE.values_by_name["INTERRUPTED_UNKNOWN"]._options = None
_INTERRUPTED_CODE.values_by_name["INTERRUPTED_UNKNOWN"]._serialized_options = b'\262C\002\010\010'
_INTERRUPTED_CODE.values_by_name["INTERRUPTED"]._options = None
_INTERRUPTED_CODE.values_by_name["INTERRUPTED"]._serialized_options = b'\262C\002\010\010'
_INTERRUPTED_CODE.values_by_name["DEPRECATED_BUILD"]._options = None
_INTERRUPTED_CODE.values_by_name["DEPRECATED_BUILD"]._serialized_options = b'\262C\002\010\010'
_INTERRUPTED_CODE.values_by_name["DEPRECATED_BUILD_COMPLETION"]._options = None
_INTERRUPTED_CODE.values_by_name["DEPRECATED_BUILD_COMPLETION"]._serialized_options = b'\262C\002\010\010'
_INTERRUPTED_CODE.values_by_name["DEPRECATED_PACKAGE_LOADING_SYNC"]._options = None
_INTERRUPTED_CODE.values_by_name["DEPRECATED_PACKAGE_LOADING_SYNC"]._serialized_options = b'\262C\002\010\010'
_INTERRUPTED_CODE.values_by_name["DEPRECATED_EXECUTOR_COMPLETION"]._options = None
_INTERRUPTED_CODE.values_by_name["DEPRECATED_EXECUTOR_COMPLETION"]._serialized_options = b'\262C\002\010\010'
_INTERRUPTED_CODE.values_by_name["DEPRECATED_COMMAND_DISPATCH"]._options = None
_INTERRUPTED_CODE.values_by_name["DEPRECATED_COMMAND_DISPATCH"]._serialized_options = b'\262C\002\010\010'
_INTERRUPTED_CODE.values_by_name["DEPRECATED_INFO_ITEM"]._options = None
_INTERRUPTED_CODE.values_by_name["DEPRECATED_INFO_ITEM"]._serialized_options = b'\262C\002\010\010'
_INTERRUPTED_CODE.values_by_name["DEPRECATED_AFTER_QUERY"]._options = None
_INTERRUPTED_CODE.values_by_name["DEPRECATED_AFTER_QUERY"]._serialized_options = b'\262C\002\010\010'
_INTERRUPTED_CODE.values_by_name["DEPRECATED_FETCH_COMMAND"]._options = None
_INTERRUPTED_CODE.values_by_name["DEPRECATED_FETCH_COMMAND"]._serialized_options = b'\262C\002\010\010'
_INTERRUPTED_CODE.values_by_name["DEPRECATED_SYNC_COMMAND"]._options = None
_INTERRUPTED_CODE.values_by_name["DEPRECATED_SYNC_COMMAND"]._serialized_options = b'\262C\002\010\010'
_INTERRUPTED_CODE.values_by_name["DEPRECATED_CLEAN_COMMAND"]._options = None
_INTERRUPTED_CODE.values_by_name["DEPRECATED_CLEAN_COMMAND"]._serialized_options = b'\262C\002\010\010'
_INTERRUPTED_CODE.values_by_name["DEPRECATED_MOBILE_INSTALL_COMMAND"]._options = None
_INTERRUPTED_CODE.values_by_name["DEPRECATED_MOBILE_INSTALL_COMMAND"]._serialized_options = b'\262C\002\010\010'
_INTERRUPTED_CODE.values_by_name["DEPRECATED_QUERY"]._options = None
_INTERRUPTED_CODE.values_by_name["DEPRECATED_QUERY"]._serialized_options = b'\262C\002\010\010'
_INTERRUPTED_CODE.values_by_name["DEPRECATED_RUN_COMMAND"]._options = None
_INTERRUPTED_CODE.values_by_name["DEPRECATED_RUN_COMMAND"]._serialized_options = b'\262C\002\010\010'
_INTERRUPTED_CODE.values_by_name["DEPRECATED_OPTIONS_PARSING"]._options = None
_INTERRUPTED_CODE.values_by_name["DEPRECATED_OPTIONS_PARSING"]._serialized_options = b'\262C\002\010\010'
_SPAWN_CODE.values_by_name["SPAWN_UNKNOWN"]._options = None
_SPAWN_CODE.values_by_name["SPAWN_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_SPAWN_CODE.values_by_name["NON_ZERO_EXIT"]._options = None
_SPAWN_CODE.values_by_name["NON_ZERO_EXIT"]._serialized_options = b'\262C\002\010\001'
_SPAWN_CODE.values_by_name["TIMEOUT"]._options = None
_SPAWN_CODE.values_by_name["TIMEOUT"]._serialized_options = b'\262C\002\010\001'
_SPAWN_CODE.values_by_name["OUT_OF_MEMORY"]._options = None
_SPAWN_CODE.values_by_name["OUT_OF_MEMORY"]._serialized_options = b'\262C\002\010\001'
_SPAWN_CODE.values_by_name["EXECUTION_FAILED"]._options = None
_SPAWN_CODE.values_by_name["EXECUTION_FAILED"]._serialized_options = b'\262C\002\010\"'
_SPAWN_CODE.values_by_name["EXECUTION_DENIED"]._options = None
_SPAWN_CODE.values_by_name["EXECUTION_DENIED"]._serialized_options = b'\262C\002\010\001'
_SPAWN_CODE.values_by_name["REMOTE_CACHE_FAILED"]._options = None
_SPAWN_CODE.values_by_name["REMOTE_CACHE_FAILED"]._serialized_options = b'\262C\002\010\"'
_SPAWN_CODE.values_by_name["COMMAND_LINE_EXPANSION_FAILURE"]._options = None
_SPAWN_CODE.values_by_name["COMMAND_LINE_EXPANSION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_SPAWN_CODE.values_by_name["EXEC_IO_EXCEPTION"]._options = None
_SPAWN_CODE.values_by_name["EXEC_IO_EXCEPTION"]._serialized_options = b'\262C\002\010$'
_SPAWN_CODE.values_by_name["INVALID_TIMEOUT"]._options = None
_SPAWN_CODE.values_by_name["INVALID_TIMEOUT"]._serialized_options = b'\262C\002\010\001'
_SPAWN_CODE.values_by_name["INVALID_REMOTE_EXECUTION_PROPERTIES"]._options = None
_SPAWN_CODE.values_by_name["INVALID_REMOTE_EXECUTION_PROPERTIES"]._serialized_options = b'\262C\002\010\001'
_SPAWN_CODE.values_by_name["NO_USABLE_STRATEGY_FOUND"]._options = None
_SPAWN_CODE.values_by_name["NO_USABLE_STRATEGY_FOUND"]._serialized_options = b'\262C\002\010\001'
_SPAWN_CODE.values_by_name["UNSPECIFIED_EXECUTION_FAILURE"]._options = None
_SPAWN_CODE.values_by_name["UNSPECIFIED_EXECUTION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_SPAWN_CODE.values_by_name["FORBIDDEN_INPUT"]._options = None
_SPAWN_CODE.values_by_name["FORBIDDEN_INPUT"]._serialized_options = b'\262C\002\010\001'
_SPAWN_CODE.values_by_name["REMOTE_CACHE_EVICTED"]._options = None
_SPAWN_CODE.values_by_name["REMOTE_CACHE_EVICTED"]._serialized_options = b'\262C\002\010\''
_EXTERNALREPOSITORY_CODE.values_by_name["EXTERNAL_REPOSITORY_UNKNOWN"]._options = None
_EXTERNALREPOSITORY_CODE.values_by_name["EXTERNAL_REPOSITORY_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_EXTERNALREPOSITORY_CODE.values_by_name["OVERRIDE_DISALLOWED_MANAGED_DIRECTORIES"]._options = None
_EXTERNALREPOSITORY_CODE.values_by_name["OVERRIDE_DISALLOWED_MANAGED_DIRECTORIES"]._serialized_options = b'\262C\002\010\002'
_EXTERNALREPOSITORY_CODE.values_by_name["BAD_DOWNLOADER_CONFIG"]._options = None
_EXTERNALREPOSITORY_CODE.values_by_name["BAD_DOWNLOADER_CONFIG"]._serialized_options = b'\262C\002\010\002'
_EXTERNALREPOSITORY_CODE.values_by_name["REPOSITORY_MAPPING_RESOLUTION_FAILED"]._options = None
_EXTERNALREPOSITORY_CODE.values_by_name["REPOSITORY_MAPPING_RESOLUTION_FAILED"]._serialized_options = b'\262C\002\010%'
_BUILDPROGRESS_CODE.values_by_name["BUILD_PROGRESS_UNKNOWN"]._options = None
_BUILDPROGRESS_CODE.values_by_name["BUILD_PROGRESS_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_BUILDPROGRESS_CODE.values_by_name["OUTPUT_INITIALIZATION"]._options = None
_BUILDPROGRESS_CODE.values_by_name["OUTPUT_INITIALIZATION"]._serialized_options = b'\262C\002\010$'
_BUILDPROGRESS_CODE.values_by_name["BES_RUNS_PER_TEST_LIMIT_UNSUPPORTED"]._options = None
_BUILDPROGRESS_CODE.values_by_name["BES_RUNS_PER_TEST_LIMIT_UNSUPPORTED"]._serialized_options = b'\262C\002\010\002'
_BUILDPROGRESS_CODE.values_by_name["BES_LOCAL_WRITE_ERROR"]._options = None
_BUILDPROGRESS_CODE.values_by_name["BES_LOCAL_WRITE_ERROR"]._serialized_options = b'\262C\002\010$'
_BUILDPROGRESS_CODE.values_by_name["BES_INITIALIZATION_ERROR"]._options = None
_BUILDPROGRESS_CODE.values_by_name["BES_INITIALIZATION_ERROR"]._serialized_options = b'\262C\002\010$'
_BUILDPROGRESS_CODE.values_by_name["BES_UPLOAD_TIMEOUT_ERROR"]._options = None
_BUILDPROGRESS_CODE.values_by_name["BES_UPLOAD_TIMEOUT_ERROR"]._serialized_options = b'\262C\002\010&'
_BUILDPROGRESS_CODE.values_by_name["BES_FILE_WRITE_TIMEOUT"]._options = None
_BUILDPROGRESS_CODE.values_by_name["BES_FILE_WRITE_TIMEOUT"]._serialized_options = b'\262C\002\010&'
_BUILDPROGRESS_CODE.values_by_name["BES_FILE_WRITE_IO_ERROR"]._options = None
_BUILDPROGRESS_CODE.values_by_name["BES_FILE_WRITE_IO_ERROR"]._serialized_options = b'\262C\002\010&'
_BUILDPROGRESS_CODE.values_by_name["BES_FILE_WRITE_INTERRUPTED"]._options = None
_BUILDPROGRESS_CODE.values_by_name["BES_FILE_WRITE_INTERRUPTED"]._serialized_options = b'\262C\002\010&'
_BUILDPROGRESS_CODE.values_by_name["BES_FILE_WRITE_CANCELED"]._options = None
_BUILDPROGRESS_CODE.values_by_name["BES_FILE_WRITE_CANCELED"]._serialized_options = b'\262C\002\010&'
_BUILDPROGRESS_CODE.values_by_name["BES_FILE_WRITE_UNKNOWN_ERROR"]._options = None
_BUILDPROGRESS_CODE.values_by_name["BES_FILE_WRITE_UNKNOWN_ERROR"]._serialized_options = b'\262C\002\010&'
_BUILDPROGRESS_CODE.values_by_name["BES_UPLOAD_LOCAL_FILE_ERROR"]._options = None
_BUILDPROGRESS_CODE.values_by_name["BES_UPLOAD_LOCAL_FILE_ERROR"]._serialized_options = b'\262C\002\010&'
_BUILDPROGRESS_CODE.values_by_name["BES_STREAM_NOT_RETRYING_FAILURE"]._options = None
_BUILDPROGRESS_CODE.values_by_name["BES_STREAM_NOT_RETRYING_FAILURE"]._serialized_options = b'\262C\002\010-'
_BUILDPROGRESS_CODE.values_by_name["BES_STREAM_COMPLETED_WITH_UNACK_EVENTS_ERROR"]._options = None
_BUILDPROGRESS_CODE.values_by_name["BES_STREAM_COMPLETED_WITH_UNACK_EVENTS_ERROR"]._serialized_options = b'\262C\002\010-'
_BUILDPROGRESS_CODE.values_by_name["BES_STREAM_COMPLETED_WITH_UNSENT_EVENTS_ERROR"]._options = None
_BUILDPROGRESS_CODE.values_by_name["BES_STREAM_COMPLETED_WITH_UNSENT_EVENTS_ERROR"]._serialized_options = b'\262C\002\010-'
_BUILDPROGRESS_CODE.values_by_name["BES_STREAM_COMPLETED_WITH_REMOTE_ERROR"]._options = None
_BUILDPROGRESS_CODE.values_by_name["BES_STREAM_COMPLETED_WITH_REMOTE_ERROR"]._serialized_options = b'\262C\002\010-'
_BUILDPROGRESS_CODE.values_by_name["BES_UPLOAD_RETRY_LIMIT_EXCEEDED_FAILURE"]._options = None
_BUILDPROGRESS_CODE.values_by_name["BES_UPLOAD_RETRY_LIMIT_EXCEEDED_FAILURE"]._serialized_options = b'\262C\002\010&'
_REMOTEOPTIONS_CODE.values_by_name["REMOTE_OPTIONS_UNKNOWN"]._options = None
_REMOTEOPTIONS_CODE.values_by_name["REMOTE_OPTIONS_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_REMOTEOPTIONS_CODE.values_by_name["REMOTE_DEFAULT_EXEC_PROPERTIES_LOGIC_ERROR"]._options = None
_REMOTEOPTIONS_CODE.values_by_name["REMOTE_DEFAULT_EXEC_PROPERTIES_LOGIC_ERROR"]._serialized_options = b'\262C\002\010\002'
_REMOTEOPTIONS_CODE.values_by_name["CREDENTIALS_READ_FAILURE"]._options = None
_REMOTEOPTIONS_CODE.values_by_name["CREDENTIALS_READ_FAILURE"]._serialized_options = b'\262C\002\010$'
_REMOTEOPTIONS_CODE.values_by_name["CREDENTIALS_WRITE_FAILURE"]._options = None
_REMOTEOPTIONS_CODE.values_by_name["CREDENTIALS_WRITE_FAILURE"]._serialized_options = b'\262C\002\010$'
_REMOTEOPTIONS_CODE.values_by_name["DOWNLOADER_WITHOUT_GRPC_CACHE"]._options = None
_REMOTEOPTIONS_CODE.values_by_name["DOWNLOADER_WITHOUT_GRPC_CACHE"]._serialized_options = b'\262C\002\010\002'
_REMOTEOPTIONS_CODE.values_by_name["EXECUTION_WITH_INVALID_CACHE"]._options = None
_REMOTEOPTIONS_CODE.values_by_name["EXECUTION_WITH_INVALID_CACHE"]._serialized_options = b'\262C\002\010\002'
_CLIENTENVIRONMENT_CODE.values_by_name["CLIENT_ENVIRONMENT_UNKNOWN"]._options = None
_CLIENTENVIRONMENT_CODE.values_by_name["CLIENT_ENVIRONMENT_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_CLIENTENVIRONMENT_CODE.values_by_name["CLIENT_CWD_MALFORMED"]._options = None
_CLIENTENVIRONMENT_CODE.values_by_name["CLIENT_CWD_MALFORMED"]._serialized_options = b'\262C\002\010\002'
_CRASH_CODE.values_by_name["CRASH_UNKNOWN"]._options = None
_CRASH_CODE.values_by_name["CRASH_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_CRASH_CODE.values_by_name["CRASH_OOM"]._options = None
_CRASH_CODE.values_by_name["CRASH_OOM"]._serialized_options = b'\262C\002\010!'
_SYMLINKFOREST_CODE.values_by_name["SYMLINK_FOREST_UNKNOWN"]._options = None
_SYMLINKFOREST_CODE.values_by_name["SYMLINK_FOREST_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_SYMLINKFOREST_CODE.values_by_name["TOPLEVEL_OUTDIR_PACKAGE_PATH_CONFLICT"]._options = None
_SYMLINKFOREST_CODE.values_by_name["TOPLEVEL_OUTDIR_PACKAGE_PATH_CONFLICT"]._serialized_options = b'\262C\002\010\002'
_SYMLINKFOREST_CODE.values_by_name["TOPLEVEL_OUTDIR_USED_AS_SOURCE"]._options = None
_SYMLINKFOREST_CODE.values_by_name["TOPLEVEL_OUTDIR_USED_AS_SOURCE"]._serialized_options = b'\262C\002\010\002'
_SYMLINKFOREST_CODE.values_by_name["CREATION_FAILED"]._options = None
_SYMLINKFOREST_CODE.values_by_name["CREATION_FAILED"]._serialized_options = b'\262C\002\010\002'
_BUILDREPORT_CODE.values_by_name["BUILD_REPORT_UNKNOWN"]._options = None
_BUILDREPORT_CODE.values_by_name["BUILD_REPORT_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_BUILDREPORT_CODE.values_by_name["BUILD_REPORT_UPLOADER_NEEDS_PACKAGE_PATHS"]._options = None
_BUILDREPORT_CODE.values_by_name["BUILD_REPORT_UPLOADER_NEEDS_PACKAGE_PATHS"]._serialized_options = b'\262C\002\010$'
_BUILDREPORT_CODE.values_by_name["BUILD_REPORT_WRITE_FAILED"]._options = None
_BUILDREPORT_CODE.values_by_name["BUILD_REPORT_WRITE_FAILED"]._serialized_options = b'\262C\002\010$'
_PACKAGEOPTIONS_CODE.values_by_name["PACKAGE_OPTIONS_UNKNOWN"]._options = None
_PACKAGEOPTIONS_CODE.values_by_name["PACKAGE_OPTIONS_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_PACKAGEOPTIONS_CODE.values_by_name["PACKAGE_PATH_INVALID"]._options = None
_PACKAGEOPTIONS_CODE.values_by_name["PACKAGE_PATH_INVALID"]._serialized_options = b'\262C\002\010\002'
_REMOTEEXECUTION_CODE.values_by_name["REMOTE_EXECUTION_UNKNOWN"]._options = None
_REMOTEEXECUTION_CODE.values_by_name["REMOTE_EXECUTION_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_REMOTEEXECUTION_CODE.values_by_name["CAPABILITIES_QUERY_FAILURE"]._options = None
_REMOTEEXECUTION_CODE.values_by_name["CAPABILITIES_QUERY_FAILURE"]._serialized_options = b'\262C\002\010\"'
_REMOTEEXECUTION_CODE.values_by_name["CREDENTIALS_INIT_FAILURE"]._options = None
_REMOTEEXECUTION_CODE.values_by_name["CREDENTIALS_INIT_FAILURE"]._serialized_options = b'\262C\002\010\002'
_REMOTEEXECUTION_CODE.values_by_name["CACHE_INIT_FAILURE"]._options = None
_REMOTEEXECUTION_CODE.values_by_name["CACHE_INIT_FAILURE"]._serialized_options = b'\262C\002\010\002'
_REMOTEEXECUTION_CODE.values_by_name["RPC_LOG_FAILURE"]._options = None
_REMOTEEXECUTION_CODE.values_by_name["RPC_LOG_FAILURE"]._serialized_options = b'\262C\002\010\002'
_REMOTEEXECUTION_CODE.values_by_name["EXEC_CHANNEL_INIT_FAILURE"]._options = None
_REMOTEEXECUTION_CODE.values_by_name["EXEC_CHANNEL_INIT_FAILURE"]._serialized_options = b'\262C\002\010\002'
_REMOTEEXECUTION_CODE.values_by_name["CACHE_CHANNEL_INIT_FAILURE"]._options = None
_REMOTEEXECUTION_CODE.values_by_name["CACHE_CHANNEL_INIT_FAILURE"]._serialized_options = b'\262C\002\010\002'
_REMOTEEXECUTION_CODE.values_by_name["DOWNLOADER_CHANNEL_INIT_FAILURE"]._options = None
_REMOTEEXECUTION_CODE.values_by_name["DOWNLOADER_CHANNEL_INIT_FAILURE"]._serialized_options = b'\262C\002\010\002'
_REMOTEEXECUTION_CODE.values_by_name["LOG_DIR_CLEANUP_FAILURE"]._options = None
_REMOTEEXECUTION_CODE.values_by_name["LOG_DIR_CLEANUP_FAILURE"]._serialized_options = b'\262C\002\010$'
_REMOTEEXECUTION_CODE.values_by_name["CLIENT_SERVER_INCOMPATIBLE"]._options = None
_REMOTEEXECUTION_CODE.values_by_name["CLIENT_SERVER_INCOMPATIBLE"]._serialized_options = b'\262C\002\010\"'
_REMOTEEXECUTION_CODE.values_by_name["DOWNLOADED_INPUTS_DELETION_FAILURE"]._options = None
_REMOTEEXECUTION_CODE.values_by_name["DOWNLOADED_INPUTS_DELETION_FAILURE"]._serialized_options = b'\262C\002\010\"'
_REMOTEEXECUTION_CODE.values_by_name["REMOTE_DOWNLOAD_OUTPUTS_MINIMAL_WITHOUT_INMEMORY_DOTD"]._options = None
_REMOTEEXECUTION_CODE.values_by_name["REMOTE_DOWNLOAD_OUTPUTS_MINIMAL_WITHOUT_INMEMORY_DOTD"]._serialized_options = b'\262C\002\010\002'
_REMOTEEXECUTION_CODE.values_by_name["REMOTE_DOWNLOAD_OUTPUTS_MINIMAL_WITHOUT_INMEMORY_JDEPS"]._options = None
_REMOTEEXECUTION_CODE.values_by_name["REMOTE_DOWNLOAD_OUTPUTS_MINIMAL_WITHOUT_INMEMORY_JDEPS"]._serialized_options = b'\262C\002\010\002'
_REMOTEEXECUTION_CODE.values_by_name["INCOMPLETE_OUTPUT_DOWNLOAD_CLEANUP_FAILURE"]._options = None
_REMOTEEXECUTION_CODE.values_by_name["INCOMPLETE_OUTPUT_DOWNLOAD_CLEANUP_FAILURE"]._serialized_options = b'\262C\002\010$'
_REMOTEEXECUTION_CODE.values_by_name["REMOTE_DEFAULT_PLATFORM_PROPERTIES_PARSE_FAILURE"]._options = None
_REMOTEEXECUTION_CODE.values_by_name["REMOTE_DEFAULT_PLATFORM_PROPERTIES_PARSE_FAILURE"]._serialized_options = b'\262C\002\010\001'
_REMOTEEXECUTION_CODE.values_by_name["ILLEGAL_OUTPUT"]._options = None
_REMOTEEXECUTION_CODE.values_by_name["ILLEGAL_OUTPUT"]._serialized_options = b'\262C\002\010\001'
_REMOTEEXECUTION_CODE.values_by_name["INVALID_EXEC_AND_PLATFORM_PROPERTIES"]._options = None
_REMOTEEXECUTION_CODE.values_by_name["INVALID_EXEC_AND_PLATFORM_PROPERTIES"]._serialized_options = b'\262C\002\010\001'
_EXECUTION_CODE.values_by_name["EXECUTION_UNKNOWN"]._options = None
_EXECUTION_CODE.values_by_name["EXECUTION_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_EXECUTION_CODE.values_by_name["EXECUTION_LOG_INITIALIZATION_FAILURE"]._options = None
_EXECUTION_CODE.values_by_name["EXECUTION_LOG_INITIALIZATION_FAILURE"]._serialized_options = b'\262C\002\010\002'
_EXECUTION_CODE.values_by_name["EXECUTION_LOG_WRITE_FAILURE"]._options = None
_EXECUTION_CODE.values_by_name["EXECUTION_LOG_WRITE_FAILURE"]._serialized_options = b'\262C\002\010$'
_EXECUTION_CODE.values_by_name["EXECROOT_CREATION_FAILURE"]._options = None
_EXECUTION_CODE.values_by_name["EXECROOT_CREATION_FAILURE"]._serialized_options = b'\262C\002\010$'
_EXECUTION_CODE.values_by_name["TEMP_ACTION_OUTPUT_DIRECTORY_DELETION_FAILURE"]._options = None
_EXECUTION_CODE.values_by_name["TEMP_ACTION_OUTPUT_DIRECTORY_DELETION_FAILURE"]._serialized_options = b'\262C\002\010$'
_EXECUTION_CODE.values_by_name["TEMP_ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE"]._options = None
_EXECUTION_CODE.values_by_name["TEMP_ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE"]._serialized_options = b'\262C\002\010$'
_EXECUTION_CODE.values_by_name["PERSISTENT_ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE"]._options = None
_EXECUTION_CODE.values_by_name["PERSISTENT_ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE"]._serialized_options = b'\262C\002\010$'
_EXECUTION_CODE.values_by_name["LOCAL_OUTPUT_DIRECTORY_SYMLINK_FAILURE"]._options = None
_EXECUTION_CODE.values_by_name["LOCAL_OUTPUT_DIRECTORY_SYMLINK_FAILURE"]._serialized_options = b'\262C\002\010$'
_EXECUTION_CODE.values_by_name["LOCAL_TEMPLATE_EXPANSION_FAILURE"]._options = None
_EXECUTION_CODE.values_by_name["LOCAL_TEMPLATE_EXPANSION_FAILURE"]._serialized_options = b'\262C\002\010$'
_EXECUTION_CODE.values_by_name["INPUT_DIRECTORY_CHECK_IO_EXCEPTION"]._options = None
_EXECUTION_CODE.values_by_name["INPUT_DIRECTORY_CHECK_IO_EXCEPTION"]._serialized_options = b'\262C\002\010$'
_EXECUTION_CODE.values_by_name["EXTRA_ACTION_OUTPUT_CREATION_FAILURE"]._options = None
_EXECUTION_CODE.values_by_name["EXTRA_ACTION_OUTPUT_CREATION_FAILURE"]._serialized_options = b'\262C\002\010$'
_EXECUTION_CODE.values_by_name["TEST_RUNNER_IO_EXCEPTION"]._options = None
_EXECUTION_CODE.values_by_name["TEST_RUNNER_IO_EXCEPTION"]._serialized_options = b'\262C\002\010$'
_EXECUTION_CODE.values_by_name["FILE_WRITE_IO_EXCEPTION"]._options = None
_EXECUTION_CODE.values_by_name["FILE_WRITE_IO_EXCEPTION"]._serialized_options = b'\262C\002\010$'
_EXECUTION_CODE.values_by_name["TEST_OUT_ERR_IO_EXCEPTION"]._options = None
_EXECUTION_CODE.values_by_name["TEST_OUT_ERR_IO_EXCEPTION"]._serialized_options = b'\262C\002\010$'
_EXECUTION_CODE.values_by_name["SYMLINK_TREE_MANIFEST_COPY_IO_EXCEPTION"]._options = None
_EXECUTION_CODE.values_by_name["SYMLINK_TREE_MANIFEST_COPY_IO_EXCEPTION"]._serialized_options = b'\262C\002\010$'
_EXECUTION_CODE.values_by_name["SYMLINK_TREE_MANIFEST_LINK_IO_EXCEPTION"]._options = None
_EXECUTION_CODE.values_by_name["SYMLINK_TREE_MANIFEST_LINK_IO_EXCEPTION"]._serialized_options = b'\262C\002\010$'
_EXECUTION_CODE.values_by_name["SYMLINK_TREE_CREATION_IO_EXCEPTION"]._options = None
_EXECUTION_CODE.values_by_name["SYMLINK_TREE_CREATION_IO_EXCEPTION"]._serialized_options = b'\262C\002\010$'
_EXECUTION_CODE.values_by_name["SYMLINK_TREE_CREATION_COMMAND_EXCEPTION"]._options = None
_EXECUTION_CODE.values_by_name["SYMLINK_TREE_CREATION_COMMAND_EXCEPTION"]._serialized_options = b'\262C\002\010$'
_EXECUTION_CODE.values_by_name["ACTION_INPUT_READ_IO_EXCEPTION"]._options = None
_EXECUTION_CODE.values_by_name["ACTION_INPUT_READ_IO_EXCEPTION"]._serialized_options = b'\262C\002\010$'
_EXECUTION_CODE.values_by_name["ACTION_NOT_UP_TO_DATE"]._options = None
_EXECUTION_CODE.values_by_name["ACTION_NOT_UP_TO_DATE"]._serialized_options = b'\262C\002\010\001'
_EXECUTION_CODE.values_by_name["PSEUDO_ACTION_EXECUTION_PROHIBITED"]._options = None
_EXECUTION_CODE.values_by_name["PSEUDO_ACTION_EXECUTION_PROHIBITED"]._serialized_options = b'\262C\002\010\001'
_EXECUTION_CODE.values_by_name["DISCOVERED_INPUT_DOES_NOT_EXIST"]._options = None
_EXECUTION_CODE.values_by_name["DISCOVERED_INPUT_DOES_NOT_EXIST"]._serialized_options = b'\262C\002\010$'
_EXECUTION_CODE.values_by_name["ACTION_OUTPUTS_DELETION_FAILURE"]._options = None
_EXECUTION_CODE.values_by_name["ACTION_OUTPUTS_DELETION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_EXECUTION_CODE.values_by_name["ACTION_OUTPUTS_NOT_CREATED"]._options = None
_EXECUTION_CODE.values_by_name["ACTION_OUTPUTS_NOT_CREATED"]._serialized_options = b'\262C\002\010\001'
_EXECUTION_CODE.values_by_name["ACTION_FINALIZATION_FAILURE"]._options = None
_EXECUTION_CODE.values_by_name["ACTION_FINALIZATION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_EXECUTION_CODE.values_by_name["ACTION_INPUT_LOST"]._options = None
_EXECUTION_CODE.values_by_name["ACTION_INPUT_LOST"]._serialized_options = b'\262C\002\010\001'
_EXECUTION_CODE.values_by_name["FILESYSTEM_CONTEXT_UPDATE_FAILURE"]._options = None
_EXECUTION_CODE.values_by_name["FILESYSTEM_CONTEXT_UPDATE_FAILURE"]._serialized_options = b'\262C\002\010\001'
_EXECUTION_CODE.values_by_name["ACTION_OUTPUT_CLOSE_FAILURE"]._options = None
_EXECUTION_CODE.values_by_name["ACTION_OUTPUT_CLOSE_FAILURE"]._serialized_options = b'\262C\002\010\001'
_EXECUTION_CODE.values_by_name["INPUT_DISCOVERY_IO_EXCEPTION"]._options = None
_EXECUTION_CODE.values_by_name["INPUT_DISCOVERY_IO_EXCEPTION"]._serialized_options = b'\262C\002\010\001'
_EXECUTION_CODE.values_by_name["TREE_ARTIFACT_DIRECTORY_CREATION_FAILURE"]._options = None
_EXECUTION_CODE.values_by_name["TREE_ARTIFACT_DIRECTORY_CREATION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_EXECUTION_CODE.values_by_name["ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE"]._options = None
_EXECUTION_CODE.values_by_name["ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_EXECUTION_CODE.values_by_name["ACTION_FS_OUTPUT_DIRECTORY_CREATION_FAILURE"]._options = None
_EXECUTION_CODE.values_by_name["ACTION_FS_OUTPUT_DIRECTORY_CREATION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_EXECUTION_CODE.values_by_name["ACTION_FS_OUT_ERR_DIRECTORY_CREATION_FAILURE"]._options = None
_EXECUTION_CODE.values_by_name["ACTION_FS_OUT_ERR_DIRECTORY_CREATION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_EXECUTION_CODE.values_by_name["NON_ACTION_EXECUTION_FAILURE"]._options = None
_EXECUTION_CODE.values_by_name["NON_ACTION_EXECUTION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_EXECUTION_CODE.values_by_name["CYCLE"]._options = None
_EXECUTION_CODE.values_by_name["CYCLE"]._serialized_options = b'\262C\002\010\001'
_EXECUTION_CODE.values_by_name["SOURCE_INPUT_MISSING"]._options = None
_EXECUTION_CODE.values_by_name["SOURCE_INPUT_MISSING"]._serialized_options = b'\262C\002\010\001'
_EXECUTION_CODE.values_by_name["UNEXPECTED_EXCEPTION"]._options = None
_EXECUTION_CODE.values_by_name["UNEXPECTED_EXCEPTION"]._serialized_options = b'\262C\002\010\001'
_EXECUTION_CODE.values_by_name["SOURCE_INPUT_IO_EXCEPTION"]._options = None
_EXECUTION_CODE.values_by_name["SOURCE_INPUT_IO_EXCEPTION"]._serialized_options = b'\262C\002\010\001'
_WORKSPACES_CODE.values_by_name["WORKSPACES_UNKNOWN"]._options = None
_WORKSPACES_CODE.values_by_name["WORKSPACES_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_WORKSPACES_CODE.values_by_name["WORKSPACES_LOG_INITIALIZATION_FAILURE"]._options = None
_WORKSPACES_CODE.values_by_name["WORKSPACES_LOG_INITIALIZATION_FAILURE"]._serialized_options = b'\262C\002\010\002'
_WORKSPACES_CODE.values_by_name["WORKSPACES_LOG_WRITE_FAILURE"]._options = None
_WORKSPACES_CODE.values_by_name["WORKSPACES_LOG_WRITE_FAILURE"]._serialized_options = b'\262C\002\010$'
_WORKSPACES_CODE.values_by_name["ILLEGAL_WORKSPACE_FILE_SYMLINK_WITH_MANAGED_DIRECTORIES"]._options = None
_WORKSPACES_CODE.values_by_name["ILLEGAL_WORKSPACE_FILE_SYMLINK_WITH_MANAGED_DIRECTORIES"]._serialized_options = b'\262C\002\010\001'
_WORKSPACES_CODE.values_by_name["WORKSPACE_FILE_READ_FAILURE_WITH_MANAGED_DIRECTORIES"]._options = None
_WORKSPACES_CODE.values_by_name["WORKSPACE_FILE_READ_FAILURE_WITH_MANAGED_DIRECTORIES"]._serialized_options = b'\262C\002\010\001'
_CRASHOPTIONS_CODE.values_by_name["CRASH_OPTIONS_UNKNOWN"]._options = None
_CRASHOPTIONS_CODE.values_by_name["CRASH_OPTIONS_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_FILESYSTEM_CODE.values_by_name["FILESYSTEM_UNKNOWN"]._options = None
_FILESYSTEM_CODE.values_by_name["FILESYSTEM_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_FILESYSTEM_CODE.values_by_name["EMBEDDED_BINARIES_ENUMERATION_FAILURE"]._options = None
_FILESYSTEM_CODE.values_by_name["EMBEDDED_BINARIES_ENUMERATION_FAILURE"]._serialized_options = b'\262C\002\010$'
_FILESYSTEM_CODE.values_by_name["SERVER_PID_TXT_FILE_READ_FAILURE"]._options = None
_FILESYSTEM_CODE.values_by_name["SERVER_PID_TXT_FILE_READ_FAILURE"]._serialized_options = b'\262C\002\010$'
_FILESYSTEM_CODE.values_by_name["SERVER_FILE_WRITE_FAILURE"]._options = None
_FILESYSTEM_CODE.values_by_name["SERVER_FILE_WRITE_FAILURE"]._serialized_options = b'\262C\002\010$'
_FILESYSTEM_CODE.values_by_name["DEFAULT_DIGEST_HASH_FUNCTION_INVALID_VALUE"]._options = None
_FILESYSTEM_CODE.values_by_name["DEFAULT_DIGEST_HASH_FUNCTION_INVALID_VALUE"]._serialized_options = b'\262C\002\010\002'
_EXECUTIONOPTIONS_CODE.values_by_name["EXECUTION_OPTIONS_UNKNOWN"]._options = None
_EXECUTIONOPTIONS_CODE.values_by_name["EXECUTION_OPTIONS_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_EXECUTIONOPTIONS_CODE.values_by_name["INVALID_STRATEGY"]._options = None
_EXECUTIONOPTIONS_CODE.values_by_name["INVALID_STRATEGY"]._serialized_options = b'\262C\002\010\002'
_EXECUTIONOPTIONS_CODE.values_by_name["REQUESTED_STRATEGY_INCOMPATIBLE_WITH_SANDBOXING"]._options = None
_EXECUTIONOPTIONS_CODE.values_by_name["REQUESTED_STRATEGY_INCOMPATIBLE_WITH_SANDBOXING"]._serialized_options = b'\262C\002\010$'
_EXECUTIONOPTIONS_CODE.values_by_name["DEPRECATED_LOCAL_RESOURCES_USED"]._options = None
_EXECUTIONOPTIONS_CODE.values_by_name["DEPRECATED_LOCAL_RESOURCES_USED"]._serialized_options = b'\262C\002\010$'
_EXECUTIONOPTIONS_CODE.values_by_name["INVALID_CYCLIC_DYNAMIC_STRATEGY"]._options = None
_EXECUTIONOPTIONS_CODE.values_by_name["INVALID_CYCLIC_DYNAMIC_STRATEGY"]._serialized_options = b'\262C\002\010$'
_EXECUTIONOPTIONS_CODE.values_by_name["RESTRICTION_UNMATCHED_TO_ACTION_CONTEXT"]._options = None
_EXECUTIONOPTIONS_CODE.values_by_name["RESTRICTION_UNMATCHED_TO_ACTION_CONTEXT"]._serialized_options = b'\262C\002\010\002'
_EXECUTIONOPTIONS_CODE.values_by_name["REMOTE_FALLBACK_STRATEGY_NOT_ABSTRACT_SPAWN"]._options = None
_EXECUTIONOPTIONS_CODE.values_by_name["REMOTE_FALLBACK_STRATEGY_NOT_ABSTRACT_SPAWN"]._serialized_options = b'\262C\002\010\002'
_EXECUTIONOPTIONS_CODE.values_by_name["STRATEGY_NOT_FOUND"]._options = None
_EXECUTIONOPTIONS_CODE.values_by_name["STRATEGY_NOT_FOUND"]._serialized_options = b'\262C\002\010\002'
_EXECUTIONOPTIONS_CODE.values_by_name["DYNAMIC_STRATEGY_NOT_SANDBOXED"]._options = None
_EXECUTIONOPTIONS_CODE.values_by_name["DYNAMIC_STRATEGY_NOT_SANDBOXED"]._serialized_options = b'\262C\002\010\002'
_COMMAND_CODE.values_by_name["COMMAND_FAILURE_UNKNOWN"]._options = None
_COMMAND_CODE.values_by_name["COMMAND_FAILURE_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_COMMAND_CODE.values_by_name["COMMAND_NOT_FOUND"]._options = None
_COMMAND_CODE.values_by_name["COMMAND_NOT_FOUND"]._serialized_options = b'\262C\002\010\002'
_COMMAND_CODE.values_by_name["ANOTHER_COMMAND_RUNNING"]._options = None
_COMMAND_CODE.values_by_name["ANOTHER_COMMAND_RUNNING"]._serialized_options = b'\262C\002\010\t'
_COMMAND_CODE.values_by_name["PREVIOUSLY_SHUTDOWN"]._options = None
_COMMAND_CODE.values_by_name["PREVIOUSLY_SHUTDOWN"]._serialized_options = b'\262C\002\010$'
_COMMAND_CODE.values_by_name["STARLARK_CPU_PROFILE_FILE_INITIALIZATION_FAILURE"]._options = None
_COMMAND_CODE.values_by_name["STARLARK_CPU_PROFILE_FILE_INITIALIZATION_FAILURE"]._serialized_options = b'\262C\002\010$'
_COMMAND_CODE.values_by_name["STARLARK_CPU_PROFILING_INITIALIZATION_FAILURE"]._options = None
_COMMAND_CODE.values_by_name["STARLARK_CPU_PROFILING_INITIALIZATION_FAILURE"]._serialized_options = b'\262C\002\010$'
_COMMAND_CODE.values_by_name["STARLARK_CPU_PROFILE_FILE_WRITE_FAILURE"]._options = None
_COMMAND_CODE.values_by_name["STARLARK_CPU_PROFILE_FILE_WRITE_FAILURE"]._serialized_options = b'\262C\002\010$'
_COMMAND_CODE.values_by_name["INVOCATION_POLICY_PARSE_FAILURE"]._options = None
_COMMAND_CODE.values_by_name["INVOCATION_POLICY_PARSE_FAILURE"]._serialized_options = b'\262C\002\010\002'
_COMMAND_CODE.values_by_name["INVOCATION_POLICY_INVALID"]._options = None
_COMMAND_CODE.values_by_name["INVOCATION_POLICY_INVALID"]._serialized_options = b'\262C\002\010\002'
_COMMAND_CODE.values_by_name["OPTIONS_PARSE_FAILURE"]._options = None
_COMMAND_CODE.values_by_name["OPTIONS_PARSE_FAILURE"]._serialized_options = b'\262C\002\010\002'
_COMMAND_CODE.values_by_name["STARLARK_OPTIONS_PARSE_FAILURE"]._options = None
_COMMAND_CODE.values_by_name["STARLARK_OPTIONS_PARSE_FAILURE"]._serialized_options = b'\262C\002\010\002'
_COMMAND_CODE.values_by_name["ARGUMENTS_NOT_RECOGNIZED"]._options = None
_COMMAND_CODE.values_by_name["ARGUMENTS_NOT_RECOGNIZED"]._serialized_options = b'\262C\002\010\002'
_COMMAND_CODE.values_by_name["NOT_IN_WORKSPACE"]._options = None
_COMMAND_CODE.values_by_name["NOT_IN_WORKSPACE"]._serialized_options = b'\262C\002\010\002'
_COMMAND_CODE.values_by_name["SPACES_IN_WORKSPACE_PATH"]._options = None
_COMMAND_CODE.values_by_name["SPACES_IN_WORKSPACE_PATH"]._serialized_options = b'\262C\002\010$'
_COMMAND_CODE.values_by_name["IN_OUTPUT_DIRECTORY"]._options = None
_COMMAND_CODE.values_by_name["IN_OUTPUT_DIRECTORY"]._serialized_options = b'\262C\002\010\002'
_GRPCSERVER_CODE.values_by_name["GRPC_SERVER_UNKNOWN"]._options = None
_GRPCSERVER_CODE.values_by_name["GRPC_SERVER_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_GRPCSERVER_CODE.values_by_name["GRPC_SERVER_NOT_COMPILED_IN"]._options = None
_GRPCSERVER_CODE.values_by_name["GRPC_SERVER_NOT_COMPILED_IN"]._serialized_options = b'\262C\002\010%'
_GRPCSERVER_CODE.values_by_name["SERVER_BIND_FAILURE"]._options = None
_GRPCSERVER_CODE.values_by_name["SERVER_BIND_FAILURE"]._serialized_options = b'\262C\002\010\001'
_GRPCSERVER_CODE.values_by_name["BAD_COOKIE"]._options = None
_GRPCSERVER_CODE.values_by_name["BAD_COOKIE"]._serialized_options = b'\262C\002\010$'
_GRPCSERVER_CODE.values_by_name["NO_CLIENT_DESCRIPTION"]._options = None
_GRPCSERVER_CODE.values_by_name["NO_CLIENT_DESCRIPTION"]._serialized_options = b'\262C\002\010$'
_CANONICALIZEFLAGS_CODE.values_by_name["CANONICALIZE_FLAGS_UNKNOWN"]._options = None
_CANONICALIZEFLAGS_CODE.values_by_name["CANONICALIZE_FLAGS_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_CANONICALIZEFLAGS_CODE.values_by_name["FOR_COMMAND_INVALID"]._options = None
_CANONICALIZEFLAGS_CODE.values_by_name["FOR_COMMAND_INVALID"]._serialized_options = b'\262C\002\010\002'
_BUILDCONFIGURATION_CODE.values_by_name["BUILD_CONFIGURATION_UNKNOWN"]._options = None
_BUILDCONFIGURATION_CODE.values_by_name["BUILD_CONFIGURATION_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_BUILDCONFIGURATION_CODE.values_by_name["PLATFORM_MAPPING_EVALUATION_FAILURE"]._options = None
_BUILDCONFIGURATION_CODE.values_by_name["PLATFORM_MAPPING_EVALUATION_FAILURE"]._serialized_options = b'\262C\002\010\002'
_BUILDCONFIGURATION_CODE.values_by_name["PLATFORM_MAPPINGS_FILE_IS_DIRECTORY"]._options = None
_BUILDCONFIGURATION_CODE.values_by_name["PLATFORM_MAPPINGS_FILE_IS_DIRECTORY"]._serialized_options = b'\262C\002\010\001'
_BUILDCONFIGURATION_CODE.values_by_name["PLATFORM_MAPPINGS_FILE_NOT_FOUND"]._options = None
_BUILDCONFIGURATION_CODE.values_by_name["PLATFORM_MAPPINGS_FILE_NOT_FOUND"]._serialized_options = b'\262C\002\010\001'
_BUILDCONFIGURATION_CODE.values_by_name["TOP_LEVEL_CONFIGURATION_CREATION_FAILURE"]._options = None
_BUILDCONFIGURATION_CODE.values_by_name["TOP_LEVEL_CONFIGURATION_CREATION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_BUILDCONFIGURATION_CODE.values_by_name["INVALID_CONFIGURATION"]._options = None
_BUILDCONFIGURATION_CODE.values_by_name["INVALID_CONFIGURATION"]._serialized_options = b'\262C\002\010\002'
_BUILDCONFIGURATION_CODE.values_by_name["INVALID_BUILD_OPTIONS"]._options = None
_BUILDCONFIGURATION_CODE.values_by_name["INVALID_BUILD_OPTIONS"]._serialized_options = b'\262C\002\010\002'
_BUILDCONFIGURATION_CODE.values_by_name["MULTI_CPU_PREREQ_UNMET"]._options = None
_BUILDCONFIGURATION_CODE.values_by_name["MULTI_CPU_PREREQ_UNMET"]._serialized_options = b'\262C\002\010\002'
_BUILDCONFIGURATION_CODE.values_by_name["HEURISTIC_INSTRUMENTATION_FILTER_INVALID"]._options = None
_BUILDCONFIGURATION_CODE.values_by_name["HEURISTIC_INSTRUMENTATION_FILTER_INVALID"]._serialized_options = b'\262C\002\010\002'
_BUILDCONFIGURATION_CODE.values_by_name["CYCLE"]._options = None
_BUILDCONFIGURATION_CODE.values_by_name["CYCLE"]._serialized_options = b'\262C\002\010\002'
_BUILDCONFIGURATION_CODE.values_by_name["CONFLICTING_CONFIGURATIONS"]._options = None
_BUILDCONFIGURATION_CODE.values_by_name["CONFLICTING_CONFIGURATIONS"]._serialized_options = b'\262C\002\010\002'
_BUILDCONFIGURATION_CODE.values_by_name["INVALID_OUTPUT_DIRECTORY_MNEMONIC"]._options = None
_BUILDCONFIGURATION_CODE.values_by_name["INVALID_OUTPUT_DIRECTORY_MNEMONIC"]._serialized_options = b'\262C\002\010\002'
_INFOCOMMAND_CODE.values_by_name["INFO_COMMAND_UNKNOWN"]._options = None
_INFOCOMMAND_CODE.values_by_name["INFO_COMMAND_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_INFOCOMMAND_CODE.values_by_name["TOO_MANY_KEYS"]._options = None
_INFOCOMMAND_CODE.values_by_name["TOO_MANY_KEYS"]._serialized_options = b'\262C\002\010\002'
_INFOCOMMAND_CODE.values_by_name["KEY_NOT_RECOGNIZED"]._options = None
_INFOCOMMAND_CODE.values_by_name["KEY_NOT_RECOGNIZED"]._serialized_options = b'\262C\002\010\002'
_INFOCOMMAND_CODE.values_by_name["INFO_BLOCK_WRITE_FAILURE"]._options = None
_INFOCOMMAND_CODE.values_by_name["INFO_BLOCK_WRITE_FAILURE"]._serialized_options = b'\262C\002\010\007'
_INFOCOMMAND_CODE.values_by_name["ALL_INFO_WRITE_FAILURE"]._options = None
_INFOCOMMAND_CODE.values_by_name["ALL_INFO_WRITE_FAILURE"]._serialized_options = b'\262C\002\010$'
_MEMORYOPTIONS_CODE.values_by_name["MEMORY_OPTIONS_UNKNOWN"]._options = None
_MEMORYOPTIONS_CODE.values_by_name["MEMORY_OPTIONS_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_MEMORYOPTIONS_CODE.values_by_name["EXPERIMENTAL_OOM_MORE_EAGERLY_THRESHOLD_INVALID_VALUE"]._options = None
_MEMORYOPTIONS_CODE.values_by_name["EXPERIMENTAL_OOM_MORE_EAGERLY_THRESHOLD_INVALID_VALUE"]._serialized_options = b'\262C\002\010\002'
_MEMORYOPTIONS_CODE.values_by_name["DEPRECATED_EXPERIMENTAL_OOM_MORE_EAGERLY_NO_TENURED_COLLECTORS_FOUND"]._options = None
_MEMORYOPTIONS_CODE.values_by_name["DEPRECATED_EXPERIMENTAL_OOM_MORE_EAGERLY_NO_TENURED_COLLECTORS_FOUND"]._serialized_options = b'\010\001\262C\002\010\002'
_QUERY_CODE.values_by_name["QUERY_UNKNOWN"]._options = None
_QUERY_CODE.values_by_name["QUERY_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_QUERY_CODE.values_by_name["QUERY_FILE_WITH_COMMAND_LINE_EXPRESSION"]._options = None
_QUERY_CODE.values_by_name["QUERY_FILE_WITH_COMMAND_LINE_EXPRESSION"]._serialized_options = b'\262C\002\010\002'
_QUERY_CODE.values_by_name["QUERY_FILE_READ_FAILURE"]._options = None
_QUERY_CODE.values_by_name["QUERY_FILE_READ_FAILURE"]._serialized_options = b'\262C\002\010\002'
_QUERY_CODE.values_by_name["COMMAND_LINE_EXPRESSION_MISSING"]._options = None
_QUERY_CODE.values_by_name["COMMAND_LINE_EXPRESSION_MISSING"]._serialized_options = b'\262C\002\010\002'
_QUERY_CODE.values_by_name["OUTPUT_FORMAT_INVALID"]._options = None
_QUERY_CODE.values_by_name["OUTPUT_FORMAT_INVALID"]._serialized_options = b'\262C\002\010\002'
_QUERY_CODE.values_by_name["GRAPHLESS_PREREQ_UNMET"]._options = None
_QUERY_CODE.values_by_name["GRAPHLESS_PREREQ_UNMET"]._serialized_options = b'\262C\002\010\002'
_QUERY_CODE.values_by_name["QUERY_OUTPUT_WRITE_FAILURE"]._options = None
_QUERY_CODE.values_by_name["QUERY_OUTPUT_WRITE_FAILURE"]._serialized_options = b'\262C\002\010$'
_QUERY_CODE.values_by_name["QUERY_STDOUT_FLUSH_FAILURE"]._options = None
_QUERY_CODE.values_by_name["QUERY_STDOUT_FLUSH_FAILURE"]._serialized_options = b'\262C\002\010$'
_QUERY_CODE.values_by_name["ANALYSIS_QUERY_PREREQ_UNMET"]._options = None
_QUERY_CODE.values_by_name["ANALYSIS_QUERY_PREREQ_UNMET"]._serialized_options = b'\262C\002\010\002'
_QUERY_CODE.values_by_name["QUERY_RESULTS_FLUSH_FAILURE"]._options = None
_QUERY_CODE.values_by_name["QUERY_RESULTS_FLUSH_FAILURE"]._serialized_options = b'\262C\002\010$'
_QUERY_CODE.values_by_name["DEPRECATED_UNCLOSED_QUOTATION_EXPRESSION_ERROR"]._options = None
_QUERY_CODE.values_by_name["DEPRECATED_UNCLOSED_QUOTATION_EXPRESSION_ERROR"]._serialized_options = b'\262C\002\010\002'
_QUERY_CODE.values_by_name["VARIABLE_NAME_INVALID"]._options = None
_QUERY_CODE.values_by_name["VARIABLE_NAME_INVALID"]._serialized_options = b'\262C\002\010\007'
_QUERY_CODE.values_by_name["VARIABLE_UNDEFINED"]._options = None
_QUERY_CODE.values_by_name["VARIABLE_UNDEFINED"]._serialized_options = b'\262C\002\010\007'
_QUERY_CODE.values_by_name["BUILDFILES_AND_LOADFILES_CANNOT_USE_OUTPUT_LOCATION_ERROR"]._options = None
_QUERY_CODE.values_by_name["BUILDFILES_AND_LOADFILES_CANNOT_USE_OUTPUT_LOCATION_ERROR"]._serialized_options = b'\262C\002\010\002'
_QUERY_CODE.values_by_name["BUILD_FILE_ERROR"]._options = None
_QUERY_CODE.values_by_name["BUILD_FILE_ERROR"]._serialized_options = b'\262C\002\010\007'
_QUERY_CODE.values_by_name["CYCLE"]._options = None
_QUERY_CODE.values_by_name["CYCLE"]._serialized_options = b'\262C\002\010\007'
_QUERY_CODE.values_by_name["UNIQUE_SKYKEY_THRESHOLD_EXCEEDED"]._options = None
_QUERY_CODE.values_by_name["UNIQUE_SKYKEY_THRESHOLD_EXCEEDED"]._serialized_options = b'\262C\002\010\007'
_QUERY_CODE.values_by_name["TARGET_NOT_IN_UNIVERSE_SCOPE"]._options = None
_QUERY_CODE.values_by_name["TARGET_NOT_IN_UNIVERSE_SCOPE"]._serialized_options = b'\262C\002\010\002'
_QUERY_CODE.values_by_name["INVALID_FULL_UNIVERSE_EXPRESSION"]._options = None
_QUERY_CODE.values_by_name["INVALID_FULL_UNIVERSE_EXPRESSION"]._serialized_options = b'\262C\002\010\007'
_QUERY_CODE.values_by_name["UNIVERSE_SCOPE_LIMIT_EXCEEDED"]._options = None
_QUERY_CODE.values_by_name["UNIVERSE_SCOPE_LIMIT_EXCEEDED"]._serialized_options = b'\262C\002\010\007'
_QUERY_CODE.values_by_name["INVALIDATION_LIMIT_EXCEEDED"]._options = None
_QUERY_CODE.values_by_name["INVALIDATION_LIMIT_EXCEEDED"]._serialized_options = b'\262C\002\010\007'
_QUERY_CODE.values_by_name["OUTPUT_FORMAT_PREREQ_UNMET"]._options = None
_QUERY_CODE.values_by_name["OUTPUT_FORMAT_PREREQ_UNMET"]._serialized_options = b'\262C\002\010\002'
_QUERY_CODE.values_by_name["ARGUMENTS_MISSING"]._options = None
_QUERY_CODE.values_by_name["ARGUMENTS_MISSING"]._serialized_options = b'\262C\002\010\007'
_QUERY_CODE.values_by_name["RBUILDFILES_FUNCTION_REQUIRES_SKYQUERY"]._options = None
_QUERY_CODE.values_by_name["RBUILDFILES_FUNCTION_REQUIRES_SKYQUERY"]._serialized_options = b'\262C\002\010\007'
_QUERY_CODE.values_by_name["FULL_TARGETS_NOT_SUPPORTED"]._options = None
_QUERY_CODE.values_by_name["FULL_TARGETS_NOT_SUPPORTED"]._serialized_options = b'\262C\002\010\007'
_QUERY_CODE.values_by_name["DEPRECATED_UNEXPECTED_TOKEN_ERROR"]._options = None
_QUERY_CODE.values_by_name["DEPRECATED_UNEXPECTED_TOKEN_ERROR"]._serialized_options = b'\262C\002\010\002'
_QUERY_CODE.values_by_name["DEPRECATED_INTEGER_LITERAL_MISSING"]._options = None
_QUERY_CODE.values_by_name["DEPRECATED_INTEGER_LITERAL_MISSING"]._serialized_options = b'\262C\002\010\002'
_QUERY_CODE.values_by_name["DEPRECATED_INVALID_STARTING_CHARACTER_ERROR"]._options = None
_QUERY_CODE.values_by_name["DEPRECATED_INVALID_STARTING_CHARACTER_ERROR"]._serialized_options = b'\262C\002\010\002'
_QUERY_CODE.values_by_name["DEPRECATED_PREMATURE_END_OF_INPUT_ERROR"]._options = None
_QUERY_CODE.values_by_name["DEPRECATED_PREMATURE_END_OF_INPUT_ERROR"]._serialized_options = b'\262C\002\010\002'
_QUERY_CODE.values_by_name["SYNTAX_ERROR"]._options = None
_QUERY_CODE.values_by_name["SYNTAX_ERROR"]._serialized_options = b'\262C\002\010\002'
_QUERY_CODE.values_by_name["OUTPUT_FORMATTER_IO_EXCEPTION"]._options = None
_QUERY_CODE.values_by_name["OUTPUT_FORMATTER_IO_EXCEPTION"]._serialized_options = b'\262C\002\010$'
_QUERY_CODE.values_by_name["SKYQUERY_TRANSITIVE_TARGET_ERROR"]._options = None
_QUERY_CODE.values_by_name["SKYQUERY_TRANSITIVE_TARGET_ERROR"]._serialized_options = b'\262C\002\010\007'
_QUERY_CODE.values_by_name["SKYQUERY_TARGET_EXCEPTION"]._options = None
_QUERY_CODE.values_by_name["SKYQUERY_TARGET_EXCEPTION"]._serialized_options = b'\262C\002\010\007'
_QUERY_CODE.values_by_name["INVALID_LABEL_IN_TEST_SUITE"]._options = None
_QUERY_CODE.values_by_name["INVALID_LABEL_IN_TEST_SUITE"]._serialized_options = b'\262C\002\010\007'
_QUERY_CODE.values_by_name["ILLEGAL_FLAG_COMBINATION"]._options = None
_QUERY_CODE.values_by_name["ILLEGAL_FLAG_COMBINATION"]._serialized_options = b'\262C\002\010\002'
_QUERY_CODE.values_by_name["NON_DETAILED_ERROR"]._options = None
_QUERY_CODE.values_by_name["NON_DETAILED_ERROR"]._serialized_options = b'\262C\002\010\001'
_LOCALEXECUTION_CODE.values_by_name["LOCAL_EXECUTION_UNKNOWN"]._options = None
_LOCALEXECUTION_CODE.values_by_name["LOCAL_EXECUTION_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_LOCALEXECUTION_CODE.values_by_name["LOCKFREE_OUTPUT_PREREQ_UNMET"]._options = None
_LOCALEXECUTION_CODE.values_by_name["LOCKFREE_OUTPUT_PREREQ_UNMET"]._serialized_options = b'\262C\002\010\002'
_ACTIONCACHE_CODE.values_by_name["ACTION_CACHE_UNKNOWN"]._options = None
_ACTIONCACHE_CODE.values_by_name["ACTION_CACHE_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_ACTIONCACHE_CODE.values_by_name["INITIALIZATION_FAILURE"]._options = None
_ACTIONCACHE_CODE.values_by_name["INITIALIZATION_FAILURE"]._serialized_options = b'\262C\002\010$'
_FETCHCOMMAND_CODE.values_by_name["FETCH_COMMAND_UNKNOWN"]._options = None
_FETCHCOMMAND_CODE.values_by_name["FETCH_COMMAND_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_FETCHCOMMAND_CODE.values_by_name["EXPRESSION_MISSING"]._options = None
_FETCHCOMMAND_CODE.values_by_name["EXPRESSION_MISSING"]._serialized_options = b'\262C\002\010\002'
_FETCHCOMMAND_CODE.values_by_name["OPTIONS_INVALID"]._options = None
_FETCHCOMMAND_CODE.values_by_name["OPTIONS_INVALID"]._serialized_options = b'\262C\002\010\002'
_FETCHCOMMAND_CODE.values_by_name["QUERY_PARSE_ERROR"]._options = None
_FETCHCOMMAND_CODE.values_by_name["QUERY_PARSE_ERROR"]._serialized_options = b'\262C\002\010\002'
_FETCHCOMMAND_CODE.values_by_name["QUERY_EVALUATION_ERROR"]._options = None
_FETCHCOMMAND_CODE.values_by_name["QUERY_EVALUATION_ERROR"]._serialized_options = b'\262C\002\010\002'
_SYNCCOMMAND_CODE.values_by_name["SYNC_COMMAND_UNKNOWN"]._options = None
_SYNCCOMMAND_CODE.values_by_name["SYNC_COMMAND_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_SYNCCOMMAND_CODE.values_by_name["PACKAGE_LOOKUP_ERROR"]._options = None
_SYNCCOMMAND_CODE.values_by_name["PACKAGE_LOOKUP_ERROR"]._serialized_options = b'\262C\002\010\007'
_SYNCCOMMAND_CODE.values_by_name["WORKSPACE_EVALUATION_ERROR"]._options = None
_SYNCCOMMAND_CODE.values_by_name["WORKSPACE_EVALUATION_ERROR"]._serialized_options = b'\262C\002\010\007'
_SYNCCOMMAND_CODE.values_by_name["REPOSITORY_FETCH_ERRORS"]._options = None
_SYNCCOMMAND_CODE.values_by_name["REPOSITORY_FETCH_ERRORS"]._serialized_options = b'\262C\002\010\007'
_SYNCCOMMAND_CODE.values_by_name["REPOSITORY_NAME_INVALID"]._options = None
_SYNCCOMMAND_CODE.values_by_name["REPOSITORY_NAME_INVALID"]._serialized_options = b'\262C\002\010\007'
_SANDBOX_CODE.values_by_name["SANDBOX_FAILURE_UNKNOWN"]._options = None
_SANDBOX_CODE.values_by_name["SANDBOX_FAILURE_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_SANDBOX_CODE.values_by_name["INITIALIZATION_FAILURE"]._options = None
_SANDBOX_CODE.values_by_name["INITIALIZATION_FAILURE"]._serialized_options = b'\262C\002\010$'
_SANDBOX_CODE.values_by_name["EXECUTION_IO_EXCEPTION"]._options = None
_SANDBOX_CODE.values_by_name["EXECUTION_IO_EXCEPTION"]._serialized_options = b'\262C\002\010$'
_SANDBOX_CODE.values_by_name["DOCKER_COMMAND_FAILURE"]._options = None
_SANDBOX_CODE.values_by_name["DOCKER_COMMAND_FAILURE"]._serialized_options = b'\262C\002\010\001'
_SANDBOX_CODE.values_by_name["NO_DOCKER_IMAGE"]._options = None
_SANDBOX_CODE.values_by_name["NO_DOCKER_IMAGE"]._serialized_options = b'\262C\002\010\001'
_SANDBOX_CODE.values_by_name["DOCKER_IMAGE_PREPARATION_FAILURE"]._options = None
_SANDBOX_CODE.values_by_name["DOCKER_IMAGE_PREPARATION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_SANDBOX_CODE.values_by_name["BIND_MOUNT_ANALYSIS_FAILURE"]._options = None
_SANDBOX_CODE.values_by_name["BIND_MOUNT_ANALYSIS_FAILURE"]._serialized_options = b'\262C\002\010\001'
_SANDBOX_CODE.values_by_name["MOUNT_SOURCE_DOES_NOT_EXIST"]._options = None
_SANDBOX_CODE.values_by_name["MOUNT_SOURCE_DOES_NOT_EXIST"]._serialized_options = b'\262C\002\010\001'
_SANDBOX_CODE.values_by_name["MOUNT_SOURCE_TARGET_TYPE_MISMATCH"]._options = None
_SANDBOX_CODE.values_by_name["MOUNT_SOURCE_TARGET_TYPE_MISMATCH"]._serialized_options = b'\262C\002\010\001'
_SANDBOX_CODE.values_by_name["MOUNT_TARGET_DOES_NOT_EXIST"]._options = None
_SANDBOX_CODE.values_by_name["MOUNT_TARGET_DOES_NOT_EXIST"]._serialized_options = b'\262C\002\010\001'
_SANDBOX_CODE.values_by_name["SUBPROCESS_START_FAILED"]._options = None
_SANDBOX_CODE.values_by_name["SUBPROCESS_START_FAILED"]._serialized_options = b'\262C\002\010$'
_SANDBOX_CODE.values_by_name["FORBIDDEN_INPUT"]._options = None
_SANDBOX_CODE.values_by_name["FORBIDDEN_INPUT"]._serialized_options = b'\262C\002\010\001'
_INCLUDESCANNING_CODE.values_by_name["INCLUDE_SCANNING_UNKNOWN"]._options = None
_INCLUDESCANNING_CODE.values_by_name["INCLUDE_SCANNING_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_INCLUDESCANNING_CODE.values_by_name["INITIALIZE_INCLUDE_HINTS_ERROR"]._options = None
_INCLUDESCANNING_CODE.values_by_name["INITIALIZE_INCLUDE_HINTS_ERROR"]._serialized_options = b'\262C\002\010$'
_INCLUDESCANNING_CODE.values_by_name["SCANNING_IO_EXCEPTION"]._options = None
_INCLUDESCANNING_CODE.values_by_name["SCANNING_IO_EXCEPTION"]._serialized_options = b'\262C\002\010$'
_INCLUDESCANNING_CODE.values_by_name["INCLUDE_HINTS_FILE_NOT_IN_PACKAGE"]._options = None
_INCLUDESCANNING_CODE.values_by_name["INCLUDE_HINTS_FILE_NOT_IN_PACKAGE"]._serialized_options = b'\262C\002\010$'
_INCLUDESCANNING_CODE.values_by_name["INCLUDE_HINTS_READ_FAILURE"]._options = None
_INCLUDESCANNING_CODE.values_by_name["INCLUDE_HINTS_READ_FAILURE"]._serialized_options = b'\262C\002\010$'
_INCLUDESCANNING_CODE.values_by_name["ILLEGAL_ABSOLUTE_PATH"]._options = None
_INCLUDESCANNING_CODE.values_by_name["ILLEGAL_ABSOLUTE_PATH"]._serialized_options = b'\262C\002\010\001'
_INCLUDESCANNING_CODE.values_by_name["PACKAGE_LOAD_FAILURE"]._options = None
_INCLUDESCANNING_CODE.values_by_name["PACKAGE_LOAD_FAILURE"]._serialized_options = b'\262C\002\010\001'
_INCLUDESCANNING_CODE.values_by_name["USER_PACKAGE_LOAD_FAILURE"]._options = None
_INCLUDESCANNING_CODE.values_by_name["USER_PACKAGE_LOAD_FAILURE"]._serialized_options = b'\262C\002\010\001'
_INCLUDESCANNING_CODE.values_by_name["SYSTEM_PACKAGE_LOAD_FAILURE"]._options = None
_INCLUDESCANNING_CODE.values_by_name["SYSTEM_PACKAGE_LOAD_FAILURE"]._serialized_options = b'\262C\002\010$'
_INCLUDESCANNING_CODE.values_by_name["UNDIFFERENTIATED_PACKAGE_LOAD_FAILURE"]._options = None
_INCLUDESCANNING_CODE.values_by_name["UNDIFFERENTIATED_PACKAGE_LOAD_FAILURE"]._serialized_options = b'\262C\002\010\001'
_TESTCOMMAND_CODE.values_by_name["TEST_COMMAND_UNKNOWN"]._options = None
_TESTCOMMAND_CODE.values_by_name["TEST_COMMAND_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_TESTCOMMAND_CODE.values_by_name["NO_TEST_TARGETS"]._options = None
_TESTCOMMAND_CODE.values_by_name["NO_TEST_TARGETS"]._serialized_options = b'\262C\002\010\004'
_TESTCOMMAND_CODE.values_by_name["TEST_WITH_NOANALYZE"]._options = None
_TESTCOMMAND_CODE.values_by_name["TEST_WITH_NOANALYZE"]._serialized_options = b'\262C\002\010\001'
_TESTCOMMAND_CODE.values_by_name["TESTS_FAILED"]._options = None
_TESTCOMMAND_CODE.values_by_name["TESTS_FAILED"]._serialized_options = b'\262C\002\010\003'
_ACTIONQUERY_CODE.values_by_name["ACTION_QUERY_UNKNOWN"]._options = None
_ACTIONQUERY_CODE.values_by_name["ACTION_QUERY_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_ACTIONQUERY_CODE.values_by_name["COMMAND_LINE_EXPANSION_FAILURE"]._options = None
_ACTIONQUERY_CODE.values_by_name["COMMAND_LINE_EXPANSION_FAILURE"]._serialized_options = b'\262C\002\010\002'
_ACTIONQUERY_CODE.values_by_name["OUTPUT_FAILURE"]._options = None
_ACTIONQUERY_CODE.values_by_name["OUTPUT_FAILURE"]._serialized_options = b'\262C\002\010\006'
_ACTIONQUERY_CODE.values_by_name["COMMAND_LINE_EXPRESSION_MISSING"]._options = None
_ACTIONQUERY_CODE.values_by_name["COMMAND_LINE_EXPRESSION_MISSING"]._serialized_options = b'\262C\002\010\002'
_ACTIONQUERY_CODE.values_by_name["EXPRESSION_PARSE_FAILURE"]._options = None
_ACTIONQUERY_CODE.values_by_name["EXPRESSION_PARSE_FAILURE"]._serialized_options = b'\262C\002\010\002'
_ACTIONQUERY_CODE.values_by_name["SKYFRAME_STATE_WITH_COMMAND_LINE_EXPRESSION"]._options = None
_ACTIONQUERY_CODE.values_by_name["SKYFRAME_STATE_WITH_COMMAND_LINE_EXPRESSION"]._serialized_options = b'\262C\002\010\002'
_ACTIONQUERY_CODE.values_by_name["INVALID_AQUERY_EXPRESSION"]._options = None
_ACTIONQUERY_CODE.values_by_name["INVALID_AQUERY_EXPRESSION"]._serialized_options = b'\262C\002\010\001'
_ACTIONQUERY_CODE.values_by_name["SKYFRAME_STATE_PREREQ_UNMET"]._options = None
_ACTIONQUERY_CODE.values_by_name["SKYFRAME_STATE_PREREQ_UNMET"]._serialized_options = b'\262C\002\010\002'
_ACTIONQUERY_CODE.values_by_name["AQUERY_OUTPUT_TOO_BIG"]._options = None
_ACTIONQUERY_CODE.values_by_name["AQUERY_OUTPUT_TOO_BIG"]._serialized_options = b'\262C\002\010\007'
_ACTIONQUERY_CODE.values_by_name["ILLEGAL_PATTERN_SYNTAX"]._options = None
_ACTIONQUERY_CODE.values_by_name["ILLEGAL_PATTERN_SYNTAX"]._serialized_options = b'\262C\002\010\002'
_ACTIONQUERY_CODE.values_by_name["INCORRECT_ARGUMENTS"]._options = None
_ACTIONQUERY_CODE.values_by_name["INCORRECT_ARGUMENTS"]._serialized_options = b'\262C\002\010\002'
_ACTIONQUERY_CODE.values_by_name["TOP_LEVEL_TARGETS_WITH_SKYFRAME_STATE_NOT_SUPPORTED"]._options = None
_ACTIONQUERY_CODE.values_by_name["TOP_LEVEL_TARGETS_WITH_SKYFRAME_STATE_NOT_SUPPORTED"]._serialized_options = b'\262C\002\010\002'
_ACTIONQUERY_CODE.values_by_name["SKYFRAME_STATE_AFTER_EXECUTION"]._options = None
_ACTIONQUERY_CODE.values_by_name["SKYFRAME_STATE_AFTER_EXECUTION"]._serialized_options = b'\262C\002\010\001'
_ACTIONQUERY_CODE.values_by_name["LABELS_FUNCTION_NOT_SUPPORTED"]._options = None
_ACTIONQUERY_CODE.values_by_name["LABELS_FUNCTION_NOT_SUPPORTED"]._serialized_options = b'\262C\002\010\002'
_ACTIONQUERY_CODE.values_by_name["TEMPLATE_EXPANSION_FAILURE"]._options = None
_ACTIONQUERY_CODE.values_by_name["TEMPLATE_EXPANSION_FAILURE"]._serialized_options = b'\262C\002\010\002'
_TARGETPATTERNS_CODE.values_by_name["TARGET_PATTERNS_UNKNOWN"]._options = None
_TARGETPATTERNS_CODE.values_by_name["TARGET_PATTERNS_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_TARGETPATTERNS_CODE.values_by_name["TARGET_PATTERN_FILE_WITH_COMMAND_LINE_PATTERN"]._options = None
_TARGETPATTERNS_CODE.values_by_name["TARGET_PATTERN_FILE_WITH_COMMAND_LINE_PATTERN"]._serialized_options = b'\262C\002\010\002'
_TARGETPATTERNS_CODE.values_by_name["TARGET_PATTERN_FILE_READ_FAILURE"]._options = None
_TARGETPATTERNS_CODE.values_by_name["TARGET_PATTERN_FILE_READ_FAILURE"]._serialized_options = b'\262C\002\010\002'
_TARGETPATTERNS_CODE.values_by_name["TARGET_PATTERN_PARSE_FAILURE"]._options = None
_TARGETPATTERNS_CODE.values_by_name["TARGET_PATTERN_PARSE_FAILURE"]._serialized_options = b'\262C\002\010\001'
_TARGETPATTERNS_CODE.values_by_name["PACKAGE_NOT_FOUND"]._options = None
_TARGETPATTERNS_CODE.values_by_name["PACKAGE_NOT_FOUND"]._serialized_options = b'\262C\002\010\001'
_TARGETPATTERNS_CODE.values_by_name["TARGET_FORMAT_INVALID"]._options = None
_TARGETPATTERNS_CODE.values_by_name["TARGET_FORMAT_INVALID"]._serialized_options = b'\262C\002\010\001'
_TARGETPATTERNS_CODE.values_by_name["ABSOLUTE_TARGET_PATTERN_INVALID"]._options = None
_TARGETPATTERNS_CODE.values_by_name["ABSOLUTE_TARGET_PATTERN_INVALID"]._serialized_options = b'\262C\002\010\001'
_TARGETPATTERNS_CODE.values_by_name["CANNOT_DETERMINE_TARGET_FROM_FILENAME"]._options = None
_TARGETPATTERNS_CODE.values_by_name["CANNOT_DETERMINE_TARGET_FROM_FILENAME"]._serialized_options = b'\262C\002\010\001'
_TARGETPATTERNS_CODE.values_by_name["LABEL_SYNTAX_ERROR"]._options = None
_TARGETPATTERNS_CODE.values_by_name["LABEL_SYNTAX_ERROR"]._serialized_options = b'\262C\002\010\001'
_TARGETPATTERNS_CODE.values_by_name["TARGET_CANNOT_BE_EMPTY_STRING"]._options = None
_TARGETPATTERNS_CODE.values_by_name["TARGET_CANNOT_BE_EMPTY_STRING"]._serialized_options = b'\262C\002\010\001'
_TARGETPATTERNS_CODE.values_by_name["PACKAGE_PART_CANNOT_END_IN_SLASH"]._options = None
_TARGETPATTERNS_CODE.values_by_name["PACKAGE_PART_CANNOT_END_IN_SLASH"]._serialized_options = b'\262C\002\010\001'
_TARGETPATTERNS_CODE.values_by_name["CYCLE"]._options = None
_TARGETPATTERNS_CODE.values_by_name["CYCLE"]._serialized_options = b'\262C\002\010\001'
_TARGETPATTERNS_CODE.values_by_name["CANNOT_PRELOAD_TARGET"]._options = None
_TARGETPATTERNS_CODE.values_by_name["CANNOT_PRELOAD_TARGET"]._serialized_options = b'\262C\002\010\001'
_TARGETPATTERNS_CODE.values_by_name["TARGETS_MISSING"]._options = None
_TARGETPATTERNS_CODE.values_by_name["TARGETS_MISSING"]._serialized_options = b'\262C\002\010\001'
_TARGETPATTERNS_CODE.values_by_name["RECURSIVE_TARGET_PATTERNS_NOT_ALLOWED"]._options = None
_TARGETPATTERNS_CODE.values_by_name["RECURSIVE_TARGET_PATTERNS_NOT_ALLOWED"]._serialized_options = b'\262C\002\010\001'
_TARGETPATTERNS_CODE.values_by_name["UP_LEVEL_REFERENCES_NOT_ALLOWED"]._options = None
_TARGETPATTERNS_CODE.values_by_name["UP_LEVEL_REFERENCES_NOT_ALLOWED"]._serialized_options = b'\262C\002\010\001'
_TARGETPATTERNS_CODE.values_by_name["NEGATIVE_TARGET_PATTERN_NOT_ALLOWED"]._options = None
_TARGETPATTERNS_CODE.values_by_name["NEGATIVE_TARGET_PATTERN_NOT_ALLOWED"]._serialized_options = b'\262C\002\010\001'
_TARGETPATTERNS_CODE.values_by_name["TARGET_MUST_BE_A_FILE"]._options = None
_TARGETPATTERNS_CODE.values_by_name["TARGET_MUST_BE_A_FILE"]._serialized_options = b'\262C\002\010\001'
_TARGETPATTERNS_CODE.values_by_name["DEPENDENCY_NOT_FOUND"]._options = None
_TARGETPATTERNS_CODE.values_by_name["DEPENDENCY_NOT_FOUND"]._serialized_options = b'\262C\002\010\001'
_TARGETPATTERNS_CODE.values_by_name["PACKAGE_NAME_INVALID"]._options = None
_TARGETPATTERNS_CODE.values_by_name["PACKAGE_NAME_INVALID"]._serialized_options = b'\262C\002\010\001'
_CLEANCOMMAND_CODE.values_by_name["CLEAN_COMMAND_UNKNOWN"]._options = None
_CLEANCOMMAND_CODE.values_by_name["CLEAN_COMMAND_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_CLEANCOMMAND_CODE.values_by_name["OUTPUT_SERVICE_CLEAN_FAILURE"]._options = None
_CLEANCOMMAND_CODE.values_by_name["OUTPUT_SERVICE_CLEAN_FAILURE"]._serialized_options = b'\262C\002\010\006'
_CLEANCOMMAND_CODE.values_by_name["ACTION_CACHE_CLEAN_FAILURE"]._options = None
_CLEANCOMMAND_CODE.values_by_name["ACTION_CACHE_CLEAN_FAILURE"]._serialized_options = b'\262C\002\010$'
_CLEANCOMMAND_CODE.values_by_name["OUT_ERR_CLOSE_FAILURE"]._options = None
_CLEANCOMMAND_CODE.values_by_name["OUT_ERR_CLOSE_FAILURE"]._serialized_options = b'\262C\002\010$'
_CLEANCOMMAND_CODE.values_by_name["OUTPUT_BASE_DELETE_FAILURE"]._options = None
_CLEANCOMMAND_CODE.values_by_name["OUTPUT_BASE_DELETE_FAILURE"]._serialized_options = b'\262C\002\010$'
_CLEANCOMMAND_CODE.values_by_name["OUTPUT_BASE_TEMP_MOVE_FAILURE"]._options = None
_CLEANCOMMAND_CODE.values_by_name["OUTPUT_BASE_TEMP_MOVE_FAILURE"]._serialized_options = b'\262C\002\010$'
_CLEANCOMMAND_CODE.values_by_name["ASYNC_OUTPUT_BASE_DELETE_FAILURE"]._options = None
_CLEANCOMMAND_CODE.values_by_name["ASYNC_OUTPUT_BASE_DELETE_FAILURE"]._serialized_options = b'\262C\002\010\006'
_CLEANCOMMAND_CODE.values_by_name["EXECROOT_DELETE_FAILURE"]._options = None
_CLEANCOMMAND_CODE.values_by_name["EXECROOT_DELETE_FAILURE"]._serialized_options = b'\262C\002\010$'
_CLEANCOMMAND_CODE.values_by_name["EXECROOT_TEMP_MOVE_FAILURE"]._options = None
_CLEANCOMMAND_CODE.values_by_name["EXECROOT_TEMP_MOVE_FAILURE"]._serialized_options = b'\262C\002\010$'
_CLEANCOMMAND_CODE.values_by_name["ASYNC_EXECROOT_DELETE_FAILURE"]._options = None
_CLEANCOMMAND_CODE.values_by_name["ASYNC_EXECROOT_DELETE_FAILURE"]._serialized_options = b'\262C\002\010\006'
_CLEANCOMMAND_CODE.values_by_name["ARGUMENTS_NOT_RECOGNIZED"]._options = None
_CLEANCOMMAND_CODE.values_by_name["ARGUMENTS_NOT_RECOGNIZED"]._serialized_options = b'\262C\002\010\002'
_CONFIGCOMMAND_CODE.values_by_name["CONFIG_COMMAND_UNKNOWN"]._options = None
_CONFIGCOMMAND_CODE.values_by_name["CONFIG_COMMAND_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_CONFIGCOMMAND_CODE.values_by_name["TOO_MANY_CONFIG_IDS"]._options = None
_CONFIGCOMMAND_CODE.values_by_name["TOO_MANY_CONFIG_IDS"]._serialized_options = b'\262C\002\010\002'
_CONFIGCOMMAND_CODE.values_by_name["CONFIGURATION_NOT_FOUND"]._options = None
_CONFIGCOMMAND_CODE.values_by_name["CONFIGURATION_NOT_FOUND"]._serialized_options = b'\262C\002\010\002'
_CONFIGURABLEQUERY_CODE.values_by_name["CONFIGURABLE_QUERY_UNKNOWN"]._options = None
_CONFIGURABLEQUERY_CODE.values_by_name["CONFIGURABLE_QUERY_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_CONFIGURABLEQUERY_CODE.values_by_name["COMMAND_LINE_EXPRESSION_MISSING"]._options = None
_CONFIGURABLEQUERY_CODE.values_by_name["COMMAND_LINE_EXPRESSION_MISSING"]._serialized_options = b'\262C\002\010\002'
_CONFIGURABLEQUERY_CODE.values_by_name["EXPRESSION_PARSE_FAILURE"]._options = None
_CONFIGURABLEQUERY_CODE.values_by_name["EXPRESSION_PARSE_FAILURE"]._serialized_options = b'\262C\002\010\002'
_CONFIGURABLEQUERY_CODE.values_by_name["FILTERS_NOT_SUPPORTED"]._options = None
_CONFIGURABLEQUERY_CODE.values_by_name["FILTERS_NOT_SUPPORTED"]._serialized_options = b'\262C\002\010\002'
_CONFIGURABLEQUERY_CODE.values_by_name["BUILDFILES_FUNCTION_NOT_SUPPORTED"]._options = None
_CONFIGURABLEQUERY_CODE.values_by_name["BUILDFILES_FUNCTION_NOT_SUPPORTED"]._serialized_options = b'\262C\002\010\002'
_CONFIGURABLEQUERY_CODE.values_by_name["SIBLINGS_FUNCTION_NOT_SUPPORTED"]._options = None
_CONFIGURABLEQUERY_CODE.values_by_name["SIBLINGS_FUNCTION_NOT_SUPPORTED"]._serialized_options = b'\262C\002\010\002'
_CONFIGURABLEQUERY_CODE.values_by_name["VISIBLE_FUNCTION_NOT_SUPPORTED"]._options = None
_CONFIGURABLEQUERY_CODE.values_by_name["VISIBLE_FUNCTION_NOT_SUPPORTED"]._serialized_options = b'\262C\002\010\002'
_CONFIGURABLEQUERY_CODE.values_by_name["ATTRIBUTE_MISSING"]._options = None
_CONFIGURABLEQUERY_CODE.values_by_name["ATTRIBUTE_MISSING"]._serialized_options = b'\262C\002\010\002'
_CONFIGURABLEQUERY_CODE.values_by_name["INCORRECT_CONFIG_ARGUMENT_ERROR"]._options = None
_CONFIGURABLEQUERY_CODE.values_by_name["INCORRECT_CONFIG_ARGUMENT_ERROR"]._serialized_options = b'\262C\002\010\002'
_CONFIGURABLEQUERY_CODE.values_by_name["TARGET_MISSING"]._options = None
_CONFIGURABLEQUERY_CODE.values_by_name["TARGET_MISSING"]._serialized_options = b'\262C\002\010\002'
_CONFIGURABLEQUERY_CODE.values_by_name["STARLARK_SYNTAX_ERROR"]._options = None
_CONFIGURABLEQUERY_CODE.values_by_name["STARLARK_SYNTAX_ERROR"]._serialized_options = b'\262C\002\010\002'
_CONFIGURABLEQUERY_CODE.values_by_name["STARLARK_EVAL_ERROR"]._options = None
_CONFIGURABLEQUERY_CODE.values_by_name["STARLARK_EVAL_ERROR"]._serialized_options = b'\262C\002\010\002'
_CONFIGURABLEQUERY_CODE.values_by_name["FORMAT_FUNCTION_ERROR"]._options = None
_CONFIGURABLEQUERY_CODE.values_by_name["FORMAT_FUNCTION_ERROR"]._serialized_options = b'\262C\002\010\002'
_DUMPCOMMAND_CODE.values_by_name["DUMP_COMMAND_UNKNOWN"]._options = None
_DUMPCOMMAND_CODE.values_by_name["DUMP_COMMAND_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_DUMPCOMMAND_CODE.values_by_name["NO_OUTPUT_SPECIFIED"]._options = None
_DUMPCOMMAND_CODE.values_by_name["NO_OUTPUT_SPECIFIED"]._serialized_options = b'\262C\002\010\007'
_DUMPCOMMAND_CODE.values_by_name["ACTION_CACHE_DUMP_FAILED"]._options = None
_DUMPCOMMAND_CODE.values_by_name["ACTION_CACHE_DUMP_FAILED"]._serialized_options = b'\262C\002\010\007'
_DUMPCOMMAND_CODE.values_by_name["COMMAND_LINE_EXPANSION_FAILURE"]._options = None
_DUMPCOMMAND_CODE.values_by_name["COMMAND_LINE_EXPANSION_FAILURE"]._serialized_options = b'\262C\002\010\007'
_DUMPCOMMAND_CODE.values_by_name["ACTION_GRAPH_DUMP_FAILED"]._options = None
_DUMPCOMMAND_CODE.values_by_name["ACTION_GRAPH_DUMP_FAILED"]._serialized_options = b'\262C\002\010\007'
_DUMPCOMMAND_CODE.values_by_name["STARLARK_HEAP_DUMP_FAILED"]._options = None
_DUMPCOMMAND_CODE.values_by_name["STARLARK_HEAP_DUMP_FAILED"]._serialized_options = b'\262C\002\010\010'
_HELPCOMMAND_CODE.values_by_name["HELP_COMMAND_UNKNOWN"]._options = None
_HELPCOMMAND_CODE.values_by_name["HELP_COMMAND_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_HELPCOMMAND_CODE.values_by_name["MISSING_ARGUMENT"]._options = None
_HELPCOMMAND_CODE.values_by_name["MISSING_ARGUMENT"]._serialized_options = b'\262C\002\010\002'
_HELPCOMMAND_CODE.values_by_name["COMMAND_NOT_FOUND"]._options = None
_HELPCOMMAND_CODE.values_by_name["COMMAND_NOT_FOUND"]._serialized_options = b'\262C\002\010\002'
_MOBILEINSTALL_CODE.values_by_name["MOBILE_INSTALL_UNKNOWN"]._options = None
_MOBILEINSTALL_CODE.values_by_name["MOBILE_INSTALL_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_MOBILEINSTALL_CODE.values_by_name["CLASSIC_UNSUPPORTED"]._options = None
_MOBILEINSTALL_CODE.values_by_name["CLASSIC_UNSUPPORTED"]._serialized_options = b'\262C\002\010\002'
_MOBILEINSTALL_CODE.values_by_name["NO_TARGET_SPECIFIED"]._options = None
_MOBILEINSTALL_CODE.values_by_name["NO_TARGET_SPECIFIED"]._serialized_options = b'\262C\002\010\002'
_MOBILEINSTALL_CODE.values_by_name["MULTIPLE_TARGETS_SPECIFIED"]._options = None
_MOBILEINSTALL_CODE.values_by_name["MULTIPLE_TARGETS_SPECIFIED"]._serialized_options = b'\262C\002\010\002'
_MOBILEINSTALL_CODE.values_by_name["TARGET_TYPE_INVALID"]._options = None
_MOBILEINSTALL_CODE.values_by_name["TARGET_TYPE_INVALID"]._serialized_options = b'\262C\002\010\006'
_MOBILEINSTALL_CODE.values_by_name["NON_ZERO_EXIT"]._options = None
_MOBILEINSTALL_CODE.values_by_name["NON_ZERO_EXIT"]._serialized_options = b'\262C\002\010\006'
_MOBILEINSTALL_CODE.values_by_name["ERROR_RUNNING_PROGRAM"]._options = None
_MOBILEINSTALL_CODE.values_by_name["ERROR_RUNNING_PROGRAM"]._serialized_options = b'\262C\002\010\006'
_PROFILECOMMAND_CODE.values_by_name["PROFILE_COMMAND_UNKNOWN"]._options = None
_PROFILECOMMAND_CODE.values_by_name["PROFILE_COMMAND_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_PROFILECOMMAND_CODE.values_by_name["OLD_BINARY_FORMAT_UNSUPPORTED"]._options = None
_PROFILECOMMAND_CODE.values_by_name["OLD_BINARY_FORMAT_UNSUPPORTED"]._serialized_options = b'\262C\002\010\001'
_PROFILECOMMAND_CODE.values_by_name["FILE_READ_FAILURE"]._options = None
_PROFILECOMMAND_CODE.values_by_name["FILE_READ_FAILURE"]._serialized_options = b'\262C\002\010\001'
_RUNCOMMAND_CODE.values_by_name["RUN_COMMAND_UNKNOWN"]._options = None
_RUNCOMMAND_CODE.values_by_name["RUN_COMMAND_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_RUNCOMMAND_CODE.values_by_name["NO_TARGET_SPECIFIED"]._options = None
_RUNCOMMAND_CODE.values_by_name["NO_TARGET_SPECIFIED"]._serialized_options = b'\262C\002\010\002'
_RUNCOMMAND_CODE.values_by_name["TOO_MANY_TARGETS_SPECIFIED"]._options = None
_RUNCOMMAND_CODE.values_by_name["TOO_MANY_TARGETS_SPECIFIED"]._serialized_options = b'\262C\002\010\002'
_RUNCOMMAND_CODE.values_by_name["TARGET_NOT_EXECUTABLE"]._options = None
_RUNCOMMAND_CODE.values_by_name["TARGET_NOT_EXECUTABLE"]._serialized_options = b'\262C\002\010\002'
_RUNCOMMAND_CODE.values_by_name["TARGET_BUILT_BUT_PATH_NOT_EXECUTABLE"]._options = None
_RUNCOMMAND_CODE.values_by_name["TARGET_BUILT_BUT_PATH_NOT_EXECUTABLE"]._serialized_options = b'\262C\002\010\001'
_RUNCOMMAND_CODE.values_by_name["TARGET_BUILT_BUT_PATH_VALIDATION_FAILED"]._options = None
_RUNCOMMAND_CODE.values_by_name["TARGET_BUILT_BUT_PATH_VALIDATION_FAILED"]._serialized_options = b'\262C\002\010$'
_RUNCOMMAND_CODE.values_by_name["RUN_UNDER_TARGET_NOT_BUILT"]._options = None
_RUNCOMMAND_CODE.values_by_name["RUN_UNDER_TARGET_NOT_BUILT"]._serialized_options = b'\262C\002\010\002'
_RUNCOMMAND_CODE.values_by_name["RUN_PREREQ_UNMET"]._options = None
_RUNCOMMAND_CODE.values_by_name["RUN_PREREQ_UNMET"]._serialized_options = b'\262C\002\010\002'
_RUNCOMMAND_CODE.values_by_name["TOO_MANY_TEST_SHARDS_OR_RUNS"]._options = None
_RUNCOMMAND_CODE.values_by_name["TOO_MANY_TEST_SHARDS_OR_RUNS"]._serialized_options = b'\262C\002\010\002'
_RUNCOMMAND_CODE.values_by_name["TEST_ENVIRONMENT_SETUP_FAILURE"]._options = None
_RUNCOMMAND_CODE.values_by_name["TEST_ENVIRONMENT_SETUP_FAILURE"]._serialized_options = b'\262C\002\010$'
_RUNCOMMAND_CODE.values_by_name["COMMAND_LINE_EXPANSION_FAILURE"]._options = None
_RUNCOMMAND_CODE.values_by_name["COMMAND_LINE_EXPANSION_FAILURE"]._serialized_options = b'\262C\002\010$'
_RUNCOMMAND_CODE.values_by_name["NO_SHELL_SPECIFIED"]._options = None
_RUNCOMMAND_CODE.values_by_name["NO_SHELL_SPECIFIED"]._serialized_options = b'\262C\002\010\002'
_RUNCOMMAND_CODE.values_by_name["SCRIPT_WRITE_FAILURE"]._options = None
_RUNCOMMAND_CODE.values_by_name["SCRIPT_WRITE_FAILURE"]._serialized_options = b'\262C\002\010\006'
_RUNCOMMAND_CODE.values_by_name["RUNFILES_DIRECTORIES_CREATION_FAILURE"]._options = None
_RUNCOMMAND_CODE.values_by_name["RUNFILES_DIRECTORIES_CREATION_FAILURE"]._serialized_options = b'\262C\002\010$'
_RUNCOMMAND_CODE.values_by_name["RUNFILES_SYMLINKS_CREATION_FAILURE"]._options = None
_RUNCOMMAND_CODE.values_by_name["RUNFILES_SYMLINKS_CREATION_FAILURE"]._serialized_options = b'\262C\002\010$'
_RUNCOMMAND_CODE.values_by_name["TEST_ENVIRONMENT_SETUP_INTERRUPTED"]._options = None
_RUNCOMMAND_CODE.values_by_name["TEST_ENVIRONMENT_SETUP_INTERRUPTED"]._serialized_options = b'\262C\002\010\010'
_VERSIONCOMMAND_CODE.values_by_name["VERSION_COMMAND_UNKNOWN"]._options = None
_VERSIONCOMMAND_CODE.values_by_name["VERSION_COMMAND_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_VERSIONCOMMAND_CODE.values_by_name["NOT_AVAILABLE"]._options = None
_VERSIONCOMMAND_CODE.values_by_name["NOT_AVAILABLE"]._serialized_options = b'\262C\002\010\002'
_PRINTACTIONCOMMAND_CODE.values_by_name["PRINT_ACTION_COMMAND_UNKNOWN"]._options = None
_PRINTACTIONCOMMAND_CODE.values_by_name["PRINT_ACTION_COMMAND_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_PRINTACTIONCOMMAND_CODE.values_by_name["TARGET_NOT_FOUND"]._options = None
_PRINTACTIONCOMMAND_CODE.values_by_name["TARGET_NOT_FOUND"]._serialized_options = b'\262C\002\010\001'
_PRINTACTIONCOMMAND_CODE.values_by_name["COMMAND_LINE_EXPANSION_FAILURE"]._options = None
_PRINTACTIONCOMMAND_CODE.values_by_name["COMMAND_LINE_EXPANSION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_PRINTACTIONCOMMAND_CODE.values_by_name["TARGET_KIND_UNSUPPORTED"]._options = None
_PRINTACTIONCOMMAND_CODE.values_by_name["TARGET_KIND_UNSUPPORTED"]._serialized_options = b'\262C\002\010\001'
_PRINTACTIONCOMMAND_CODE.values_by_name["ACTIONS_NOT_FOUND"]._options = None
_PRINTACTIONCOMMAND_CODE.values_by_name["ACTIONS_NOT_FOUND"]._serialized_options = b'\262C\002\010\001'
_WORKSPACESTATUS_CODE.values_by_name["WORKSPACE_STATUS_UNKNOWN"]._options = None
_WORKSPACESTATUS_CODE.values_by_name["WORKSPACE_STATUS_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_WORKSPACESTATUS_CODE.values_by_name["NON_ZERO_EXIT"]._options = None
_WORKSPACESTATUS_CODE.values_by_name["NON_ZERO_EXIT"]._serialized_options = b'\262C\002\010\001'
_WORKSPACESTATUS_CODE.values_by_name["ABNORMAL_TERMINATION"]._options = None
_WORKSPACESTATUS_CODE.values_by_name["ABNORMAL_TERMINATION"]._serialized_options = b'\262C\002\010\001'
_WORKSPACESTATUS_CODE.values_by_name["EXEC_FAILED"]._options = None
_WORKSPACESTATUS_CODE.values_by_name["EXEC_FAILED"]._serialized_options = b'\262C\002\010\001'
_WORKSPACESTATUS_CODE.values_by_name["PARSE_FAILURE"]._options = None
_WORKSPACESTATUS_CODE.values_by_name["PARSE_FAILURE"]._serialized_options = b'\262C\002\010$'
_WORKSPACESTATUS_CODE.values_by_name["VALIDATION_FAILURE"]._options = None
_WORKSPACESTATUS_CODE.values_by_name["VALIDATION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_WORKSPACESTATUS_CODE.values_by_name["CONTENT_UPDATE_IO_EXCEPTION"]._options = None
_WORKSPACESTATUS_CODE.values_by_name["CONTENT_UPDATE_IO_EXCEPTION"]._serialized_options = b'\262C\002\010\001'
_WORKSPACESTATUS_CODE.values_by_name["STDERR_IO_EXCEPTION"]._options = None
_WORKSPACESTATUS_CODE.values_by_name["STDERR_IO_EXCEPTION"]._serialized_options = b'\262C\002\010\001'
_JAVACOMPILE_CODE.values_by_name["JAVA_COMPILE_UNKNOWN"]._options = None
_JAVACOMPILE_CODE.values_by_name["JAVA_COMPILE_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_JAVACOMPILE_CODE.values_by_name["REDUCED_CLASSPATH_FAILURE"]._options = None
_JAVACOMPILE_CODE.values_by_name["REDUCED_CLASSPATH_FAILURE"]._serialized_options = b'\262C\002\010\001'
_JAVACOMPILE_CODE.values_by_name["COMMAND_LINE_EXPANSION_FAILURE"]._options = None
_JAVACOMPILE_CODE.values_by_name["COMMAND_LINE_EXPANSION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_JAVACOMPILE_CODE.values_by_name["JDEPS_READ_IO_EXCEPTION"]._options = None
_JAVACOMPILE_CODE.values_by_name["JDEPS_READ_IO_EXCEPTION"]._serialized_options = b'\262C\002\010$'
_JAVACOMPILE_CODE.values_by_name["REDUCED_CLASSPATH_FALLBACK_CLEANUP_FAILURE"]._options = None
_JAVACOMPILE_CODE.values_by_name["REDUCED_CLASSPATH_FALLBACK_CLEANUP_FAILURE"]._serialized_options = b'\262C\002\010$'
_ACTIONREWINDING_CODE.values_by_name["ACTION_REWINDING_UNKNOWN"]._options = None
_ACTIONREWINDING_CODE.values_by_name["ACTION_REWINDING_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_ACTIONREWINDING_CODE.values_by_name["LOST_INPUT_TOO_MANY_TIMES"]._options = None
_ACTIONREWINDING_CODE.values_by_name["LOST_INPUT_TOO_MANY_TIMES"]._serialized_options = b'\262C\002\010\001'
_ACTIONREWINDING_CODE.values_by_name["LOST_INPUT_IS_SOURCE"]._options = None
_ACTIONREWINDING_CODE.values_by_name["LOST_INPUT_IS_SOURCE"]._serialized_options = b'\262C\002\010\001'
_CPPCOMPILE_CODE.values_by_name["CPP_COMPILE_UNKNOWN"]._options = None
_CPPCOMPILE_CODE.values_by_name["CPP_COMPILE_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_CPPCOMPILE_CODE.values_by_name["FIND_USED_HEADERS_IO_EXCEPTION"]._options = None
_CPPCOMPILE_CODE.values_by_name["FIND_USED_HEADERS_IO_EXCEPTION"]._serialized_options = b'\262C\002\010$'
_CPPCOMPILE_CODE.values_by_name["COPY_OUT_ERR_FAILURE"]._options = None
_CPPCOMPILE_CODE.values_by_name["COPY_OUT_ERR_FAILURE"]._serialized_options = b'\262C\002\010$'
_CPPCOMPILE_CODE.values_by_name["D_FILE_READ_FAILURE"]._options = None
_CPPCOMPILE_CODE.values_by_name["D_FILE_READ_FAILURE"]._serialized_options = b'\262C\002\010$'
_CPPCOMPILE_CODE.values_by_name["COMMAND_GENERATION_FAILURE"]._options = None
_CPPCOMPILE_CODE.values_by_name["COMMAND_GENERATION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_CPPCOMPILE_CODE.values_by_name["MODULE_EXPANSION_TIMEOUT"]._options = None
_CPPCOMPILE_CODE.values_by_name["MODULE_EXPANSION_TIMEOUT"]._serialized_options = b'\262C\002\010\001'
_CPPCOMPILE_CODE.values_by_name["INCLUDE_PATH_OUTSIDE_EXEC_ROOT"]._options = None
_CPPCOMPILE_CODE.values_by_name["INCLUDE_PATH_OUTSIDE_EXEC_ROOT"]._serialized_options = b'\262C\002\010\001'
_CPPCOMPILE_CODE.values_by_name["FAKE_COMMAND_GENERATION_FAILURE"]._options = None
_CPPCOMPILE_CODE.values_by_name["FAKE_COMMAND_GENERATION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_CPPCOMPILE_CODE.values_by_name["UNDECLARED_INCLUSIONS"]._options = None
_CPPCOMPILE_CODE.values_by_name["UNDECLARED_INCLUSIONS"]._serialized_options = b'\262C\002\010\001'
_CPPCOMPILE_CODE.values_by_name["D_FILE_PARSE_FAILURE"]._options = None
_CPPCOMPILE_CODE.values_by_name["D_FILE_PARSE_FAILURE"]._serialized_options = b'\262C\002\010\001'
_CPPCOMPILE_CODE.values_by_name["COVERAGE_NOTES_CREATION_FAILURE"]._options = None
_CPPCOMPILE_CODE.values_by_name["COVERAGE_NOTES_CREATION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_CPPCOMPILE_CODE.values_by_name["MODULE_EXPANSION_MISSING_DATA"]._options = None
_CPPCOMPILE_CODE.values_by_name["MODULE_EXPANSION_MISSING_DATA"]._serialized_options = b'\262C\002\010\001'
_STARLARKACTION_CODE.values_by_name["STARLARK_ACTION_UNKNOWN"]._options = None
_STARLARKACTION_CODE.values_by_name["STARLARK_ACTION_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_STARLARKACTION_CODE.values_by_name["UNUSED_INPUT_LIST_READ_FAILURE"]._options = None
_STARLARKACTION_CODE.values_by_name["UNUSED_INPUT_LIST_READ_FAILURE"]._serialized_options = b'\262C\002\010$'
_STARLARKACTION_CODE.values_by_name["UNUSED_INPUT_LIST_FILE_NOT_FOUND"]._options = None
_STARLARKACTION_CODE.values_by_name["UNUSED_INPUT_LIST_FILE_NOT_FOUND"]._serialized_options = b'\262C\002\010\001'
_NINJAACTION_CODE.values_by_name["NINJA_ACTION_UNKNOWN"]._options = None
_NINJAACTION_CODE.values_by_name["NINJA_ACTION_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_NINJAACTION_CODE.values_by_name["INVALID_DEPFILE_DECLARED_DEPENDENCY"]._options = None
_NINJAACTION_CODE.values_by_name["INVALID_DEPFILE_DECLARED_DEPENDENCY"]._serialized_options = b'\262C\002\010$'
_NINJAACTION_CODE.values_by_name["D_FILE_PARSE_FAILURE"]._options = None
_NINJAACTION_CODE.values_by_name["D_FILE_PARSE_FAILURE"]._serialized_options = b'\262C\002\010$'
_DYNAMICEXECUTION_CODE.values_by_name["DYNAMIC_EXECUTION_UNKNOWN"]._options = None
_DYNAMICEXECUTION_CODE.values_by_name["DYNAMIC_EXECUTION_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_DYNAMICEXECUTION_CODE.values_by_name["XCODE_RELATED_PREREQ_UNMET"]._options = None
_DYNAMICEXECUTION_CODE.values_by_name["XCODE_RELATED_PREREQ_UNMET"]._serialized_options = b'\262C\002\010$'
_DYNAMICEXECUTION_CODE.values_by_name["ACTION_LOG_MOVE_FAILURE"]._options = None
_DYNAMICEXECUTION_CODE.values_by_name["ACTION_LOG_MOVE_FAILURE"]._serialized_options = b'\262C\002\010\001'
_DYNAMICEXECUTION_CODE.values_by_name["RUN_FAILURE"]._options = None
_DYNAMICEXECUTION_CODE.values_by_name["RUN_FAILURE"]._serialized_options = b'\262C\002\010\001'
_DYNAMICEXECUTION_CODE.values_by_name["NO_USABLE_STRATEGY_FOUND"]._options = None
_DYNAMICEXECUTION_CODE.values_by_name["NO_USABLE_STRATEGY_FOUND"]._serialized_options = b'\262C\002\010\002'
_FAILACTION_CODE.values_by_name["FAIL_ACTION_UNKNOWN"]._options = None
_FAILACTION_CODE.values_by_name["FAIL_ACTION_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_FAILACTION_CODE.values_by_name["INTENTIONAL_FAILURE"]._options = None
_FAILACTION_CODE.values_by_name["INTENTIONAL_FAILURE"]._serialized_options = b'\262C\002\010\001'
_FAILACTION_CODE.values_by_name["INCORRECT_PYTHON_VERSION"]._options = None
_FAILACTION_CODE.values_by_name["INCORRECT_PYTHON_VERSION"]._serialized_options = b'\262C\002\010\001'
_FAILACTION_CODE.values_by_name["PROGUARD_SPECS_MISSING"]._options = None
_FAILACTION_CODE.values_by_name["PROGUARD_SPECS_MISSING"]._serialized_options = b'\262C\002\010\001'
_FAILACTION_CODE.values_by_name["DYNAMIC_LINKING_NOT_SUPPORTED"]._options = None
_FAILACTION_CODE.values_by_name["DYNAMIC_LINKING_NOT_SUPPORTED"]._serialized_options = b'\262C\002\010\001'
_FAILACTION_CODE.values_by_name["SOURCE_FILES_MISSING"]._options = None
_FAILACTION_CODE.values_by_name["SOURCE_FILES_MISSING"]._serialized_options = b'\262C\002\010\001'
_FAILACTION_CODE.values_by_name["INCORRECT_TOOLCHAIN"]._options = None
_FAILACTION_CODE.values_by_name["INCORRECT_TOOLCHAIN"]._serialized_options = b'\262C\002\010\001'
_FAILACTION_CODE.values_by_name["FRAGMENT_CLASS_MISSING"]._options = None
_FAILACTION_CODE.values_by_name["FRAGMENT_CLASS_MISSING"]._serialized_options = b'\262C\002\010\001'
_FAILACTION_CODE.values_by_name["CANT_BUILD_INCOMPATIBLE_TARGET"]._options = None
_FAILACTION_CODE.values_by_name["CANT_BUILD_INCOMPATIBLE_TARGET"]._serialized_options = b'\262C\002\010\001'
_SYMLINKACTION_CODE.values_by_name["SYMLINK_ACTION_UNKNOWN"]._options = None
_SYMLINKACTION_CODE.values_by_name["SYMLINK_ACTION_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_SYMLINKACTION_CODE.values_by_name["EXECUTABLE_INPUT_NOT_FILE"]._options = None
_SYMLINKACTION_CODE.values_by_name["EXECUTABLE_INPUT_NOT_FILE"]._serialized_options = b'\262C\002\010\001'
_SYMLINKACTION_CODE.values_by_name["EXECUTABLE_INPUT_IS_NOT"]._options = None
_SYMLINKACTION_CODE.values_by_name["EXECUTABLE_INPUT_IS_NOT"]._serialized_options = b'\262C\002\010\001'
_SYMLINKACTION_CODE.values_by_name["EXECUTABLE_INPUT_CHECK_IO_EXCEPTION"]._options = None
_SYMLINKACTION_CODE.values_by_name["EXECUTABLE_INPUT_CHECK_IO_EXCEPTION"]._serialized_options = b'\262C\002\010\001'
_SYMLINKACTION_CODE.values_by_name["LINK_CREATION_IO_EXCEPTION"]._options = None
_SYMLINKACTION_CODE.values_by_name["LINK_CREATION_IO_EXCEPTION"]._serialized_options = b'\262C\002\010\001'
_SYMLINKACTION_CODE.values_by_name["LINK_TOUCH_IO_EXCEPTION"]._options = None
_SYMLINKACTION_CODE.values_by_name["LINK_TOUCH_IO_EXCEPTION"]._serialized_options = b'\262C\002\010\001'
_CPPLINK_CODE.values_by_name["CPP_LINK_UNKNOWN"]._options = None
_CPPLINK_CODE.values_by_name["CPP_LINK_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_CPPLINK_CODE.values_by_name["COMMAND_GENERATION_FAILURE"]._options = None
_CPPLINK_CODE.values_by_name["COMMAND_GENERATION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_CPPLINK_CODE.values_by_name["FAKE_COMMAND_GENERATION_FAILURE"]._options = None
_CPPLINK_CODE.values_by_name["FAKE_COMMAND_GENERATION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_LTOACTION_CODE.values_by_name["LTO_ACTION_UNKNOWN"]._options = None
_LTOACTION_CODE.values_by_name["LTO_ACTION_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_LTOACTION_CODE.values_by_name["INVALID_ABSOLUTE_PATH_IN_IMPORTS"]._options = None
_LTOACTION_CODE.values_by_name["INVALID_ABSOLUTE_PATH_IN_IMPORTS"]._serialized_options = b'\262C\002\010\001'
_LTOACTION_CODE.values_by_name["MISSING_BITCODE_FILES"]._options = None
_LTOACTION_CODE.values_by_name["MISSING_BITCODE_FILES"]._serialized_options = b'\262C\002\010\001'
_LTOACTION_CODE.values_by_name["IMPORTS_READ_IO_EXCEPTION"]._options = None
_LTOACTION_CODE.values_by_name["IMPORTS_READ_IO_EXCEPTION"]._serialized_options = b'\262C\002\010\001'
_TESTACTION_CODE.values_by_name["TEST_ACTION_UNKNOWN"]._options = None
_TESTACTION_CODE.values_by_name["TEST_ACTION_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_TESTACTION_CODE.values_by_name["NO_KEEP_GOING_TEST_FAILURE"]._options = None
_TESTACTION_CODE.values_by_name["NO_KEEP_GOING_TEST_FAILURE"]._serialized_options = b'\262C\002\010\001'
_TESTACTION_CODE.values_by_name["LOCAL_TEST_PREREQ_UNMET"]._options = None
_TESTACTION_CODE.values_by_name["LOCAL_TEST_PREREQ_UNMET"]._serialized_options = b'\262C\002\010\001'
_TESTACTION_CODE.values_by_name["COMMAND_LINE_EXPANSION_FAILURE"]._options = None
_TESTACTION_CODE.values_by_name["COMMAND_LINE_EXPANSION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_TESTACTION_CODE.values_by_name["DUPLICATE_CPU_TAGS"]._options = None
_TESTACTION_CODE.values_by_name["DUPLICATE_CPU_TAGS"]._serialized_options = b'\262C\002\010\001'
_TESTACTION_CODE.values_by_name["INVALID_CPU_TAG"]._options = None
_TESTACTION_CODE.values_by_name["INVALID_CPU_TAG"]._serialized_options = b'\262C\002\010\001'
_WORKER_CODE.values_by_name["WORKER_UNKNOWN"]._options = None
_WORKER_CODE.values_by_name["WORKER_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_WORKER_CODE.values_by_name["MULTIPLEXER_INSTANCE_REMOVAL_FAILURE"]._options = None
_WORKER_CODE.values_by_name["MULTIPLEXER_INSTANCE_REMOVAL_FAILURE"]._serialized_options = b'\262C\002\010\001'
_WORKER_CODE.values_by_name["MULTIPLEXER_DOES_NOT_EXIST"]._options = None
_WORKER_CODE.values_by_name["MULTIPLEXER_DOES_NOT_EXIST"]._serialized_options = b'\262C\002\010\001'
_WORKER_CODE.values_by_name["NO_TOOLS"]._options = None
_WORKER_CODE.values_by_name["NO_TOOLS"]._serialized_options = b'\262C\002\010\001'
_WORKER_CODE.values_by_name["NO_FLAGFILE"]._options = None
_WORKER_CODE.values_by_name["NO_FLAGFILE"]._serialized_options = b'\262C\002\010\001'
_WORKER_CODE.values_by_name["VIRTUAL_INPUT_MATERIALIZATION_FAILURE"]._options = None
_WORKER_CODE.values_by_name["VIRTUAL_INPUT_MATERIALIZATION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_WORKER_CODE.values_by_name["BORROW_FAILURE"]._options = None
_WORKER_CODE.values_by_name["BORROW_FAILURE"]._serialized_options = b'\262C\002\010\001'
_WORKER_CODE.values_by_name["PREFETCH_FAILURE"]._options = None
_WORKER_CODE.values_by_name["PREFETCH_FAILURE"]._serialized_options = b'\262C\002\010$'
_WORKER_CODE.values_by_name["PREPARE_FAILURE"]._options = None
_WORKER_CODE.values_by_name["PREPARE_FAILURE"]._serialized_options = b'\262C\002\010\001'
_WORKER_CODE.values_by_name["REQUEST_FAILURE"]._options = None
_WORKER_CODE.values_by_name["REQUEST_FAILURE"]._serialized_options = b'\262C\002\010\001'
_WORKER_CODE.values_by_name["PARSE_RESPONSE_FAILURE"]._options = None
_WORKER_CODE.values_by_name["PARSE_RESPONSE_FAILURE"]._serialized_options = b'\262C\002\010\001'
_WORKER_CODE.values_by_name["NO_RESPONSE"]._options = None
_WORKER_CODE.values_by_name["NO_RESPONSE"]._serialized_options = b'\262C\002\010\001'
_WORKER_CODE.values_by_name["FINISH_FAILURE"]._options = None
_WORKER_CODE.values_by_name["FINISH_FAILURE"]._serialized_options = b'\262C\002\010\001'
_WORKER_CODE.values_by_name["FORBIDDEN_INPUT"]._options = None
_WORKER_CODE.values_by_name["FORBIDDEN_INPUT"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["ANALYSIS_UNKNOWN"]._options = None
_ANALYSIS_CODE.values_by_name["ANALYSIS_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_ANALYSIS_CODE.values_by_name["LOAD_FAILURE"]._options = None
_ANALYSIS_CODE.values_by_name["LOAD_FAILURE"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["GENERIC_LOADING_PHASE_FAILURE"]._options = None
_ANALYSIS_CODE.values_by_name["GENERIC_LOADING_PHASE_FAILURE"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["NOT_ALL_TARGETS_ANALYZED"]._options = None
_ANALYSIS_CODE.values_by_name["NOT_ALL_TARGETS_ANALYZED"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["CYCLE"]._options = None
_ANALYSIS_CODE.values_by_name["CYCLE"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["PARAMETERIZED_TOP_LEVEL_ASPECT_INVALID"]._options = None
_ANALYSIS_CODE.values_by_name["PARAMETERIZED_TOP_LEVEL_ASPECT_INVALID"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["ASPECT_LABEL_SYNTAX_ERROR"]._options = None
_ANALYSIS_CODE.values_by_name["ASPECT_LABEL_SYNTAX_ERROR"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["ASPECT_PREREQ_UNMET"]._options = None
_ANALYSIS_CODE.values_by_name["ASPECT_PREREQ_UNMET"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["ASPECT_NOT_FOUND"]._options = None
_ANALYSIS_CODE.values_by_name["ASPECT_NOT_FOUND"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["ACTION_CONFLICT"]._options = None
_ANALYSIS_CODE.values_by_name["ACTION_CONFLICT"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["ARTIFACT_PREFIX_CONFLICT"]._options = None
_ANALYSIS_CODE.values_by_name["ARTIFACT_PREFIX_CONFLICT"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["UNEXPECTED_ANALYSIS_EXCEPTION"]._options = None
_ANALYSIS_CODE.values_by_name["UNEXPECTED_ANALYSIS_EXCEPTION"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["TARGETS_MISSING_ENVIRONMENTS"]._options = None
_ANALYSIS_CODE.values_by_name["TARGETS_MISSING_ENVIRONMENTS"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["INVALID_ENVIRONMENT"]._options = None
_ANALYSIS_CODE.values_by_name["INVALID_ENVIRONMENT"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["ENVIRONMENT_MISSING_FROM_GROUPS"]._options = None
_ANALYSIS_CODE.values_by_name["ENVIRONMENT_MISSING_FROM_GROUPS"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["EXEC_GROUP_MISSING"]._options = None
_ANALYSIS_CODE.values_by_name["EXEC_GROUP_MISSING"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["INVALID_EXECUTION_PLATFORM"]._options = None
_ANALYSIS_CODE.values_by_name["INVALID_EXECUTION_PLATFORM"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["ASPECT_CREATION_FAILED"]._options = None
_ANALYSIS_CODE.values_by_name["ASPECT_CREATION_FAILED"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["CONFIGURED_VALUE_CREATION_FAILED"]._options = None
_ANALYSIS_CODE.values_by_name["CONFIGURED_VALUE_CREATION_FAILED"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["INCOMPATIBLE_TARGET_REQUESTED"]._options = None
_ANALYSIS_CODE.values_by_name["INCOMPATIBLE_TARGET_REQUESTED"]._serialized_options = b'\262C\002\010\001'
_ANALYSIS_CODE.values_by_name["ANALYSIS_FAILURE_PROPAGATION_FAILED"]._options = None
_ANALYSIS_CODE.values_by_name["ANALYSIS_FAILURE_PROPAGATION_FAILED"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["PACKAGE_LOADING_UNKNOWN"]._options = None
_PACKAGELOADING_CODE.values_by_name["PACKAGE_LOADING_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_PACKAGELOADING_CODE.values_by_name["WORKSPACE_FILE_ERROR"]._options = None
_PACKAGELOADING_CODE.values_by_name["WORKSPACE_FILE_ERROR"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["MAX_COMPUTATION_STEPS_EXCEEDED"]._options = None
_PACKAGELOADING_CODE.values_by_name["MAX_COMPUTATION_STEPS_EXCEEDED"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["BUILD_FILE_MISSING"]._options = None
_PACKAGELOADING_CODE.values_by_name["BUILD_FILE_MISSING"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["REPOSITORY_MISSING"]._options = None
_PACKAGELOADING_CODE.values_by_name["REPOSITORY_MISSING"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["PERSISTENT_INCONSISTENT_FILESYSTEM_ERROR"]._options = None
_PACKAGELOADING_CODE.values_by_name["PERSISTENT_INCONSISTENT_FILESYSTEM_ERROR"]._serialized_options = b'\262C\002\010$'
_PACKAGELOADING_CODE.values_by_name["TRANSIENT_INCONSISTENT_FILESYSTEM_ERROR"]._options = None
_PACKAGELOADING_CODE.values_by_name["TRANSIENT_INCONSISTENT_FILESYSTEM_ERROR"]._serialized_options = b'\262C\002\010$'
_PACKAGELOADING_CODE.values_by_name["INVALID_NAME"]._options = None
_PACKAGELOADING_CODE.values_by_name["INVALID_NAME"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["EVAL_GLOBS_SYMLINK_ERROR"]._options = None
_PACKAGELOADING_CODE.values_by_name["EVAL_GLOBS_SYMLINK_ERROR"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["IMPORT_STARLARK_FILE_ERROR"]._options = None
_PACKAGELOADING_CODE.values_by_name["IMPORT_STARLARK_FILE_ERROR"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["PACKAGE_MISSING"]._options = None
_PACKAGELOADING_CODE.values_by_name["PACKAGE_MISSING"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["TARGET_MISSING"]._options = None
_PACKAGELOADING_CODE.values_by_name["TARGET_MISSING"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["NO_SUCH_THING"]._options = None
_PACKAGELOADING_CODE.values_by_name["NO_SUCH_THING"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["GLOB_IO_EXCEPTION"]._options = None
_PACKAGELOADING_CODE.values_by_name["GLOB_IO_EXCEPTION"]._serialized_options = b'\262C\002\010$'
_PACKAGELOADING_CODE.values_by_name["DUPLICATE_LABEL"]._options = None
_PACKAGELOADING_CODE.values_by_name["DUPLICATE_LABEL"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["INVALID_PACKAGE_SPECIFICATION"]._options = None
_PACKAGELOADING_CODE.values_by_name["INVALID_PACKAGE_SPECIFICATION"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["SYNTAX_ERROR"]._options = None
_PACKAGELOADING_CODE.values_by_name["SYNTAX_ERROR"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["ENVIRONMENT_IN_DIFFERENT_PACKAGE"]._options = None
_PACKAGELOADING_CODE.values_by_name["ENVIRONMENT_IN_DIFFERENT_PACKAGE"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["DEFAULT_ENVIRONMENT_UNDECLARED"]._options = None
_PACKAGELOADING_CODE.values_by_name["DEFAULT_ENVIRONMENT_UNDECLARED"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["ENVIRONMENT_IN_MULTIPLE_GROUPS"]._options = None
_PACKAGELOADING_CODE.values_by_name["ENVIRONMENT_IN_MULTIPLE_GROUPS"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["ENVIRONMENT_DOES_NOT_EXIST"]._options = None
_PACKAGELOADING_CODE.values_by_name["ENVIRONMENT_DOES_NOT_EXIST"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["ENVIRONMENT_INVALID"]._options = None
_PACKAGELOADING_CODE.values_by_name["ENVIRONMENT_INVALID"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["ENVIRONMENT_NOT_IN_GROUP"]._options = None
_PACKAGELOADING_CODE.values_by_name["ENVIRONMENT_NOT_IN_GROUP"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["PACKAGE_NAME_INVALID"]._options = None
_PACKAGELOADING_CODE.values_by_name["PACKAGE_NAME_INVALID"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["STARLARK_EVAL_ERROR"]._options = None
_PACKAGELOADING_CODE.values_by_name["STARLARK_EVAL_ERROR"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["LICENSE_PARSE_FAILURE"]._options = None
_PACKAGELOADING_CODE.values_by_name["LICENSE_PARSE_FAILURE"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["DISTRIBUTIONS_PARSE_FAILURE"]._options = None
_PACKAGELOADING_CODE.values_by_name["DISTRIBUTIONS_PARSE_FAILURE"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["LABEL_CROSSES_PACKAGE_BOUNDARY"]._options = None
_PACKAGELOADING_CODE.values_by_name["LABEL_CROSSES_PACKAGE_BOUNDARY"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["BUILTINS_INJECTION_FAILURE"]._options = None
_PACKAGELOADING_CODE.values_by_name["BUILTINS_INJECTION_FAILURE"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["SYMLINK_CYCLE_OR_INFINITE_EXPANSION"]._options = None
_PACKAGELOADING_CODE.values_by_name["SYMLINK_CYCLE_OR_INFINITE_EXPANSION"]._serialized_options = b'\262C\002\010\001'
_PACKAGELOADING_CODE.values_by_name["OTHER_IO_EXCEPTION"]._options = None
_PACKAGELOADING_CODE.values_by_name["OTHER_IO_EXCEPTION"]._serialized_options = b'\262C\002\010$'
_TOOLCHAIN_CODE.values_by_name["TOOLCHAIN_UNKNOWN"]._options = None
_TOOLCHAIN_CODE.values_by_name["TOOLCHAIN_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_TOOLCHAIN_CODE.values_by_name["MISSING_PROVIDER"]._options = None
_TOOLCHAIN_CODE.values_by_name["MISSING_PROVIDER"]._serialized_options = b'\262C\002\010\001'
_TOOLCHAIN_CODE.values_by_name["INVALID_CONSTRAINT_VALUE"]._options = None
_TOOLCHAIN_CODE.values_by_name["INVALID_CONSTRAINT_VALUE"]._serialized_options = b'\262C\002\010\001'
_TOOLCHAIN_CODE.values_by_name["INVALID_PLATFORM_VALUE"]._options = None
_TOOLCHAIN_CODE.values_by_name["INVALID_PLATFORM_VALUE"]._serialized_options = b'\262C\002\010\001'
_TOOLCHAIN_CODE.values_by_name["INVALID_TOOLCHAIN"]._options = None
_TOOLCHAIN_CODE.values_by_name["INVALID_TOOLCHAIN"]._serialized_options = b'\262C\002\010\001'
_TOOLCHAIN_CODE.values_by_name["NO_MATCHING_EXECUTION_PLATFORM"]._options = None
_TOOLCHAIN_CODE.values_by_name["NO_MATCHING_EXECUTION_PLATFORM"]._serialized_options = b'\262C\002\010\001'
_TOOLCHAIN_CODE.values_by_name["NO_MATCHING_TOOLCHAIN"]._options = None
_TOOLCHAIN_CODE.values_by_name["NO_MATCHING_TOOLCHAIN"]._serialized_options = b'\262C\002\010\001'
_TOOLCHAIN_CODE.values_by_name["INVALID_TOOLCHAIN_TYPE"]._options = None
_TOOLCHAIN_CODE.values_by_name["INVALID_TOOLCHAIN_TYPE"]._serialized_options = b'\262C\002\010\001'
_STARLARKLOADING_CODE.values_by_name["STARLARK_LOADING_UNKNOWN"]._options = None
_STARLARKLOADING_CODE.values_by_name["STARLARK_LOADING_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_STARLARKLOADING_CODE.values_by_name["CYCLE"]._options = None
_STARLARKLOADING_CODE.values_by_name["CYCLE"]._serialized_options = b'\262C\002\010\001'
_STARLARKLOADING_CODE.values_by_name["COMPILE_ERROR"]._options = None
_STARLARKLOADING_CODE.values_by_name["COMPILE_ERROR"]._serialized_options = b'\262C\002\010\001'
_STARLARKLOADING_CODE.values_by_name["PARSE_ERROR"]._options = None
_STARLARKLOADING_CODE.values_by_name["PARSE_ERROR"]._serialized_options = b'\262C\002\010\001'
_STARLARKLOADING_CODE.values_by_name["EVAL_ERROR"]._options = None
_STARLARKLOADING_CODE.values_by_name["EVAL_ERROR"]._serialized_options = b'\262C\002\010\001'
_STARLARKLOADING_CODE.values_by_name["CONTAINING_PACKAGE_NOT_FOUND"]._options = None
_STARLARKLOADING_CODE.values_by_name["CONTAINING_PACKAGE_NOT_FOUND"]._serialized_options = b'\262C\002\010\001'
_STARLARKLOADING_CODE.values_by_name["PACKAGE_NOT_FOUND"]._options = None
_STARLARKLOADING_CODE.values_by_name["PACKAGE_NOT_FOUND"]._serialized_options = b'\262C\002\010\001'
_STARLARKLOADING_CODE.values_by_name["IO_ERROR"]._options = None
_STARLARKLOADING_CODE.values_by_name["IO_ERROR"]._serialized_options = b'\262C\002\010\001'
_STARLARKLOADING_CODE.values_by_name["LABEL_CROSSES_PACKAGE_BOUNDARY"]._options = None
_STARLARKLOADING_CODE.values_by_name["LABEL_CROSSES_PACKAGE_BOUNDARY"]._serialized_options = b'\262C\002\010\001'
_STARLARKLOADING_CODE.values_by_name["BUILTINS_ERROR"]._options = None
_STARLARKLOADING_CODE.values_by_name["BUILTINS_ERROR"]._serialized_options = b'\262C\002\010\001'
_STARLARKLOADING_CODE.values_by_name["VISIBILITY_ERROR"]._options = None
_STARLARKLOADING_CODE.values_by_name["VISIBILITY_ERROR"]._serialized_options = b'\262C\002\010\001'
_EXTERNALDEPS_CODE.values_by_name["EXTERNAL_DEPS_UNKNOWN"]._options = None
_EXTERNALDEPS_CODE.values_by_name["EXTERNAL_DEPS_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_EXTERNALDEPS_CODE.values_by_name["MODULE_NOT_FOUND"]._options = None
_EXTERNALDEPS_CODE.values_by_name["MODULE_NOT_FOUND"]._serialized_options = b'\262C\002\0100'
_EXTERNALDEPS_CODE.values_by_name["BAD_MODULE"]._options = None
_EXTERNALDEPS_CODE.values_by_name["BAD_MODULE"]._serialized_options = b'\262C\002\0100'
_EXTERNALDEPS_CODE.values_by_name["VERSION_RESOLUTION_ERROR"]._options = None
_EXTERNALDEPS_CODE.values_by_name["VERSION_RESOLUTION_ERROR"]._serialized_options = b'\262C\002\0100'
_EXTERNALDEPS_CODE.values_by_name["INVALID_REGISTRY_URL"]._options = None
_EXTERNALDEPS_CODE.values_by_name["INVALID_REGISTRY_URL"]._serialized_options = b'\262C\002\0100'
_EXTERNALDEPS_CODE.values_by_name["ERROR_ACCESSING_REGISTRY"]._options = None
_EXTERNALDEPS_CODE.values_by_name["ERROR_ACCESSING_REGISTRY"]._serialized_options = b'\262C\002\010 '
_DIFFAWARENESS_CODE.values_by_name["DIFF_AWARENESS_UNKNOWN"]._options = None
_DIFFAWARENESS_CODE.values_by_name["DIFF_AWARENESS_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_DIFFAWARENESS_CODE.values_by_name["DIFF_STAT_FAILED"]._options = None
_DIFFAWARENESS_CODE.values_by_name["DIFF_STAT_FAILED"]._serialized_options = b'\262C\002\010$'
_MODQUERYCOMMAND_CODE.values_by_name["MODQUERY_COMMAND_UNKNOWN"]._options = None
_MODQUERYCOMMAND_CODE.values_by_name["MODQUERY_COMMAND_UNKNOWN"]._serialized_options = b'\262C\002\010%'
_MODQUERYCOMMAND_CODE.values_by_name["MISSING_ARGUMENTS"]._options = None
_MODQUERYCOMMAND_CODE.values_by_name["MISSING_ARGUMENTS"]._serialized_options = b'\262C\002\010\002'
_MODQUERYCOMMAND_CODE.values_by_name["TOO_MANY_ARGUMENTS"]._options = None
_MODQUERYCOMMAND_CODE.values_by_name["TOO_MANY_ARGUMENTS"]._serialized_options = b'\262C\002\010\002'
_MODQUERYCOMMAND_CODE.values_by_name["INVALID_ARGUMENTS"]._options = None
_MODQUERYCOMMAND_CODE.values_by_name["INVALID_ARGUMENTS"]._serialized_options = b'\262C\002\010\002'
_FAILUREDETAILMETADATA._serialized_start=94
_FAILUREDETAILMETADATA._serialized_end=136
_FAILUREDETAIL._serialized_start=139
_FAILUREDETAIL._serialized_end=3820
_INTERRUPTED._serialized_start=3823
_INTERRUPTED._serialized_end=4497
_INTERRUPTED_CODE._serialized_start=3888
_INTERRUPTED_CODE._serialized_end=4497
_SPAWN._serialized_start=4500
_SPAWN._serialized_end=5083
_SPAWN_CODE._serialized_start=4600
_SPAWN_CODE._serialized_end=5083
_EXTERNALREPOSITORY._serialized_start=5086
_EXTERNALREPOSITORY._serialized_end=5346
_EXTERNALREPOSITORY_CODE._serialized_start=5165
_EXTERNALREPOSITORY_CODE._serialized_end=5346
_BUILDPROGRESS._serialized_start=5349
_BUILDPROGRESS._serialized_end=6156
_BUILDPROGRESS_CODE._serialized_start=5418
_BUILDPROGRESS_CODE._serialized_end=6156
_REMOTEOPTIONS._serialized_start=6159
_REMOTEOPTIONS._serialized_end=6482
_REMOTEOPTIONS_CODE._serialized_start=6228
_REMOTEOPTIONS_CODE._serialized_end=6482
_CLIENTENVIRONMENT._serialized_start=6485
_CLIENTENVIRONMENT._serialized_end=6639
_CLIENTENVIRONMENT_CODE._serialized_start=6561
_CLIENTENVIRONMENT_CODE._serialized_end=6639
_CRASH._serialized_start=6642
_CRASH._serialized_end=6823
_CRASH_CODE._serialized_start=6769
_CRASH_CODE._serialized_end=6823
_THROWABLE._serialized_start=6825
_THROWABLE._serialized_end=6899
_SYMLINKFOREST._serialized_start=6902
_SYMLINKFOREST._serialized_end=7133
_SYMLINKFOREST_CODE._serialized_start=6971
_SYMLINKFOREST_CODE._serialized_end=7133
_BUILDREPORT._serialized_start=7136
_BUILDREPORT._serialized_end=7332
_BUILDREPORT_CODE._serialized_start=7201
_BUILDREPORT_CODE._serialized_end=7332
_PACKAGEOPTIONS._serialized_start=7335
_PACKAGEOPTIONS._serialized_end=7492
_PACKAGEOPTIONS_CODE._serialized_start=7405
_PACKAGEOPTIONS_CODE._serialized_end=7492
_REMOTEEXECUTION._serialized_start=7495
_REMOTEEXECUTION._serialized_end=8314
_REMOTEEXECUTION_CODE._serialized_start=7568
_REMOTEEXECUTION_CODE._serialized_end=8314
_EXECUTION._serialized_start=8317
_EXECUTION._serialized_end=10069
_EXECUTION_CODE._serialized_start=8378
_EXECUTION_CODE._serialized_end=10069
_WORKSPACES._serialized_start=10072
_WORKSPACES._serialized_end=10396
_WORKSPACES_CODE._serialized_start=10135
_WORKSPACES_CODE._serialized_end=10396
_CRASHOPTIONS._serialized_start=10398
_CRASHOPTIONS._serialized_end=10510
_CRASHOPTIONS_CODE._serialized_start=10464
_CRASHOPTIONS_CODE._serialized_end=10510
_FILESYSTEM._serialized_start=10513
_FILESYSTEM._serialized_end=10819
_FILESYSTEM_CODE._serialized_start=10576
_FILESYSTEM_CODE._serialized_end=10819
_EXECUTIONOPTIONS._serialized_start=10822
_EXECUTIONOPTIONS._serialized_end=11312
_EXECUTIONOPTIONS_CODE._serialized_start=10897
_EXECUTIONOPTIONS_CODE._serialized_end=11312
_COMMAND._serialized_start=11315
_COMMAND._serialized_end=11977
_COMMAND_CODE._serialized_start=11372
_COMMAND_CODE._serialized_end=11977
_GRPCSERVER._serialized_start=11980
_GRPCSERVER._serialized_end=12216
_GRPCSERVER_CODE._serialized_start=12043
_GRPCSERVER_CODE._serialized_end=12216
_CANONICALIZEFLAGS._serialized_start=12219
_CANONICALIZEFLAGS._serialized_end=12372
_CANONICALIZEFLAGS_CODE._serialized_start=12295
_CANONICALIZEFLAGS_CODE._serialized_end=12372
_BUILDCONFIGURATION._serialized_start=12375
_BUILDCONFIGURATION._serialized_end=12953
_BUILDCONFIGURATION_CODE._serialized_start=12454
_BUILDCONFIGURATION_CODE._serialized_end=12953
_INFOCOMMAND._serialized_start=12956
_INFOCOMMAND._serialized_end=13189
_INFOCOMMAND_CODE._serialized_start=13021
_INFOCOMMAND_CODE._serialized_end=13189
_MEMORYOPTIONS._serialized_start=13192
_MEMORYOPTIONS._serialized_end=13451
_MEMORYOPTIONS_CODE._serialized_start=13261
_MEMORYOPTIONS_CODE._serialized_end=13451
_QUERY._serialized_start=13454
_QUERY._serialized_end=14976
_QUERY_CODE._serialized_start=13507
_QUERY_CODE._serialized_end=14976
_LOCALEXECUTION._serialized_start=14979
_LOCALEXECUTION._serialized_end=15132
_LOCALEXECUTION_CODE._serialized_start=15049
_LOCALEXECUTION_CODE._serialized_end=15132
_ACTIONCACHE._serialized_start=15135
_ACTIONCACHE._serialized_end=15273
_ACTIONCACHE_CODE._serialized_start=15199
_ACTIONCACHE_CODE._serialized_end=15273
_FETCHCOMMAND._serialized_start=15276
_FETCHCOMMAND._serialized_end=15507
_FETCHCOMMAND_CODE._serialized_start=15343
_FETCHCOMMAND_CODE._serialized_end=15507
_SYNCCOMMAND._serialized_start=15510
_SYNCCOMMAND._serialized_end=15758
_SYNCCOMMAND_CODE._serialized_start=15575
_SYNCCOMMAND_CODE._serialized_end=15758
_SANDBOX._serialized_start=15761
_SANDBOX._serialized_end=16268
_SANDBOX_CODE._serialized_start=15818
_SANDBOX_CODE._serialized_end=16268
_INCLUDESCANNING._serialized_start=16271
_INCLUDESCANNING._serialized_end=16812
_INCLUDESCANNING_CODE._serialized_start=16412
_INCLUDESCANNING_CODE._serialized_end=16812
_TESTCOMMAND._serialized_start=16815
_TESTCOMMAND._serialized_end=17003
_TESTCOMMAND_CODE._serialized_start=16879
_TESTCOMMAND_CODE._serialized_end=17003
_ACTIONQUERY._serialized_start=17006
_ACTIONQUERY._serialized_end=17684
_ACTIONQUERY_CODE._serialized_start=17071
_ACTIONQUERY_CODE._serialized_end=17684
_TARGETPATTERNS._serialized_start=17687
_TARGETPATTERNS._serialized_end=18542
_TARGETPATTERNS_CODE._serialized_start=17758
_TARGETPATTERNS_CODE._serialized_end=18542
_CLEANCOMMAND._serialized_start=18545
_CLEANCOMMAND._serialized_end=19046
_CLEANCOMMAND_CODE._serialized_start=18612
_CLEANCOMMAND_CODE._serialized_end=19046
_CONFIGCOMMAND._serialized_start=19049
_CONFIGCOMMAND._serialized_end=19226
_CONFIGCOMMAND_CODE._serialized_start=19117
_CONFIGCOMMAND_CODE._serialized_end=19226
_CONFIGURABLEQUERY._serialized_start=19229
_CONFIGURABLEQUERY._serialized_end=19800
_CONFIGURABLEQUERY_CODE._serialized_start=19306
_CONFIGURABLEQUERY_CODE._serialized_end=19800
_DUMPCOMMAND._serialized_start=19803
_DUMPCOMMAND._serialized_end=20100
_DUMPCOMMAND_CODE._serialized_start=19868
_DUMPCOMMAND_CODE._serialized_end=20100
_HELPCOMMAND._serialized_start=20103
_HELPCOMMAND._serialized_end=20265
_HELPCOMMAND_CODE._serialized_start=20167
_HELPCOMMAND_CODE._serialized_end=20265
_MOBILEINSTALL._serialized_start=20268
_MOBILEINSTALL._serialized_end=20573
_MOBILEINSTALL_CODE._serialized_start=20337
_MOBILEINSTALL_CODE._serialized_end=20573
_PROFILECOMMAND._serialized_start=20576
_PROFILECOMMAND._serialized_end=20760
_PROFILECOMMAND_CODE._serialized_start=20646
_PROFILECOMMAND_CODE._serialized_end=20760
_RUNCOMMAND._serialized_start=20763
_RUNCOMMAND._serialized_end=21473
_RUNCOMMAND_CODE._serialized_start=20826
_RUNCOMMAND_CODE._serialized_end=21473
_VERSIONCOMMAND._serialized_start=21476
_VERSIONCOMMAND._serialized_end=21614
_VERSIONCOMMAND_CODE._serialized_start=21546
_VERSIONCOMMAND_CODE._serialized_end=21614
_PRINTACTIONCOMMAND._serialized_start=21617
_PRINTACTIONCOMMAND._serialized_end=21881
_PRINTACTIONCOMMAND_CODE._serialized_start=21696
_PRINTACTIONCOMMAND_CODE._serialized_end=21881
_WORKSPACESTATUS._serialized_start=21884
_WORKSPACESTATUS._serialized_end=22212
_WORKSPACESTATUS_CODE._serialized_start=21957
_WORKSPACESTATUS_CODE._serialized_end=22212
_JAVACOMPILE._serialized_start=22215
_JAVACOMPILE._serialized_end=22491
_JAVACOMPILE_CODE._serialized_start=22280
_JAVACOMPILE_CODE._serialized_end=22491
_ACTIONREWINDING._serialized_start=22494
_ACTIONREWINDING._serialized_end=22680
_ACTIONREWINDING_CODE._serialized_start=22566
_ACTIONREWINDING_CODE._serialized_end=22680
_CPPCOMPILE._serialized_start=22683
_CPPCOMPILE._serialized_end=23208
_CPPCOMPILE_CODE._serialized_start=22746
_CPPCOMPILE_CODE._serialized_end=23208
_STARLARKACTION._serialized_start=23211
_STARLARKACTION._serialized_end=23412
_STARLARKACTION_CODE._serialized_start=23282
_STARLARKACTION_CODE._serialized_end=23412
_NINJAACTION._serialized_start=23415
_NINJAACTION._serialized_end=23599
_NINJAACTION_CODE._serialized_start=23479
_NINJAACTION_CODE._serialized_end=23599
_DYNAMICEXECUTION._serialized_start=23602
_DYNAMICEXECUTION._serialized_end=23857
_DYNAMICEXECUTION_CODE._serialized_start=23677
_DYNAMICEXECUTION_CODE._serialized_end=23857
_FAILACTION._serialized_start=23860
_FAILACTION._serialized_end=24262
_FAILACTION_CODE._serialized_start=23923
_FAILACTION_CODE._serialized_end=24262
_SYMLINKACTION._serialized_start=24265
_SYMLINKACTION._serialized_end=24572
_SYMLINKACTION_CODE._serialized_start=24334
_SYMLINKACTION_CODE._serialized_end=24572
_CPPLINK._serialized_start=24575
_CPPLINK._serialized_end=24749
_CPPLINK_CODE._serialized_start=24631
_CPPLINK_CODE._serialized_end=24749
_LTOACTION._serialized_start=24752
_LTOACTION._serialized_end=24967
_LTOACTION_CODE._serialized_start=24813
_LTOACTION_CODE._serialized_end=24967
_TESTACTION._serialized_start=24970
_TESTACTION._serialized_end=25248
_TESTACTION_CODE._serialized_start=25033
_TESTACTION_CODE._serialized_end=25248
_WORKER._serialized_start=25251
_WORKER._serialized_end=25748
_WORKER_CODE._serialized_start=25306
_WORKER_CODE._serialized_end=25748
_ANALYSIS._serialized_start=25751
_ANALYSIS._serialized_end=26581
_ANALYSIS_CODE._serialized_start=25810
_ANALYSIS_CODE._serialized_end=26581
_PACKAGELOADING._serialized_start=26584
_PACKAGELOADING._serialized_end=27792
_PACKAGELOADING_CODE._serialized_start=26655
_PACKAGELOADING_CODE._serialized_end=27792
_TOOLCHAIN._serialized_start=27795
_TOOLCHAIN._serialized_end=28135
_TOOLCHAIN_CODE._serialized_start=27856
_TOOLCHAIN_CODE._serialized_end=28135
_STARLARKLOADING._serialized_start=28138
_STARLARKLOADING._serialized_end=28536
_STARLARKLOADING_CODE._serialized_start=28211
_STARLARKLOADING_CODE._serialized_end=28536
_EXTERNALDEPS._serialized_start=28539
_EXTERNALDEPS._serialized_end=28805
_EXTERNALDEPS_CODE._serialized_start=28606
_EXTERNALDEPS_CODE._serialized_end=28805
_DIFFAWARENESS._serialized_start=28808
_DIFFAWARENESS._serialized_end=28946
_DIFFAWARENESS_CODE._serialized_start=28876
_DIFFAWARENESS_CODE._serialized_end=28946
_MODQUERYCOMMAND._serialized_start=28949
_MODQUERYCOMMAND._serialized_end=29156
_MODQUERYCOMMAND_CODE._serialized_start=29022
_MODQUERYCOMMAND_CODE._serialized_end=29156
# @@protoc_insertion_point(module_scope)
|
xmanager-main
|
xmanager/generated/failure_details_pb2.py
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pyformat: disable
# Generated by the protocol buffer compiler. DO NOT EDIT!
# pylint: skip-file
# source: src/main/protobuf/command_line.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from . import option_filters_pb2 as src_dot_main_dot_protobuf_dot_option__filters__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n$src/main/protobuf/command_line.proto\x12\x0c\x63ommand_line\x1a&src/main/protobuf/option_filters.proto\"]\n\x0b\x43ommandLine\x12\x1a\n\x12\x63ommand_line_label\x18\x01 \x01(\t\x12\x32\n\x08sections\x18\x02 \x03(\x0b\x32 .command_line.CommandLineSection\"\x9b\x01\n\x12\x43ommandLineSection\x12\x15\n\rsection_label\x18\x01 \x01(\t\x12-\n\nchunk_list\x18\x02 \x01(\x0b\x32\x17.command_line.ChunkListH\x00\x12/\n\x0boption_list\x18\x03 \x01(\x0b\x32\x18.command_line.OptionListH\x00\x42\x0e\n\x0csection_type\"\x1a\n\tChunkList\x12\r\n\x05\x63hunk\x18\x01 \x03(\t\"2\n\nOptionList\x12$\n\x06option\x18\x01 \x03(\x0b\x32\x14.command_line.Option\"\xac\x01\n\x06Option\x12\x15\n\rcombined_form\x18\x01 \x01(\t\x12\x13\n\x0boption_name\x18\x02 \x01(\t\x12\x14\n\x0coption_value\x18\x03 \x01(\t\x12-\n\x0b\x65\x66\x66\x65\x63t_tags\x18\x04 \x03(\x0e\x32\x18.options.OptionEffectTag\x12\x31\n\rmetadata_tags\x18\x05 \x03(\x0e\x32\x1a.options.OptionMetadataTagB-\n+com.google.devtools.build.lib.runtime.protob\x06proto3')
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'src.main.protobuf.command_line_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n+com.google.devtools.build.lib.runtime.proto'
_COMMANDLINE._serialized_start=94
_COMMANDLINE._serialized_end=187
_COMMANDLINESECTION._serialized_start=190
_COMMANDLINESECTION._serialized_end=345
_CHUNKLIST._serialized_start=347
_CHUNKLIST._serialized_end=373
_OPTIONLIST._serialized_start=375
_OPTIONLIST._serialized_end=425
_OPTION._serialized_start=428
_OPTION._serialized_end=600
# @@protoc_insertion_point(module_scope)
|
xmanager-main
|
xmanager/generated/command_line_pb2.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A mock implemenation of core XM objects.
This module is mainly purposed for unit tests. It provides implementation of
xm.Experiment, xm.WorkUnit, xm.MetadataContext which don't do much apart from
allowing to inspect what methods were called.
It also may be used as a stub implementation for cases when one doesn't care
how experiment is set up, but wants to maintain API consistency. For examle if
a framework provides own local run mode and wants to ignore any experiment
metadata manipulations.
"""
import asyncio
from concurrent import futures
from typing import Any, Awaitable, Callable, List, Mapping, Optional, Set
import attr
from xmanager.xm import async_packager
from xmanager.xm import core
from xmanager.xm import id_predictor
from xmanager.xm import job_blocks
from xmanager.xm import metadata_context
class MockContextAnnotations(metadata_context.ContextAnnotations):
"""ContextAnnotations which stores all data in memory."""
def __init__(self) -> None:
self._title = ''
self._tags = set()
self._notes = ''
@property
def title(self) -> str:
return self._title
def set_title(self, title: str) -> None:
self._title = title
@property
def tags(self) -> Set[str]:
return self._tags
def add_tags(self, *tags: str) -> None:
self._tags.update(tags)
def remove_tags(self, *tags: str) -> None:
for tag in tags:
self._tags.discard(tag)
@property
def notes(self) -> str:
return self._notes
def set_notes(self, notes: str) -> None:
self._notes = notes
class MockMetadataContext(metadata_context.MetadataContext):
"""A MetadataContext which stores all data in memory."""
def __init__(self) -> None:
super().__init__(creator='unknown', annotations=MockContextAnnotations())
class MockExperimentUnit(core.WorkUnit):
"""A mock version of WorkUnit with abstract methods implemented."""
def __init__(
self,
experiment: core.Experiment,
work_unit_id_predictor: id_predictor.Predictor,
create_task: Callable[[Awaitable[Any]], futures.Future[Any]],
launched_jobs: List[job_blocks.JobType],
launched_jobs_args: List[Optional[Mapping[str, Any]]],
args: Optional[Mapping[str, Any]],
role: core.ExperimentUnitRole,
identity: str = '',
) -> None:
super().__init__(experiment, create_task, args, role, identity)
self._launched_jobs = launched_jobs
self._launched_jobs_args = launched_jobs_args
self._work_unit_id = work_unit_id_predictor.reserve_id()
async def _wait_until_complete(self) -> None:
"""Mock work unit is immediately complete."""
async def _launch_job_group(
self,
job_group: job_blocks.JobGroup,
args: Optional[Mapping[str, Any]],
identity: str,
) -> None:
"""Appends the job group to the launched_jobs list."""
self._launched_jobs.extend(job_group.jobs.values())
self._launched_jobs_args.append(args)
@property
def work_unit_id(self) -> int:
return self._work_unit_id
@property
def experiment_unit_name(self) -> str:
return f'{self.experiment_id}_{self._work_unit_id}'
class MockExperiment(core.Experiment):
"""A mock version of Experiment with abstract methods implemented."""
constraints: List[job_blocks.JobType]
_async_packager = async_packager.AsyncPackager(
lambda packageables: [MockExecutable()] * len(packageables)
)
def __init__(self) -> None:
super().__init__()
self.launched_jobs = []
self.launched_jobs_args = []
self._work_units = []
self._auxiliary_units = []
self._context = MockMetadataContext()
def _create_experiment_unit(
self,
args: Optional[Mapping[str, Any]],
role: core.ExperimentUnitRole = core.WorkUnitRole(),
identity: str = '',
) -> Awaitable[MockExperimentUnit]:
"""Creates a new WorkUnit instance for the experiment."""
future = asyncio.Future(loop=self._event_loop)
experiment_unit = MockExperimentUnit(
self,
self._work_unit_id_predictor,
self._create_task,
self.launched_jobs,
self.launched_jobs_args,
args,
role,
identity,
)
if isinstance(role, core.WorkUnitRole):
self._work_units.append(experiment_unit)
elif isinstance(role, core.AuxiliaryUnitRole):
self._auxiliary_units.append(experiment_unit)
else:
raise TypeError(f'Unsupported role: {role!r}')
future.set_result(experiment_unit)
return future
@property
def work_unit_count(self) -> int:
return len(self.work_units)
@property
def work_units(self):
return self._work_units
@property
def auxiliary_units(self):
return self._auxiliary_units
@property
def experiment_id(self) -> int:
return 1
@property
def context(self) -> MockMetadataContext:
"""Returns metadata context for the experiment."""
return self._context
def _should_reload_experiment_unit(
self, role: core.ExperimentUnitRole
) -> bool:
return False
def _get_experiment_unit(
self,
experiment_id: int,
identity: str,
role: core.ExperimentUnitRole,
args: Optional[Mapping[str, Any]] = None,
) -> Awaitable[core.ExperimentUnit]:
del identity
future = asyncio.Future()
future.set_result(self.work_units[0])
return future
class MockExecutable(job_blocks.Executable):
"""A mock version of Executable with abstract methods implemented."""
counter = 0
def __init__(self):
super().__init__(name=f'{MockExecutable.counter}')
MockExecutable.counter += 1
class MockExecutor(job_blocks.Executor):
"""A mock version of Executor with abstract methods implemented."""
Spec = job_blocks.ExecutorSpec # pylint: disable=invalid-name
@attr.s(auto_attribs=True)
class MockConstraint(job_blocks.Constraint):
id: str
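# --- Editor's illustrative sketch (not part of the original file). A minimal
# example of how these mocks might drive a unit test; it assumes
# core.Experiment's synchronous context-manager protocol and uses only names
# defined or imported above.
#
#   experiment = MockExperiment()
#   with experiment:
#     experiment.add(job_blocks.Job(
#         executable=MockExecutable(), executor=MockExecutor()))
#   assert experiment.work_unit_count == 1
#   assert len(experiment.launched_jobs) == 1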
|
xmanager-main
|
xmanager/xm_mock/__init__.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convenience adapter for the standard client."""
import functools
import subprocess
from typing import Dict, List, Mapping, Optional, Sequence, Tuple, Union
from absl import flags
from absl import logging
import docker
from docker import errors
from docker import types
from docker.models import containers
from docker.utils import utils
_USE_SUBPROCESS = flags.DEFINE_bool(
'xm_subprocess_docker_impl',
False,
'Launch docker using `subprocess` command.',
)
Ports = Dict[Union[int, str], Union[None, int, Tuple[str, int], List[int]]]
@functools.lru_cache()
def instance() -> 'DockerAdapter':
"""Returns a thread-safe singleton adapter derived from the environment.
Allows the user to ignore the complexities of the underlying library and
focus on a small, concrete subset of required actions.
"""
return DockerAdapter(docker.from_env())
class DockerAdapter(object):
"""Convenience adapter for the standard client."""
def __init__(self, client: docker.DockerClient) -> None:
self._client = client
def has_network(self, name: str) -> bool:
return bool(self._client.networks.list([name]))
def create_network(self, name: str) -> str:
return self._client.networks.create(name).id
def get_client(self) -> docker.DockerClient:
return self._client
def is_registry_label(self, label: str) -> bool:
try:
self._client.images.get_registry_data(label)
return True
except errors.NotFound:
return False
def split_tag(self, image_tag: str) -> Tuple[str, str]:
repository, tag = utils.parse_repository_tag(image_tag)
return repository, tag or 'latest'
def pull_image(self, image_tag: str) -> str:
repository, tag = self.split_tag(image_tag)
# Without a tag, Docker will try to pull every image instead of latest.
# From docker>=4.4.0, use `client.image.pull(*args, all_tags=False)`.
return self._client.images.pull(repository, tag=tag).id
def load_image(self, path: str) -> str:
with open(path, 'rb') as data:
images = self._client.images.load(data)
if len(images) != 1:
raise ValueError(f'{path} must contain precisely one image')
return images[0].id
def run_container(
self,
name: str,
image_id: str,
args: Sequence[str],
env_vars: Mapping[str, str],
network: str,
ports: Ports,
volumes: Dict[str, str],
gpu_count: int,
interactive: bool = False,
) -> Optional[containers.Container]:
"""Runs a given container image."""
if _USE_SUBPROCESS.value or interactive:
return self.run_container_subprocess(
image_id,
args,
env_vars,
network,
ports,
volumes,
gpu_count,
interactive,
)
else:
return self.run_container_client(
name, image_id, args, env_vars, network, ports, volumes, gpu_count
)
def run_container_client(
self,
name: str,
image_id: str,
args: Sequence[str],
env_vars: Mapping[str, str],
network: str,
ports: Ports,
volumes: Dict[str, str],
gpu_count: int,
) -> containers.Container:
"""Runs a given container image using Python Docker client."""
make_mount = lambda guest: {'bind': guest, 'mode': 'rw'}
device_requests = [
types.DeviceRequest(count=gpu_count, capabilities=[['gpu']])
]
return self._client.containers.run(
image_id,
name=name,
hostname=name,
network=network,
detach=True,
remove=True,
command=args,
environment=env_vars,
ports=ports,
volumes={host: make_mount(guest) for host, guest in volumes.items()},
runtime='nvidia' if gpu_count else None,
device_requests=device_requests if gpu_count else None,
)
def run_container_subprocess(
self,
image_id: str,
args: Sequence[str],
env_vars: Mapping[str, str],
network: str,
ports: Ports,
volumes: Dict[str, str],
gpu_count: int,
interactive: bool,
) -> None:
"""Runs a given container image calling `docker` in a Subprocess."""
# TODO: consider using asyncio.create_subprocess_exec() to unify it
# with LocalBinary processing.
cmd = ['docker', 'run']
if network:
cmd.extend(['--network', network])
for in_port, out_port in ports.items():
cmd.extend(['-p', f'{in_port}:{out_port}'])
for key, value in env_vars.items():
cmd.extend(['-e', f'{key}={value}'])
for key, value in volumes.items():
cmd.extend(['-v', f'{key}:{value}'])
if gpu_count:
cmd.extend(['--gpus', f'{gpu_count}'])
cmd.extend(['--runtime', 'nvidia'])
if interactive:
print('Entering shell mode.')
cmd.extend(['-it', '--entrypoint', 'bash', image_id])
else:
cmd.extend([image_id] + list(args))
subprocess.run(args=cmd, check=True)
return None
def stop_container(self, container_id: str) -> None:
try:
self._client.containers.get(container_id).stop()
except docker.errors.NotFound:
logging.warning(
(
'Container %s could not be stopped as it was not found '
'(it may already have been stopped)'
),
container_id,
)
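# --- Editor's illustrative sketch (not part of the original file). Pulling an
# image and starting a detached container through the adapter; the image tag,
# port, and volume values below are placeholders.
#
#   adapter = instance()
#   image_id = adapter.pull_image('python:3.10-slim')
#   container = adapter.run_container(
#       name='example',
#       image_id=image_id,
#       args=['python', '-c', 'print("hello")'],
#       env_vars={'GREETING': 'hello'},
#       network='bridge',
#       ports={8080: 8080},
#       volumes={'/tmp/host': '/data'},
#       gpu_count=0,
#   )
#   if container is not None:  # None when the subprocess code path was used.
#     adapter.stop_container(container.id)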
|
xmanager-main
|
xmanager/docker/docker_adapter.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Xmanager command-line interface."""
import errno
import importlib
import os
import sys
from absl import app
_DEFAULT_ZONE = 'us-west1-b'
_DEFAULT_CLUSTER_NAME = 'xmanager-via-caliban'
def main(argv):
if len(argv) < 3:
raise app.UsageError('There must be at least 2 command-line arguments')
cmd = argv[1]
if cmd == 'launch':
launch_script = argv[2]
if not os.path.exists(launch_script):
raise OSError(errno.ENOENT, f'File not found: {launch_script}')
sys.path.insert(0, os.path.abspath(os.path.dirname(launch_script)))
launch_module, _ = os.path.splitext(os.path.basename(launch_script))
m = importlib.import_module(launch_module)
sys.path.pop(0)
argv = [
launch_script,
'--xm_launch_script={}'.format(launch_script),
] + argv[3:]
app.run(m.main, argv=argv)
elif cmd == 'cluster':
caliban_gke = importlib.import_module('caliban.platform.gke.cli')
caliban_gke_types = importlib.import_module('caliban.platform.gke.types')
subcmd = argv[2]
args = {
'dry_run': False,
'cluster_name': _DEFAULT_CLUSTER_NAME,
'zone': _DEFAULT_ZONE,
'release_channel': caliban_gke_types.ReleaseChannel.REGULAR,
'single_zone': True,
}
if subcmd == 'create':
caliban_gke._cluster_create(args) # pylint: disable=protected-access
elif subcmd == 'delete':
caliban_gke._cluster_delete(args) # pylint: disable=protected-access
else:
raise app.UsageError(
f'Subcommand `{cmd} {subcmd}` is not a supported subcommand'
)
else:
raise app.UsageError(f'Command `{cmd}` is not a supported command')
def entrypoint():
app.run(main)
if __name__ == '__main__':
app.run(main)
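# --- Editor's illustrative sketch (not part of the original file). Example
# invocations, assuming `entrypoint` is exposed as an `xmanager` console
# script; `launcher.py` is a hypothetical launch script.
#
#   xmanager launch ./launcher.py --some_flag=1
#   xmanager cluster create
#   xmanager cluster delete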
|
xmanager-main
|
xmanager/cli/cli.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the xmanager.contrib.gcs module."""
from absl import app
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
from xmanager.contrib import gcs
# Error patterns.
_GCS_PATH_ERROR = (
'--xm_gcs_path not in gs://bucket/directory or /gcs/path format.'
)
_PATH_ERROR = 'Path not in gs://bucket/directory or /gcs/path format'
class GcsTest(parameterized.TestCase):
def test_gcs_path_empty_flag(self):
with self.assertRaisesRegex(app.UsageError, '--xm_gcs_path is missing'):
gcs.get_gcs_path_or_fail('project')
def test_gcs_path_correct_value(self):
with flagsaver.flagsaver(xm_gcs_path='gs://bucket/dir'):
self.assertEqual(gcs.get_gcs_path_or_fail('project'), 'gs://bucket/dir')
def test_gcs_path_incorrect_value(self):
with flagsaver.flagsaver(xm_gcs_path='file://dir'):
with self.assertRaisesRegex(app.UsageError, _GCS_PATH_ERROR):
gcs.get_gcs_path_or_fail('project')
# pylint: disable=bad-whitespace
@parameterized.named_parameters(
('gs_long', 'gs://a/b/c', True, False, True),
('gs_short', 'gs://d/e', True, False, True),
('gs_invalid', 'gs:/d/e', False, False, False),
('fuse_long', '/gcs/a/b/c', False, True, True),
('fuse_short', '/gcs/d/e', False, True, True),
('fuse_invalid', '/gcsc/d/e', False, False, False),
('invalid', 'a/b/f', False, False, False),
) # pyformat:disable
# pylint: enable=bad-whitespace
def test_is_path(self, path, expected_gs, expected_fuse, expected_gcs):
self.assertEqual(gcs.is_gs_path(path), expected_gs)
self.assertEqual(gcs.is_gcs_fuse_path(path), expected_fuse)
self.assertEqual(gcs.is_gcs_path(path), expected_gcs)
def test_get_gcs_url(self):
self.assertEqual(
gcs.get_gcs_url('gs://a/b/c'),
f'{gcs.gcp_website_url}/storage/browser/a/b/c',
)
self.assertEqual(
gcs.get_gcs_url('gs://d/e'),
f'{gcs.gcp_website_url}/storage/browser/d/e',
)
self.assertEqual(
gcs.get_gcs_url('/gcs/a/b/c'),
f'{gcs.gcp_website_url}/storage/browser/a/b/c',
)
self.assertEqual(
gcs.get_gcs_url('/gcs/d/e'),
f'{gcs.gcp_website_url}/storage/browser/d/e',
)
with self.assertRaisesRegex(ValueError, _PATH_ERROR):
gcs.get_gcs_url('a/b/f')
def test_get_gcs_fuse_path(self):
self.assertEqual(gcs.get_gcs_fuse_path('gs://a/b/c'), '/gcs/a/b/c')
self.assertEqual(gcs.get_gcs_fuse_path('gs://d/e'), '/gcs/d/e')
self.assertEqual(gcs.get_gcs_fuse_path('/gcs/a/b/c'), '/gcs/a/b/c')
self.assertEqual(gcs.get_gcs_fuse_path('/gcs/d/e'), '/gcs/d/e')
with self.assertRaisesRegex(ValueError, _PATH_ERROR):
gcs.get_gcs_fuse_path('a/b/f')
def test_get_gs_path(self):
self.assertEqual(gcs.get_gs_path('gs://a/b/c'), 'gs://a/b/c')
self.assertEqual(gcs.get_gs_path('gs://d/e'), 'gs://d/e')
self.assertEqual(gcs.get_gs_path('/gcs/a/b/c'), 'gs://a/b/c')
self.assertEqual(gcs.get_gs_path('/gcs/d/e'), 'gs://d/e')
with self.assertRaisesRegex(ValueError, _PATH_ERROR):
gcs.get_gs_path('a/b/f')
if __name__ == '__main__':
absltest.main()
|
xmanager-main
|
xmanager/contrib/gcs_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for transforming source code using copybara.
XManager primarily uses Copybara to run folder-to-folder workflows in the form:
core.workflow(
name = "folder_to_folder",
origin = folder.origin(),
destination = folder.destination(),
...
)
Copybara allows for iterative local development on multiple platforms without
needing to add/remove platform-specific code modifications. This allows you to
preprocess source code so that it can be run on different platforms with
different executors. e.g.
local_version = run_workflow(config, 'local', path)
vertex_version = run_workflow(config, 'vertex', path)
local_spec = xm.PythonContainer(path=local_version, **kwargs)
vertex_spec = xm.PythonContainer(path=vertex_version, **kwargs)
[local_executable, vertex_executable] = experiment.package([
xm.Packageable(
executable_spec=spec,
executor_spec=xm_local.Local.Spec()),
xm.Packageable(
executable_spec=spec,
executor_spec=xm_local.Vertex.Spec())])
Copybara has no release process, so you must compile copybara yourself:
https://github.com/google/copybara
"""
import os
import subprocess
import tempfile
from typing import Optional
# Set this to the compiled copybara binary path, e.g.
# COPYBARA_BIN = 'bazel-bin/java/com/google/copybara/copybara_deploy.jar'
COPYBARA_BIN = 'copybara'
def run_workflow(
config: str,
workflow: str,
origin_folder: str,
destination_folder: Optional[str] = None,
config_root: Optional[str] = None,
) -> str:
"""Run a workflow in a copybara config to transform origin to destination.
Args:
config: Path to the Copybara config.
workflow: Name of a workflow in copybara config.
origin_folder: The origin folder to use as input. This will be passed to
Copybara via the source_ref argument.
destination_folder: The destination folder to output.
config_root: Configuration root path to be used for resolving absolute
config labels like '//foo/bar'.
Returns:
The output destination folder.
"""
origin_folder = os.path.abspath(origin_folder)
if not destination_folder:
destination_folder = tempfile.mkdtemp()
command = [
COPYBARA_BIN,
config,
workflow,
'--ignore-noop',
origin_folder,
'--folder-dir=' + destination_folder,
]
if config_root:
command += ['--config-root=' + config_root]
print('Copybara command: ', command)
subprocess.run(command, check=True)
return destination_folder
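# --- Editor's illustrative sketch (not part of the original file). Running a
# hypothetical 'local' workflow from a `copy.bara.sky` config against a source
# tree; the returned directory holds the transformed copy.
#
#   out_dir = run_workflow('copy.bara.sky', 'local', './my_project')
#   print('Transformed sources written to', out_dir)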
|
xmanager-main
|
xmanager/contrib/copybara.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for setting up distributed TF experiments.
Supported distributed setups:
- tf.distribute.MultiWorkerMirroredStrategy
- tf.distribute.ParameterServerStrategy
"""
import json
from typing import Awaitable, Callable
import attr
from xmanager import xm
from xmanager import xm_local
from xmanager.contrib import addressing
@attr.s(auto_attribs=True)
class MultiWorkerMirroredStrategyBuilder:
"""Run a Tensorflow MultiWorkerMirroredStrategy experiment.
https://www.tensorflow.org/api_docs/python/tf/distribute/MultiWorkerMirroredStrategy
Usage:
builder = MultiWorkerMirroredStrategyBuilder(
experiment, worker_executable, worker_executor, num_workers=4)
for hparams in hyperparameters:
experiment.add(builder.gen_job_group(), hparams)
"""
experiment: xm.Experiment
worker_executable: xm.Executable
worker_executor: xm.Executor
worker_name: str = 'worker'
num_workers: int = 1
def create_job_group(
self, work_unit: xm.WorkUnit, hparams: xm.UserArgs
) -> xm.JobGroup:
if isinstance(self.worker_executor, xm_local.Kubernetes):
return self.create_kubernetes_job_group(work_unit, hparams)
raise NotImplementedError(
'MultiWorkerMirrored is not supported for executor_type '
f'`{type(self.worker_executor)}`'
)
def gen_job_group(self) -> Callable[[xm.WorkUnit], Awaitable[None]]:
"""Create a generator that can be be used with experiment.add(generator)."""
async def _gen_job_group(
work_unit: xm.WorkUnit, **hparams
) -> Awaitable[None]:
job = self.create_job_group(work_unit, hparams)
return work_unit.add(job)
return _gen_job_group
def create_kubernetes_job_group(
self, work_unit: xm.WorkUnit, hparams: xm.UserArgs
) -> xm.JobGroup:
"""Builds a Kubernetes job group that can be added to an experiment."""
assert isinstance(self.worker_executor, xm_local.Kubernetes)
worker_job_domains = {}
for i in range(self.num_workers):
job_name = f'{self.worker_name}-{i}'
worker_job_domains[job_name] = addressing.k8s_pod_domain(
job_name=job_name,
experiment_id=self.experiment.experiment_id,
work_unit_id=work_unit.work_unit_id,
)
jobs = {}
for i, worker_job_name in enumerate(worker_job_domains):
tf_config = {
'cluster': {'worker': list(worker_job_domains.values())},
'task': {'type': 'worker', 'index': i},
}
jobs[worker_job_name] = xm.Job(
executable=self.worker_executable,
executor=self.worker_executor,
args=hparams,
env_vars={
'TF_CONFIG': json.dumps(tf_config),
},
)
return xm.JobGroup(**jobs)
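# --- Editor's note (illustrative, not part of the original file). With
# num_workers=2, experiment_id=123 and work_unit_id=4, worker 0 above receives
# a TF_CONFIG environment variable shaped like (pod domains abbreviated):
#
#   {"cluster": {"worker": ["123-4-worker-0...:2222", "123-4-worker-1...:2222"]},
#    "task": {"type": "worker", "index": 0}}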
@attr.s(auto_attribs=True)
class ParameterServerStrategyBuilder:
"""Builds a Tensorflow ParameterServer experiment in XManager.
https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/ParameterServerStrategy
Usage:
builder = ParameterServerStrategyBuilder(
    experiment, chief_executable, chief_executor, worker_executable,
    worker_executor, ps_executable, ps_executor)
for hparams in hyperparameters:
experiment.add(builder.gen_job_group(), hparams)
"""
experiment: xm.Experiment
chief_executable: xm.Executable
chief_executor: xm.Executor
worker_executable: xm.Executable
worker_executor: xm.Executor
ps_executable: xm.Executable
ps_executor: xm.Executor
chief_name: str = 'chief'
worker_name: str = 'worker'
ps_name: str = 'ps'
num_workers: int = 1
num_ps: int = 1
def create_job_group(
self, work_unit: xm.WorkUnit, hparams: xm.UserArgs
) -> xm.JobGroup:
if isinstance(self.worker_executor, xm_local.Kubernetes):
return self.create_kubernetes_job_group(work_unit, hparams)
raise NotImplementedError(
'ParameterServerStrategy is not supported for executor_type '
f'`{type(self.worker_executor)}`'
)
def gen_job_group(self) -> Callable[[xm.WorkUnit], Awaitable[None]]:
"""Create a generator that can be be used with experiment.add(generator)."""
async def _gen_job_group(
work_unit: xm.WorkUnit, **hparams
) -> Awaitable[None]:
job = self.create_job_group(work_unit, hparams)
return work_unit.add(job)
return _gen_job_group
def create_kubernetes_job_group(
self, work_unit: xm.WorkUnit, hparams: xm.UserArgs
) -> xm.JobGroup:
"""Builds a Kubernetes job group that can be added to an experiment."""
assert isinstance(self.chief_executor, xm_local.Kubernetes)
assert isinstance(self.worker_executor, xm_local.Kubernetes)
assert isinstance(self.ps_executor, xm_local.Kubernetes)
def _k8s_pod_domain(job_name: str) -> str:
return addressing.k8s_pod_domain(
job_name, self.experiment.experiment_id, work_unit.work_unit_id
)
chief_domain = _k8s_pod_domain(self.chief_name)
# pylint: disable=g-complex-comprehension
worker_domains = [
addressing.k8s_pod_domain(
f'{self.worker_name}-{i}',
self.experiment.experiment_id,
work_unit.work_unit_id,
)
for i in range(self.num_workers)
]
ps_domains = [
addressing.k8s_pod_domain(
f'{self.ps_name}-{i}',
self.experiment.experiment_id,
work_unit.work_unit_id,
)
for i in range(self.num_ps)
]
# pylint: enable=g-complex-comprehension
def _create_tf_config(task_type, task_index):
return {
'cluster': {
'chief': [chief_domain],
'worker': worker_domains,
'ps': ps_domains,
},
'task': {
'type': task_type,
'index': task_index,
},
}
jobs = {}
jobs[self.chief_name] = xm.Job(
executable=self.chief_executable,
executor=self.chief_executor,
env_vars={'TF_CONFIG': json.dumps(_create_tf_config('chief', 0))},
)
for i in range(self.num_ps):
ps_job_name = f'{self.ps_name}-{i}'
jobs[ps_job_name] = xm.Job(
executable=self.ps_executable,
executor=self.ps_executor,
args=hparams,
env_vars={
'TF_CONFIG': json.dumps(_create_tf_config('ps', i)),
},
)
for i in range(self.num_workers):
worker_job_name = f'{self.worker_name}-{i}'
jobs[worker_job_name] = xm.Job(
executable=self.worker_executable,
executor=self.worker_executor,
args=hparams,
env_vars={
'TF_CONFIG': json.dumps(_create_tf_config('worker', i)),
},
)
return xm.JobGroup(**jobs)
|
xmanager-main
|
xmanager/contrib/xm_tensorflow.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools to write parameter controllers for XManager experiments.
An async function decorated with @parameter_controller.controller() returns
an xm.JobGenerator. When added to an experiment, an AUX unit is started which
runs the given function long-term.
Usage:
from xmanager.contrib import parameter_controller
@parameter_controller.controller(...)
async def my_controller(experiment: xm.Experiment, foo, bar) -> None:
experiment.add(...)
...
experiment.add(my_controller(foo=1, bar=2))
"""
import asyncio
import json
import os
import shutil
from typing import Any, Callable, Dict, Optional
from absl import flags
import launchpad as lp
import launchpad.nodes.python.xm_docker as lp_docker
from xmanager import xm
from xmanager import xm_local
def _parameter_controller_job_args(
controller_args: Dict[str, Any]
) -> Dict[str, Any]:
"""Converts given XM flags to coresponding Launchpad's `process_entry` flags.
The XM client runs inside `process_entry`, but flags are not defined yet.
The `flags_to_populate` flag in `process_entry` is used to define the
given flags before calling `app.run()`. This function converts XM flags
to the format desired by `process_entry`.
Args:
controller_args: XM flags to be passed.
Returns:
Flags used to populate XM flags in `process_entry`
"""
args = {
'lp_task_id': 0,
'flags_to_populate': xm.ShellSafeArg(json.dumps(controller_args)),
}
return args
def _to_python_container(
node: lp.PyNode, label: str, docker_config: lp.DockerConfig
) -> xm.PythonContainer:
"""Returns xm.PythonContainer embedding a lp.PyNode."""
return lp_docker.to_docker_executables(
[node], label=label, docker_config=docker_config
)[0][0]
def _use_host_db_config(
package_path: str,
controller_args: Dict[str, Any],
) -> None:
"""Make DB config file used by host available to controller job.
Involves copying the DB config file used by host to the package directory of
the container and then setting the `--xm_db_yaml_config_path` accordingly.
Args:
package_path: The path of the package directory where the DB config should
be copied.
controller_args: The flag mapping used by the controller job. Used to update
`--xm_db_yaml_config_path`.
Raises:
RuntimeError: The `--xm_db_yaml_config_path` flag will be overridden by this
function. To avoid confusion about this behavior, an exception is thrown
if it's already set.
"""
if 'xm_db_yaml_config_path' in controller_args:
raise RuntimeError(
"Parameter controller can't use host DB config "
'and also use `--xm_db_yaml_config_path` flag. Use '
'`use_host_db_config=False` or remove the flag.'
)
config_path = xm.utils.resolve_path_relative_to_launcher(
flags.FLAGS.xm_db_yaml_config_path
)
if not os.path.isfile(os.path.join(package_path, config_path)):
shutil.copy(config_path, package_path)
controller_args['xm_db_yaml_config_path'] = os.path.basename(config_path)
async def _launch_remote_controller(
aux_unit: xm.ExperimentUnit,
node: lp.PyNode,
function_label: str,
executor: xm.Executor,
controller_name: str,
controller_args: Dict[str, Any],
controller_env_vars: Dict[str, str],
package_path: str,
use_host_db_config: bool,
) -> None:
"""Launches remote Job with the given controller."""
package_path = xm.utils.resolve_path_relative_to_launcher(package_path)
if use_host_db_config:
_use_host_db_config(package_path, controller_args)
docker_requirements = os.path.join(package_path, 'requirements.txt')
docker_config = lp.DockerConfig(package_path, docker_requirements)
executable_spec = _to_python_container(
node, f'{aux_unit.experiment_unit_name}_{function_label}', docker_config
)
[executable] = await asyncio.get_running_loop().run_in_executor(
None,
aux_unit.experiment.package,
[
xm.Packageable(
executable_spec=executable_spec,
executor_spec=executor.Spec(),
)
],
)
controller_job = xm.Job(
name=controller_name,
executable=executable,
executor=executor,
args=_parameter_controller_job_args(controller_args) or {},
env_vars=controller_env_vars or {},
)
aux_unit.add(controller_job)
def _populate_flags(controller_args: Dict[str, Any]) -> None:
"""Sets flag values at runtime.
This is meant to be used in conjunction with the `flags_to_populate`
flags of `process_entry`. Since flag values can't be passed normally,
this function sets values programmatically. This function is meant
to be serialized and run inside `process_entry` through `_controller_body`.
Args:
controller_args: Mapping of flag names and values to be set.
"""
for name, value in controller_args.items():
flags.FLAGS[name].value = value
async def _controller_body(
experiment_id, f, controller_args, *args, **kwargs
) -> None:
_populate_flags(controller_args)
async with xm_local.get_experiment(experiment_id) as experiment:
await f(experiment, *args, **kwargs)
def controller(
*,
executor: xm.Executor,
controller_name: str = 'parameter_controller',
controller_env_vars: Optional[Dict[str, str]] = None,
controller_args: Optional[Dict[str, Any]] = None,
package_path: Optional[str] = '.',
use_host_db_config: bool = True,
):
"""Converts a function to a controller which can be added to an experiment.
Calling the wrapped function returns an xm.JobGenerator which runs it as an
auxiliary unit on the specified executor.
Args:
executor: The executor to launch the controller job on.
controller_name: Name of the parameter controller job.
controller_env_vars: Mapping of env variable names and values to be passed
to the parameter controller job.
controller_args: Mapping of flag names and values to be used by the XM
client running inside the parameter controller job.
package_path: Path of directory where the parameter controller container
will be packaged. This directory must contain a `requirements.txt` file
for the job, as well as other things necessary to run the controller job,
like a DB YAML config or files needed by the jobs to be launched.
use_host_db_config: Specifies if the DB config used by the host should be
copied and used by the parameter controller job. Defaults to True. Can't
be used in conjunction with passing the `--xm_db_yaml_config_path` flag to the
controller job.
Returns:
A decorator to be applied to the function.
"""
def wrap(f: Callable[..., None]) -> Callable[..., xm.JobGeneratorType]:
def make_controller(*args, **kwargs) -> xm.JobType:
async def job_generator(aux_unit: xm.ExperimentUnit) -> None:
experiment_id = aux_unit.experiment.experiment_id
remote_controller = asyncio.create_task(
_launch_remote_controller(
aux_unit,
lp.PyNode(
xm.run_in_asyncio_loop(_controller_body),
experiment_id,
f,
controller_args,
*args,
**kwargs,
),
function_label=f.__name__,
executor=executor,
controller_name=controller_name,
controller_args=controller_args,
controller_env_vars=controller_env_vars,
package_path=package_path,
use_host_db_config=use_host_db_config,
)
)
await remote_controller
return xm.AuxiliaryUnitJob(
job_generator,
importance=xm.Importance.HIGH,
# TODO: Add support for `termination_delay_secs`.
termination_delay_secs=0,
)
return make_controller
return wrap
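# --- Editor's illustrative sketch (not part of the original file). A concrete
# decoration, assuming an `xm_local.Vertex` executor; the function body is a
# placeholder. The decorated function receives the experiment as its first
# positional argument (see `_controller_body` above).
#
#   @controller(executor=xm_local.Vertex(), use_host_db_config=False)
#   async def my_pipeline(experiment: xm.Experiment) -> None:
#     experiment.add(...)
#
#   experiment.add(my_pipeline())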
|
xmanager-main
|
xmanager/contrib/parameter_controller.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers to support GCS-based output dir.
Code running on Google Cloud should use a GCS directory to store data.
A GCS path must follow the 'gs://bucket/directory' format. To ensure the
format is correct, and to suggest reasonable defaults for it, this file
provides a flag and several helper functions.
"""
import datetime
import getpass
import os
from absl import app
from absl import flags
_GCS_PATH = flags.DEFINE_string(
'xm_gcs_path',
None,
(
'A GCS directory within a bucket to store output '
'(in gs://bucket/directory format).'
),
)
_GS_PREFIX = 'gs://'
_GCS_PREFIX = '/gcs/'
_default_bucket_name = '<Your bucket>'
def suggestion(project_name: str) -> str:
"""Returns a suggested GCS dir name for the given @project_name."""
return os.path.join(
_GS_PREFIX,
_default_bucket_name,
getpass.getuser(),
project_name + '-' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S'),
)
def get_gcs_path_or_fail(project_name: str) -> str:
"""Returns value passed in the --xm_gcs_path flag; fails if nothing is passed.
Args:
project_name: a project name used to generate suggested GCS path.
Returns:
If the --xm_gcs_path flag is empty or contains an invalid value, raises an
error. Otherwise, returns the flag value.
"""
if not _GCS_PATH.value:
raise app.UsageError(
'--xm_gcs_path is missing. Suggestion: '
+ f'--xm_gcs_path={suggestion(project_name)}'
)
elif not is_gcs_path(_GCS_PATH.value):
raise app.UsageError(
'--xm_gcs_path not in gs://bucket/directory or /gcs/path format. '
+ f'Suggestion: --xm_gcs_path={suggestion(project_name)}'
)
return str(_GCS_PATH.value)
def is_gs_path(path: str) -> bool:
"""Given the path, checks whether it is a valid Google Storage URL.
Args:
path: a path.
Returns:
True iff a path starts with the 'gs://' prefix.
"""
return path.startswith(_GS_PREFIX)
def is_gcs_fuse_path(path: str) -> bool:
"""Given the path, checks whether it is a valid gcs_fuse path.
Args:
path: a path.
Returns:
True iff a path starts with the '/gcs/' prefix.
"""
return path.startswith(_GCS_PREFIX)
def is_gcs_path(path: str) -> bool:
"""Given the path, checks whether it is a valid GCS URL.
Args:
path: a path.
Returns:
True iff a path starts with either 'gs://' or '/gcs/' prefix.
"""
return is_gs_path(path) or is_gcs_fuse_path(path)
def _gcs_path_no_prefix(path: str) -> str:
"""Given the GCS path in gs://bucket/directory format, strips 'gs://' from it.
Args:
path: GCS path in gs://bucket/directory format.
Returns:
Path without 'gs://' prefix.
"""
if is_gs_path(path):
return path[len(_GS_PREFIX) :]
if is_gcs_fuse_path(path):
return path[len(_GCS_PREFIX) :]
raise ValueError(
f'Path not in gs://bucket/directory or /gcs/path format: {path}'
)
# Exposed for testing.
gcp_website_url = 'https://console.cloud.google.com'
def get_gcs_url(path: str) -> str:
"""Given the GCS path, provides a GCS URL to access it.
Args:
path: GCS path in gs://bucket/directory format.
Returns:
GCS URL to access path.
"""
no_prefix = _gcs_path_no_prefix(path)
return f'{gcp_website_url}/storage/browser/{no_prefix}'
def get_gcs_fuse_path(path: str) -> str:
"""Given the GCS path, provides corresponding Cloud Storage FUSE path.
See https://cloud.google.com/storage/docs/gcs-fuse for details.
Args:
path: GCS path in gs://bucket/directory format.
Returns:
Path in the /gcs/bucket/directory format.
"""
return _GCS_PREFIX + _gcs_path_no_prefix(path)
def get_gs_path(path: str) -> str:
"""Given the GCS path, provides corresponding Cloud Storage URL.
See https://cloud.google.com/storage/docs/gcs-fuse for details.
Args:
path: GCS path in /gcs/bucket/directory format.
Returns:
Path in the gs://bucket/directory format.
"""
return _GS_PREFIX + _gcs_path_no_prefix(path)
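# --- Editor's illustrative sketch (not part of the original file). The
# conversions performed by the helpers above, on a placeholder bucket:
#
#   get_gcs_fuse_path('gs://bucket/dir')  # -> '/gcs/bucket/dir'
#   get_gs_path('/gcs/bucket/dir')        # -> 'gs://bucket/dir'
#   get_gcs_url('gs://bucket/dir')
#   # -> 'https://console.cloud.google.com/storage/browser/bucket/dir'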
|
xmanager-main
|
xmanager/contrib/gcs.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for addressing."""
from absl.testing import absltest
from xmanager.contrib import addressing
class AddressingTest(absltest.TestCase):
def test_k8s_pod_domain(self):
address = addressing.k8s_pod_domain(
job_name='cifar10',
experiment_id=123,
work_unit_id=4,
service='best_service',
namespace='best_namespace',
)
self.assertEqual(
address,
'123-4-cifar10.best_service.best_namespace.svc.cluster.local:2222',
)
if __name__ == '__main__':
absltest.main()
|
xmanager-main
|
xmanager/contrib/addressing_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from absl.testing import parameterized
from xmanager import xm
from xmanager.contrib import framework_defaults
MLFramework = framework_defaults.MLFramework
class FrameworkDefaultsTest(parameterized.TestCase):
def test_known_frameworks(self):
self.assertEqual(
framework_defaults._get_framework('torch'), MLFramework.PYTORCH
)
self.assertEqual(
framework_defaults._get_framework('pytorch'), MLFramework.PYTORCH
)
self.assertEqual(framework_defaults._get_framework('tf'), MLFramework.TF2)
self.assertEqual(framework_defaults._get_framework('tf1'), MLFramework.TF1)
self.assertEqual(framework_defaults._get_framework('tf2'), MLFramework.TF2)
self.assertEqual(
framework_defaults._get_framework('tensorflow 2.x'), MLFramework.TF2
)
self.assertEqual(framework_defaults._get_framework('jax'), MLFramework.JAX)
self.assertEqual(framework_defaults._get_framework('flax'), MLFramework.JAX)
def test_unknown_frameworks(self):
self.assertEqual(
framework_defaults._get_framework('huggingface'), MLFramework.UNKNOWN
)
self.assertEqual(
framework_defaults._get_framework('objax'), MLFramework.UNKNOWN
)
self.assertEqual(
framework_defaults._get_framework('not a framework name'),
MLFramework.UNKNOWN,
)
@parameterized.named_parameters(
('cpu', None),
('gpu', xm.ResourceType.V100),
('tpu', xm.ResourceType.TPU_V3),
)
def test_jax_base_image(self, accelerator):
base_image = framework_defaults.base_image(MLFramework.JAX, accelerator)
self.assertStartsWith(base_image, 'gcr.io/deeplearning-platform-release/')
# Jax uses CUDA images.
self.assertContainsSubsequence(base_image, 'cu')
@parameterized.named_parameters(
('cpu', None),
('gpu', xm.ResourceType.V100),
('tpu', xm.ResourceType.TPU_V3),
)
def test_tf2_base_image(self, accelerator):
base_image = framework_defaults.base_image(MLFramework.TF2, accelerator)
self.assertStartsWith(base_image, 'gcr.io/deeplearning-platform-release/')
self.assertContainsSubsequence(base_image, 'tf2-')
@parameterized.named_parameters(
('cpu', None),
('gpu', xm.ResourceType.V100),
('tpu', xm.ResourceType.TPU_V3),
)
def test_torch_base_image(self, accelerator):
base_image = framework_defaults.base_image(MLFramework.PYTORCH, accelerator)
self.assertStartsWith(base_image, 'gcr.io/')
self.assertContainsSubsequence(base_image, 'pytorch')
if accelerator in xm.TpuType:
self.assertContainsSubsequence(base_image, 'tpu')
@parameterized.named_parameters(
('cpu', None),
('gpu', xm.ResourceType.V100),
('tpu', xm.ResourceType.TPU_V3),
)
def test_unsupported_tf1_base_image(self, accelerator):
base_image = framework_defaults.base_image(MLFramework.TF1, accelerator)
self.assertStartsWith(base_image, 'gcr.io/deeplearning-platform-release/')
self.assertContainsSubsequence(base_image, 'tf')
@parameterized.named_parameters(
('cpu', None),
('gpu', xm.ResourceType.V100),
('tpu', xm.ResourceType.TPU_V3),
)
def test_unknown_base_image(self, accelerator):
base_image = framework_defaults.base_image(MLFramework.UNKNOWN, accelerator)
self.assertStartsWith(base_image, 'gcr.io/deeplearning-platform-release/')
self.assertContainsSubsequence(base_image, 'base')
if __name__ == '__main__':
absltest.main()
|
xmanager-main
|
xmanager/contrib/framework_defaults_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
xmanager-main
|
xmanager/contrib/__init__.py
|
"""XMFlow-like feature."""
from __future__ import annotations
import asyncio
from collections.abc import Sequence
from typing import Callable, Optional
from absl import logging
from xmanager import xm
from xmanager.contrib import parameter_controller
_Fn = Callable[[xm.Experiment], None]
_AsyncFn = Callable[[], xm.JobGeneratorType]
# The type of `parameter_controller.controller()`
_Controller = Callable[[_Fn], _AsyncFn]
class _UnlaunchedJobError(Exception):
pass
class _UnlaunchedJob:
async def wait_until_complete(self):
raise _UnlaunchedJobError
class StopControllerError(Exception):
pass
def executable_graph(
*,
jobs: dict[str, xm.JobType],
jobs_deps: dict[str, Sequence[str]],
# Have to redefine external symbol to allow both
# `flow.controller` and `flow.executable_graph(controller=)`
controller: Optional[_Controller] = None,
terminate_on_failure: bool = True,
) -> xm.JobGeneratorType:
"""Returns an executable which run the pipeline.
Usage:
```python
from xmanager.contrib import flow
experiment.add(flow.executable_graph(
jobs={
'preprocessing': xm.Job(),
'train': xm.Job(),
'eval': xm.Job(),
'final': xm.Job(),
},
jobs_deps={
'train': ['preprocessing'],
'eval': ['preprocessing'],
'final': ['train', 'eval'],
},
))
```
Args:
    jobs: Jobs to run (in the order defined by the `jobs_deps` graph). Jobs not
      defined in `jobs_deps` are assumed to have no dependencies and run
      immediately.
jobs_deps: Mapping job-name to list of job dependencies.
controller: A `flow.controller()` (alias of
`xmanager.contrib.parameter_controller.controller()`) to customize the
executor parameters. If missing, a default executor is used.
    terminate_on_failure: If true, terminate upon the first failure. If
      false, continue to launch jobs whose dependencies are successful.
Returns:
The controller to pass to `experiment.add()`
"""
# Normalize the graph by adding missing values
for job_name in jobs:
jobs_deps.setdefault(job_name, [])
_assert_valid_graph(jobs_deps=jobs_deps, jobs=jobs)
log(f'jobs: {list(jobs)}')
log(f'jobs_deps: {jobs_deps}')
  controller = controller or parameter_controller.controller()
@controller
async def run_graphs(experiment: xm.Experiment) -> None:
jobs_launched = {job_name: asyncio.Future() for job_name in jobs}
async def job_finished(job_name: str) -> bool:
      log(f'Waiting for `{job_name}` to be added via `experiment.add`')
op = await jobs_launched[job_name] # Wait for the `experiment.add`
try:
log(f'`{job_name}` is running, waiting to finish')
await op.wait_until_complete() # Wait for the job to complete
except (xm.ExperimentUnitError, _UnlaunchedJobError) as e:
log(f'`{job_name}` has failed.')
if terminate_on_failure:
raise StopControllerError() from e
return False
else:
log(f'`{job_name}` has finished successfully.')
return True
async def launch_single_job(job_name: str) -> None:
      log(f'Launching `{job_name}`, waiting for all of its deps to finish.')
# Wait for all the deps to complete
deps_finished = await asyncio.gather(
*(job_finished(dep) for dep in jobs_deps[job_name])
)
# Schedule the job
      log(f'All deps finished for `{job_name}`. Launching...')
if all(deps_finished):
op = await experiment.add(jobs[job_name], identity=job_name)
else:
op = _UnlaunchedJob()
log(f'`{job_name}` launched. Notify other waiting jobs...')
# Notify other waiting jobs
jobs_launched[job_name].set_result(op)
log(f'`{job_name}` complete.')
try:
await asyncio.gather(*(launch_single_job(job_name) for job_name in jobs))
except StopControllerError as e:
log(str(e))
# This is expected, so exit normally.
return
return run_graphs() # pylint: disable=no-value-for-parameter
def _quote_name(x: str) -> str:
"""Return a quoted name for a node in a graphviz graph."""
return '"' + x.replace('"', '\\"') + '"'
def _make_dot_graph_url(jobs_deps: dict[str, Sequence[str]]) -> str:
  """Builds the graphviz DOT source describing the dependency graph."""
  # First add all leaf nodes (potential singletons).
terms = [_quote_name(j) for j, deps in jobs_deps.items() if not deps]
for job_name, job_deps in jobs_deps.items():
for dep in job_deps:
terms.append(f'{_quote_name(dep)}->{_quote_name(job_name)}')
dot = 'digraph{{{}}}'.format(' '.join(terms))
return dot
def _assert_valid_graph(
*,
jobs: dict[str, xm.Job],
jobs_deps: dict[str, Sequence[str]],
):
"""Validate the jobs are valid."""
all_job_names = set()
for j, jd in jobs_deps.items():
all_job_names.add(j)
all_job_names.update(jd)
if extra_jobs := sorted(all_job_names - set(jobs)):
raise ValueError(
'Invalid `jobs_deps`: Some dependencies are not present in the '
f'`jobs=`: {extra_jobs}'
)
# Could also detect cycles, but likely over-engineered
def log(msg: str) -> None:
"""Log messages."""
logging.info(msg)
|
xmanager-main
|
xmanager/contrib/flow.py
|
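A minimal launcher-side sketch of `flow.executable_graph` from the file above, assuming the caller already holds an open experiment and packaged `xm.Job` objects (all job names below are placeholders):

```python
from xmanager import xm
from xmanager.contrib import flow


def add_pipeline(
    experiment: xm.Experiment,
    preprocess_job: xm.Job,
    train_job: xm.Job,
    eval_job: xm.Job,
) -> None:
  # Diamond-shaped graph: `train` and `eval` both wait on `preprocess`.
  # With terminate_on_failure=False, a failed `preprocess` only skips its
  # dependents instead of stopping the whole controller.
  experiment.add(flow.executable_graph(
      jobs={
          'preprocess': preprocess_job,
          'train': train_job,
          'eval': eval_job,
      },
      jobs_deps={'train': ['preprocess'], 'eval': ['preprocess']},
      terminate_on_failure=False,
  ))
```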
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to create universal launchers.
This module provides an `--xm_launch_mode` flag to specify a desired executor
and two helper functions which return specific implementations of xm.Executor
and `create_experiment()`.
Usage:
Example 1 (simple executor tuning):
```
from xmanager.contrib import executor_selector
...
requirements = xm.JobRequirements(...)
executor = executor_selector.get_executor()(requirements=requirements)
...
with executor_selector.create_experiment(experiment_title='Run') as experiment:
...
```
Example 2 (involved executor tuning):
```
from xmanager.contrib import executor_selector
...
executor_fn = executor_selector.get_executor()
kwargs = {}
if executor_selector.launch_mode() == executor_selector.XMLaunchMode.VERTEX:
  ...  # Fill in Vertex executor args
else:
  ...  # Fill in Local executor args
executor = executor_fn(**kwargs)
```
"""
import enum
from typing import Callable, List, Optional, Union
from absl import flags
from xmanager import xm
from xmanager import xm_local
class XMLaunchMode(enum.Enum):
"""Specifies an executor to run an experiment."""
VERTEX = 'vertex'
LOCAL = 'local'
INTERACTIVE = 'interactive'
_XM_LAUNCH_MODE = flags.DEFINE_enum_class(
'xm_launch_mode',
XMLaunchMode.VERTEX,
XMLaunchMode,
    'How to launch the experiment. Supports local and interactive execution '
    'as well as launching on Vertex.',
)
def launch_mode() -> XMLaunchMode:
return _XM_LAUNCH_MODE.value
def create_experiment(
experiment_title: Optional[str] = None,
mode: Optional[XMLaunchMode] = None,
) -> xm.Experiment:
"""Creates an experiment depending on the launch mode.
Args:
experiment_title: Title of an experiment
mode: Specifies which experiment to create. If None, the '--xm_launch_mode'
flag value is used.
Returns:
A newly created experiment.
Raises:
ValueError: if provided `mode` is unknown.
"""
if mode is None:
mode = launch_mode()
if mode in (
XMLaunchMode.LOCAL,
XMLaunchMode.INTERACTIVE,
XMLaunchMode.VERTEX,
):
# TODO: add import here?
return xm_local.create_experiment(experiment_title)
raise ValueError(f'Unknown launch mode: {mode}')
def _local_executor(interactive: bool) -> Callable[..., xm.Executor]:
"""Helper to provide a local executor with appropriate `interactive` flag."""
def setup_local(*args, **kwargs_in) -> xm_local.Local:
kwargs = {}
# Copy supported arguments.
if 'experimental_stream_output' in kwargs_in:
kwargs['experimental_stream_output'] = kwargs_in[
'experimental_stream_output'
]
# Set the specified value of `interactive`.
docker_options = kwargs_in.get('docker_options', xm_local.DockerOptions())
setattr(docker_options, 'interactive', interactive)
kwargs['docker_options'] = docker_options
return xm_local.Local(*args, **kwargs)
return setup_local
def get_executor(
mode: Optional[XMLaunchMode] = None,
) -> Callable[..., xm.Executor]:
"""Select an `xm.Executor` specialization depending on the launch mode.
Args:
mode: Specifies which class to select. If None, the '--xm_launch_mode' flag
value is used.
Returns:
An executor constructor.
Raises:
ValueError: if provided `mode` is unknown.
"""
if mode is None:
mode = launch_mode()
if mode == XMLaunchMode.VERTEX:
return xm_local.Caip
if mode == XMLaunchMode.LOCAL or mode == XMLaunchMode.INTERACTIVE:
return _local_executor(mode == XMLaunchMode.INTERACTIVE)
raise ValueError(f'Unknown launch mode: {mode}')
|
xmanager-main
|
xmanager/contrib/executor_selector.py
|
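A short sketch, using only names defined in `executor_selector.py` above, of selecting the interactive path explicitly rather than via the `--xm_launch_mode` flag:

```python
from xmanager.contrib import executor_selector

# Equivalent to passing --xm_launch_mode=interactive on the command line.
executor_fn = executor_selector.get_executor(
    mode=executor_selector.XMLaunchMode.INTERACTIVE
)
# The returned constructor forces DockerOptions.interactive=True, so the
# container drops into an interactive session when the job starts.
executor = executor_fn()
```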
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper module for using TPUs."""
from typing import List
# pylint: disable=line-too-long
def tpuvm_docker_instructions() -> List[str]:
return [
(
'RUN wget'
' https://storage.googleapis.com/cloud-tpu-tpuvm-artifacts/libtpu/20210525/libtpu.so'
' -O /lib/libtpu.so'
),
'RUN chmod 700 /lib/libtpu.so',
(
'RUN wget '
'https://storage.googleapis.com/cloud-tpu-tpuvm-artifacts/tensorflow/20210525/tf_nightly-2.6.0-cp38-cp38-linux_x86_64.whl'
),
'RUN pip3 install tf_nightly-2.6.0-cp38-cp38-linux_x86_64.whl',
'RUN rm tf_nightly-2.6.0-cp38-cp38-linux_x86_64.whl',
]
|
xmanager-main
|
xmanager/contrib/tpu.py
|
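A hedged sketch of how the returned instructions might be spliced into a Dockerfile; the base image and entrypoint below are illustrative placeholders, not part of the module:

```python
from xmanager.contrib import tpu

dockerfile_lines = [
    'FROM python:3.8',
    # TPU VM setup steps (libtpu and the matching TF wheel) from tpu.py.
    *tpu.tpuvm_docker_instructions(),
    'COPY . /app',
    'ENTRYPOINT ["python3", "/app/main.py"]',
]
print('\n'.join(dockerfile_lines))
```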
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Automatic selection of reasonable defaults based on input.
This library aims to provide recommended defaults for the choices that can be
automated depending on the input. In particular, the choice of a base image
depends on a framework and accelerator used for the experiment, and can change
if a different accelerator is used. This module provides function
`base_image`, which returns the recommended base image to use without the need
to change the launcher's code.
Please note that the recommendations are subject to change as we are trying to
use the latest supported images for all frameworks. If a change in the
recommendation breaks your code, try to replace this call with the explicit
base image name.
"""
import enum
from typing import Optional, Union
from absl import logging
from xmanager import xm
class MLFramework(enum.Enum):
"""ML Framework used in the experiment code."""
UNKNOWN = 0
JAX = 1
PYTORCH = 2
TF2 = 3
TF1 = 4 # Unsupported
FrameworkSpec = Union[str, MLFramework]
def _get_framework(framework: str) -> MLFramework:
"""Given a framework name in a loose form, returns the most suitable enum."""
if framework == 'jax' or framework == 'flax':
return MLFramework.JAX
if 'torch' in framework:
return MLFramework.PYTORCH
if 'tf' in framework or 'tensorflow' in framework:
    # A variant of TensorFlow.
return MLFramework.TF1 if '1' in framework else MLFramework.TF2
# Framework not recognized.
logging.error('Unrecognized framework "%s"', framework)
return MLFramework.UNKNOWN
def base_image(
framework: FrameworkSpec, accelerator: Optional[xm.ResourceType]
) -> str:
"""Returns a base image recommendation depending on the input.
Please note that the recommendations can change as we are trying to recommend
the latest supported images for all frameworks. Currently most of the images
are taken from
https://cloud.google.com/deep-learning-containers/docs/choosing-container.
Args:
framework: a free-text framework name. Recognized options are `jax`,
`pytorch`, `tf`, `tensorflow`. The enum value is also accepted.
accelerator: Accelerator specification from xm.JobRequirements. If None,
then execution on CPU is assumed.
Returns:
a name of a base image to pass to e.g. xm.python_container.
"""
if isinstance(framework, str):
framework = _get_framework(framework)
# LINT.IfChange(base_image_version)
if framework == MLFramework.JAX:
    # JAX-based experiments use the same base image for all accelerators.
return 'gcr.io/deeplearning-platform-release/base-cu113'
elif framework == MLFramework.TF2:
# TF experiments use the same base image for all accelerators.
return 'gcr.io/deeplearning-platform-release/tf2-gpu.2-6'
elif framework == MLFramework.PYTORCH:
if accelerator in xm.TpuType:
# Base image is taken from pytorch / XLA documentation.
# https://github.com/pytorch/xla#-available-images-and-wheels
return 'gcr.io/tpu-pytorch/xla:nightly_3.8_tpuvm_20220819'
else:
return 'gcr.io/deeplearning-platform-release/pytorch-gpu.1-12'
# LINT.ThenChange(:tpu_runtime_version)
elif framework == MLFramework.TF1:
logging.warning('Tensorflow 1.x is not supported')
return 'gcr.io/deeplearning-platform-release/tf-gpu.1-15'
else:
# Unrecognized framework: use the default CUDA image.
return 'gcr.io/deeplearning-platform-release/base-cu113'
|
xmanager-main
|
xmanager/contrib/framework_defaults.py
|
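A small usage sketch of `framework_defaults.base_image` above; the resulting images follow the lookup table in the module and may change as the recommendations are updated:

```python
from xmanager import xm
from xmanager.contrib import framework_defaults

# Free-text framework names are normalized by _get_framework first.
tpu_image = framework_defaults.base_image('pytorch', xm.ResourceType.TPU_V3)
# -> the TPU-enabled PyTorch/XLA image.
cpu_image = framework_defaults.base_image('jax', None)
# -> the CUDA base image, which JAX-based experiments use for all
#    accelerators (including plain CPU).
```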
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for getting the address of XManager jobs.
Addresses in XManager can be statically evaluated because the experiment ID is
known. Addressing should not involve tokens or late-bindings.
"""
def k8s_pod_domain(
job_name: str,
experiment_id: int,
work_unit_id: int,
service: str = 'experiments',
namespace: str = 'default',
) -> str:
"""Returns the Kubernetes pod address of a job.
Args:
job_name: Job name.
experiment_id: Experiment ID.
    work_unit_id: Work unit ID.
    service: Name of the service for the job. Defaults to 'experiments'.
    namespace: Namespace of the job. Defaults to 'default'.
"""
return (
f'{experiment_id}-{work_unit_id}-{job_name}'
f'.{service}.{namespace}.svc.cluster.local:2222'
)
|
xmanager-main
|
xmanager/contrib/addressing.py
|
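A worked example of `addressing.k8s_pod_domain` above, using the default `service` and `namespace`:

```python
from xmanager.contrib import addressing

# The address is built purely from static identifiers, so it can be
# computed before anything is actually scheduled.
address = addressing.k8s_pod_domain('train', experiment_id=7, work_unit_id=1)
assert address == '7-1-train.experiments.default.svc.cluster.local:2222'
```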
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for xm_tensorflow."""
import json
from typing import cast
from absl.testing import absltest
from absl.testing import parameterized
from xmanager import xm
from xmanager import xm_local
from xmanager.contrib import addressing
from xmanager.contrib import xm_tensorflow
class XmTensorflowTest(parameterized.TestCase):
@parameterized.product(num_workers=[0, 2, 4])
def test_kubernetes_multiworker_strategy(self, num_workers):
experiment = absltest.mock.MagicMock()
type(experiment).experiment_id = absltest.mock.PropertyMock(
return_value=123
)
work_unit = absltest.mock.MagicMock()
type(work_unit).work_unit_id = absltest.mock.PropertyMock(return_value=42)
executable = absltest.mock.MagicMock()
executor = absltest.mock.MagicMock(spec=xm_local.Kubernetes)
hparams = xm.SequentialArgs.from_collection({'a': 'b'})
worker_name = 'best_worker'
builder = xm_tensorflow.MultiWorkerMirroredStrategyBuilder(
experiment=experiment,
worker_executable=executable,
worker_executor=executor,
num_workers=num_workers,
worker_name=worker_name,
)
job_group = builder.create_job_group(hparams=hparams, work_unit=work_unit)
expected_job_names = [f'{worker_name}-{i}' for i in range(num_workers)]
expected_worker_domains = [
addressing.k8s_pod_domain(
job_name=job_name, experiment_id=123, work_unit_id=42
)
for job_name in expected_job_names
]
self.assertSameElements(expected_job_names, list(job_group.jobs.keys()))
for i, job_name in enumerate(expected_job_names):
self.assertIsInstance(job_group.jobs[job_name], xm.Job)
job = cast(xm.Job, job_group.jobs[job_name])
self.assertEqual(job.executable, executable)
self.assertEqual(job.executor, executor)
self.assertEqual(job.args, xm.SequentialArgs.from_collection(hparams))
tf_config = {
'cluster': {'worker': expected_worker_domains},
'task': {'type': 'worker', 'index': i},
}
self.assertEqual(job.env_vars, {'TF_CONFIG': json.dumps(tf_config)})
@parameterized.product(num_workers=[0, 2], num_ps=[0, 2])
def test_kubernetes_parameter_server_strategy(self, num_workers, num_ps):
experiment = absltest.mock.MagicMock()
type(experiment).experiment_id = absltest.mock.PropertyMock(
return_value=123
)
work_unit = absltest.mock.MagicMock()
type(work_unit).work_unit_id = absltest.mock.PropertyMock(return_value=42)
chief_executable = absltest.mock.MagicMock()
chief_executor = absltest.mock.MagicMock(spec=xm_local.Kubernetes)
worker_executable = absltest.mock.MagicMock()
worker_executor = absltest.mock.MagicMock(spec=xm_local.Kubernetes)
ps_executable = absltest.mock.MagicMock()
ps_executor = absltest.mock.MagicMock(spec=xm_local.Kubernetes)
chief_name = 'best_chief'
worker_name = 'best_worker'
ps_name = 'best_ps'
hparams = xm.SequentialArgs.from_collection({'a': 'b'})
builder = xm_tensorflow.ParameterServerStrategyBuilder(
experiment,
chief_executable,
chief_executor,
worker_executable,
worker_executor,
ps_executable,
ps_executor,
chief_name=chief_name,
worker_name=worker_name,
ps_name=ps_name,
num_workers=num_workers,
num_ps=num_ps,
)
job_group = builder.create_job_group(work_unit=work_unit, hparams=hparams)
expected_worker_job_names = [
f'{worker_name}-{i}' for i in range(num_workers)
]
expected_ps_job_names = [f'{ps_name}-{i}' for i in range(num_ps)]
self.assertSameElements(
[chief_name] + expected_worker_job_names + expected_ps_job_names,
list(job_group.jobs.keys()),
)
expected_chief_domain = addressing.k8s_pod_domain(
chief_name, experiment_id=123, work_unit_id=42
)
expected_worker_domains = [
addressing.k8s_pod_domain(worker_name, 123, 42)
for worker_name in expected_worker_job_names
]
expected_ps_domains = [
addressing.k8s_pod_domain(ps_name, 123, 42)
for ps_name in expected_ps_job_names
]
for job in job_group.jobs.values():
self.assertIsInstance(job, xm.Job)
# Test chief
job = cast(xm.Job, job_group.jobs[chief_name])
def _create_expected_tf_config(task_type, task_index):
return {
'cluster': {
'chief': [expected_chief_domain],
'worker': expected_worker_domains,
'ps': expected_ps_domains,
},
'task': {
'type': task_type,
'index': task_index,
},
}
self.assertEqual(job.executable, chief_executable)
self.assertEqual(job.executor, chief_executor)
self.assertEqual(
job.env_vars,
{'TF_CONFIG': json.dumps(_create_expected_tf_config('chief', 0))},
)
# Test workers
for i in range(num_workers):
worker_job_name = expected_worker_job_names[i]
job = cast(xm.Job, job_group.jobs[worker_job_name])
self.assertEqual(job.executable, worker_executable)
self.assertEqual(job.executor, worker_executor)
self.assertEqual(job.args, xm.SequentialArgs.from_collection(hparams))
self.assertEqual(
job.env_vars,
{'TF_CONFIG': json.dumps(_create_expected_tf_config('worker', i))},
)
# Test parameter servers
for i in range(num_ps):
ps_job_name = expected_ps_job_names[i]
job = cast(xm.Job, job_group.jobs[ps_job_name])
self.assertEqual(job.executable, ps_executable)
self.assertEqual(job.executor, ps_executor)
self.assertEqual(
job.env_vars,
{'TF_CONFIG': json.dumps(_create_expected_tf_config('ps', i))},
)
if __name__ == '__main__':
absltest.main()
|
xmanager-main
|
xmanager/contrib/xm_tensorflow_test.py
|
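A hedged sketch of the builder exercised by the test above; `experiment`, `executable`, `executor`, `hparams`, and `work_unit` stand in for real launcher objects:

```python
from xmanager.contrib import xm_tensorflow

builder = xm_tensorflow.MultiWorkerMirroredStrategyBuilder(
    experiment=experiment,
    worker_executable=executable,
    worker_executor=executor,
    num_workers=2,
)
# Each job in the returned group carries a TF_CONFIG env var pointing at
# its peers' Kubernetes pod domains (see addressing.k8s_pod_domain).
job_group = builder.create_job_group(hparams=hparams, work_unit=work_unit)
```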
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the xmanager.contrib.tensoboard module."""
from typing import Any, Mapping
from absl.testing import absltest
from absl.testing import parameterized
from xmanager import xm
from xmanager import xm_local
from xmanager.contrib import tensorboard
class TensorboardTest(parameterized.TestCase):
@parameterized.product(
executor=[xm_local.Vertex(), xm_local.Kubernetes(), xm_local.Local()],
logdir=['logs'],
args=[{}, None, {'new': 0}],
)
def test_add_tensorboard_negative_timeout(
self, executor: xm.Executor, logdir: str, args: Mapping[str, Any]
):
mock_experiment = absltest.mock.Mock()
mock_experiment.add.return_value = None
mock_executable = absltest.mock.Mock()
mock_experiment.package.return_value = [mock_executable]
with self.assertRaises(RuntimeError):
tensorboard.add_tensorboard(
mock_experiment,
logdir=logdir,
executor=executor,
timeout_secs=-5,
args=args,
)
@parameterized.product(
executor=[xm_local.Vertex(), xm_local.Kubernetes(), xm_local.Local()],
logdir=['logs'],
timeout_secs=[0, 5],
args=[{}, None, {'new': 0}],
)
def test_add_tensorboard(
self,
executor: xm.Executor,
logdir: str,
timeout_secs: int,
args: Mapping[str, Any],
):
mock_experiment = absltest.mock.Mock()
mock_experiment.add.return_value = None
mock_executable = absltest.mock.Mock()
mock_experiment.package.return_value = [mock_executable]
tensorboard.add_tensorboard(
mock_experiment,
logdir=logdir,
executor=executor,
timeout_secs=timeout_secs,
args=args,
)
expected_packageable_spec_arg = (
tensorboard.TensorboardProvider.get_tensorboard_packageable(
timeout_secs=timeout_secs
)
)
mock_experiment.package.assert_called_once()
packageable_arg = mock_experiment.package.call_args[0][0][0]
self.assertEqual(
packageable_arg.executable_spec, expected_packageable_spec_arg
)
self.assertEqual(packageable_arg.executor_spec, executor.Spec())
expected_job = xm.Job(
executable=mock_executable,
executor=executor,
args=tensorboard.TensorboardProvider.get_tensorboard_job_args(
logdir, additional_args=args
),
name='tensorboard',
)
mock_experiment.add.assert_called_once()
auxiliary_job_arg = mock_experiment.add.call_args[0][0]
self.assertIsInstance(auxiliary_job_arg, xm.AuxiliaryUnitJob)
self.assertEqual(auxiliary_job_arg._job, expected_job)
def test_get_tensorboard_packageable_negative_timeout(self):
provider = tensorboard.TensorboardProvider
with self.assertRaises(RuntimeError):
provider.get_tensorboard_packageable(timeout_secs=-5)
@parameterized.product(timeout_secs=[0, 5])
def test_get_tensorboard_packageable(self, timeout_secs: int):
provider = tensorboard.TensorboardProvider
packageable = provider.get_tensorboard_packageable(
timeout_secs=timeout_secs
)
self.assertIsInstance(packageable, xm.PythonContainer)
self.assertEqual(packageable.base_image, 'tensorflow/tensorflow')
@parameterized.product(
log_dir=['logs'],
port=[None, 6006, 2002],
additional_args=[{}, None, {'logdir': 'logs_v2'}, {'new': 0}],
)
def test_get_tensorboard_job_args(
self, log_dir: str, port: int, additional_args: Mapping[str, Any]
):
provider = tensorboard.TensorboardProvider
args = provider.get_tensorboard_job_args(log_dir, port, additional_args)
self.assertDictContainsSubset(
dict(
{
'logdir': log_dir,
'port': port or provider.DEFAULT_TENSORBOARD_PORT,
},
**(additional_args if additional_args else {}),
),
args,
)
if __name__ == '__main__':
absltest.main()
|
xmanager-main
|
xmanager/contrib/tensorboard_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for running Tensorboard from the client."""
from typing import Any, Mapping, Optional
from xmanager import xm
class TensorboardProvider:
"""A class to generate package and job/args to Tensorboard jobs."""
DEFAULT_TENSORBOARD_PORT = 6006
@staticmethod
def get_tensorboard_packageable(timeout_secs: int) -> xm.PythonContainer:
"""Creates container spec running TensorBoard server.
Args:
timeout_secs: Seconds to run the server for. Note that a value of 0
disables the associated timeout.
Raises:
RuntimeError: `timeout_secs` is negative.
Returns:
Spec of container running TensorBoard server for a specified
period of time.
"""
if timeout_secs < 0:
raise RuntimeError('`timeout_secs` must be a nonnegative number')
return xm.PythonContainer(
base_image='tensorflow/tensorflow',
entrypoint=xm.CommandList([f'timeout {timeout_secs}s tensorboard']),
)
@staticmethod
def get_tensorboard_job_args(
log_dir: str,
port: Optional[int] = None,
additional_args: Optional[Mapping[str, Any]] = None,
) -> Mapping[str, Any]:
"""Get arguments to start a Tensorboard job."""
args = {
'logdir': log_dir,
'port': port or TensorboardProvider.DEFAULT_TENSORBOARD_PORT,
# Allows accessing visualisations from Docker container running locally.
'host': '0.0.0.0',
# This is set to true by default when running Tensorboard.
# Since it doesn't seem to be working well with GKE Workload Identity,
        # we set it to false for now. Can be overridden through the
# `additional_args` param.
#
# https://github.com/tensorflow/tensorboard/issues/4784#issuecomment-868945650
'load_fast': 'false',
}
if additional_args:
args.update(additional_args)
return args
def add_tensorboard(
experiment: xm.Experiment,
logdir: str,
executor: xm.Executor,
timeout_secs: int = 60 * 60 * 24,
args: Optional[Mapping[str, Any]] = None,
) -> None:
"""Self-contained function which adds a Tensorboard auxiliary job to @experiment."""
provider = TensorboardProvider
[executable] = experiment.package(
[
xm.Packageable(
provider.get_tensorboard_packageable(timeout_secs=timeout_secs),
executor.Spec(),
)
]
)
job = xm.Job(
executable,
executor,
args=provider.get_tensorboard_job_args(logdir, additional_args=args),
name='tensorboard',
)
# TODO: Add support for `termination_delay_secs`.
experiment.add(xm.AuxiliaryUnitJob(job, termination_delay_secs=0))
|
xmanager-main
|
xmanager/contrib/tensorboard.py
|
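A usage sketch of `add_tensorboard` above, assuming an open experiment; the log directory is a hypothetical bucket path:

```python
from xmanager import xm_local
from xmanager.contrib import tensorboard

# Attach a TensorBoard sidecar that shuts itself down after an hour.
tensorboard.add_tensorboard(
    experiment,
    logdir='gs://my-bucket/logs',
    executor=xm_local.Local(),
    timeout_secs=60 * 60,
)
```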
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for communicating with the Bazel server."""
import abc
from typing import List, Sequence, Tuple
import attr
class BazelService(abc.ABC):
"""An interface for Bazel operations."""
@abc.abstractmethod
def fetch_kinds(self, labels: Sequence[str]) -> List[str]:
"""Fetches kinds of given targets.
See https://docs.bazel.build/versions/main/query.html#output-label_kind.
Args:
labels: Labels of the targets to query.
Returns:
A list of kinds, for example, `['py_binary rule']`.
"""
raise NotImplementedError
@abc.abstractmethod
def build_targets(
self, labels: Sequence[str], tail_args: Sequence[str]
) -> List[List[str]]:
"""Builds given targets and returns paths to their important outputs.
Args:
labels: Labels of the targets to build.
tail_args: Arguments to append to the Bazel command.
Returns:
For each label returns a list of its important outputs.
"""
raise NotImplementedError
def _to_tuple(sequence: Sequence[str]) -> Tuple[str, ...]:
"""A standalone function to satisfy PyType."""
return tuple(sequence)
@attr.s(auto_attribs=True, frozen=True)
class BazelTarget:
"""A Bazel target to be built."""
label: str
bazel_args: Tuple[str, ...] = attr.ib(converter=_to_tuple)
|
xmanager-main
|
xmanager/bazel/client.py
|
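A non-authoritative sketch of what a concrete `BazelService` might look like when backed by the `bazel` CLI; output parsing and important-output resolution are deliberately simplified:

```python
import subprocess
from typing import List, Sequence

from xmanager.bazel import client


class SubprocessBazelService(client.BazelService):
  """Sketch of a BazelService that shells out to the `bazel` binary."""

  def fetch_kinds(self, labels: Sequence[str]) -> List[str]:
    # `bazel query` takes a single expression; union the labels with '+'.
    expr = ' + '.join(labels)
    output = subprocess.run(
        ['bazel', 'query', '--output=label_kind', expr],
        check=True, capture_output=True, text=True,
    ).stdout
    # Each line looks like 'py_binary rule //path:target'.
    return [' '.join(line.split()[:2]) for line in output.splitlines()]

  def build_targets(
      self, labels: Sequence[str], tail_args: Sequence[str]
  ) -> List[List[str]]:
    subprocess.run(['bazel', 'build', *labels, *tail_args], check=True)
    # Resolving each label's important outputs is omitted in this sketch.
    return [[] for _ in labels]
```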
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A collection of tools for files operations."""
import os
import tempfile
class TemporaryFilePath:
"""A context manager providing a temporary file path.
Unlike NamedTemporaryFile, TemporaryFilePath closes the file when one enters
the context.
"""
_path: str
def __enter__(self):
fd, path = tempfile.mkstemp()
os.close(fd)
self._path = path
return path
def __exit__(self, error_type, error_value, traceback):
os.remove(self._path)
|
xmanager-main
|
xmanager/bazel/file_utils.py
|
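Usage of `TemporaryFilePath` above; because the descriptor is closed before the block runs, the path can safely be handed to tools that reopen it:

```python
from xmanager.bazel import file_utils

with file_utils.TemporaryFilePath() as path:
  with open(path, 'w') as f:
    f.write('scratch data')
  # `path` still exists here and can be passed to external programs.
# On exit the file is removed.
```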
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to authenticate with GCP."""
import functools
import os
from typing import Any, Iterable, Dict
from absl import flags
from google import auth
from googleapiclient import discovery
from googleapiclient import errors
_DEFAULT_SCOPES = ('https://www.googleapis.com/auth/cloud-platform',)
_GCP_SERVICE_ACCOUNT_NAME = flags.DEFINE_string(
'xm_gcp_service_account_name',
'xmanager',
(
        'Specifies the user-managed service account name to be used by '
        'XManager. Note that user-managed service accounts have the following '
        'format: `{service-account-name}@{project-id}.iam.gserviceaccount.com`,'
        ' so only the part before @ is required.'
),
)
def get_project_name() -> str:
"""Gets the Project ID of the GCP Project."""
_, project = auth.default()
return project
def get_project_number() -> str:
"""Gets the Project Number of the GCP Project."""
rm = discovery.build('cloudresourcemanager', 'v1')
response = rm.projects().get(projectId=get_project_name()).execute()
return response['projectNumber']
def get_creds(scopes: Iterable[str] = _DEFAULT_SCOPES):
"""Gets the google credentials to be used with GCP APIs."""
creds, _ = auth.default(scopes=scopes)
return creds
# The @lru_cache decorator causes this to only be run once per Python session.
@functools.lru_cache()
def enable_apis():
"""Enables APIs on the GCP Project."""
resource = discovery.build('serviceusage', 'v1')
body = {
'serviceIds': [
'aiplatform.googleapis.com',
'cloudbuild.googleapis.com',
'cloudresourcemanager.googleapis.com',
'compute.googleapis.com',
'container.googleapis.com',
'containerregistry.googleapis.com',
'iam.googleapis.com',
'logging.googleapis.com',
'storage-api.googleapis.com',
'tpu.googleapis.com',
]
}
resource.services().batchEnable(
parent=f'projects/{get_project_number()}', body=body
).execute()
def get_service_account() -> str:
"""Gets or creates the service account for running XManager in GCP.
The default Vertex AI Training Service Agent has limited scopes. It is more
useful to use a custom service account that can access a greater number of
GCP APIs.
The `--xm_gcp_service_account_name` flag can be used to specify a user-managed
service account to be used. If not specified, defaults to `xmanager`.
Returns:
The service account email.
Raises:
HttpError: if the response was not a 2xx or 403.
"""
service_account_name = _GCP_SERVICE_ACCOUNT_NAME.value
service_account = (
f'{service_account_name}@{get_project_name()}.iam.gserviceaccount.com'
)
try:
_maybe_create_service_account(service_account)
_maybe_grant_service_account_permissions(service_account)
except errors.HttpError as e:
# A 403 implies that the user is not an IAM Admin or Cloud Resource Manager
# API is not enabled.
# The project admin probably already has set up IAM roles, so the former
# check can be skipped.
if e.resp.status != 403 or 'cloudresourcemanager.googleapis.com' in e.uri:
raise e
return service_account
def _maybe_create_service_account(service_account: str) -> None:
"""Creates the default service account if it does not exist."""
iam = discovery.build('iam', 'v1')
accounts = (
iam.projects()
.serviceAccounts()
.list(name='projects/' + get_project_name())
.execute()
)
for account in accounts.get('accounts', []):
if account['email'] == service_account:
return
# https://cloud.google.com/iam/docs/reference/rest/v1/projects.serviceAccounts
body = {
'accountId': service_account.split('@')[0],
'serviceAccount': {
'displayName': service_account.split('@')[0],
'description': 'XManager service account',
},
}
  iam.projects().serviceAccounts().create(
      name='projects/' + get_project_name(), body=body
  ).execute()
def _maybe_grant_service_account_permissions(service_account: str) -> None:
"""Grants the default service account IAM permissions if necessary."""
rm = discovery.build('cloudresourcemanager', 'v1')
policy = rm.projects().getIamPolicy(resource=get_project_name()).execute()
want_roles = ['roles/aiplatform.user', 'roles/storage.admin']
should_set = False
for role in want_roles:
member = 'serviceAccount:' + service_account
changed = _add_member_to_iam_policy(policy, role, member)
should_set = should_set or changed
if not should_set:
return None
body = {'policy': policy}
rm.projects().setIamPolicy(resource=get_project_name(), body=body).execute()
def _add_member_to_iam_policy(
policy: Dict[str, Any], role: str, member: str
) -> bool:
"""Modifies the IAM policy to add the member with the role."""
for i, binding in enumerate(policy['bindings']):
if binding['role'] == role:
if member in binding['members']:
return False
policy['bindings'][i]['members'].append(member)
return True
policy['bindings'].append({'role': role, 'members': [member]})
return True
def get_bucket() -> str:
bucket = os.environ.get('GOOGLE_CLOUD_BUCKET_NAME', None)
if bucket:
return bucket
raise ValueError(
'$GOOGLE_CLOUD_BUCKET_NAME is undefined. Run '
'`export GOOGLE_CLOUD_BUCKET_NAME=<bucket-name>`, '
'replacing <bucket-name> with a Google Cloud Storage bucket. '
'You can create a bucket with '
'`gsutil mb -l us-central1 gs://$GOOGLE_CLOUD_BUCKET_NAME`'
)
|
xmanager-main
|
xmanager/cloud/auth.py
|
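A sketch of the typical launch-time sequence using `auth.py` above. All of these calls hit live GCP APIs and assume application-default credentials are configured:

```python
from xmanager.cloud import auth

auth.enable_apis()  # Cached, so it only runs once per Python session.
service_account = auth.get_service_account()
staging_bucket = f'gs://{auth.get_bucket()}'  # Needs $GOOGLE_CLOUD_BUCKET_NAME.
```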
"""Tests for auth."""
import sys
import unittest
from unittest import mock
from absl import flags
from absl.testing import parameterized
from googleapiclient import discovery
from xmanager.cloud import auth
_TEST_SERVICE_ACCOUNT_NAME = 'test-service-account'
_DEFAULT_SERVICE_ACCOUNT_NAME = 'xmanager'
_SERVICE_ACCOUNT_FLAG_TEST_PARAMETERS = [
{
'sys_argv': sys.argv,
'expected_account_name': _DEFAULT_SERVICE_ACCOUNT_NAME,
},
{
'sys_argv': [
*sys.argv,
f'--xm_gcp_service_account_name={_TEST_SERVICE_ACCOUNT_NAME}',
],
'expected_account_name': _TEST_SERVICE_ACCOUNT_NAME,
},
]
class GetServiceAccountTest(parameterized.TestCase):
def setUp(self):
super().setUp()
# Resets flags between runs
flags.FLAGS.unparse_flags()
@parameterized.parameters(_SERVICE_ACCOUNT_FLAG_TEST_PARAMETERS)
def test_get_service_account_existing_account(
self, sys_argv, expected_account_name
):
"""Tests that `get_service_account` does nothing on a properly configured account."""
flags.FLAGS(sys_argv)
mock_service_accounts = mock.Mock()
mock_service_accounts.list.return_value.execute.return_value = {
'accounts': [
{
'email': f'{expected_account_name}@test-project.iam.gserviceaccount.com'
}
]
}
mock_service_accounts.create.return_value.execute.return_value = None
mock_projects = mock.Mock()
mock_projects.serviceAccounts.return_value = mock_service_accounts
mock_projects.getIamPolicy.return_value.execute.return_value = {
'bindings': [
{
'role': 'roles/aiplatform.user',
'members': [
f'serviceAccount:{expected_account_name}'
'@test-project.iam.gserviceaccount.com'
],
},
{
'role': 'roles/storage.admin',
'members': [
f'serviceAccount:{expected_account_name}'
'@test-project.iam.gserviceaccount.com'
],
},
]
}
mock_discovery_build = mock.Mock()
mock_discovery_build.projects.return_value = mock_projects
with mock.patch.object(
auth, 'get_project_name', return_value='test-project'
), mock.patch.object(discovery, 'build', return_value=mock_discovery_build):
auth.get_service_account()
mock_service_accounts.create.assert_not_called()
mock_projects.setIamPolicy.assert_not_called()
@parameterized.parameters(_SERVICE_ACCOUNT_FLAG_TEST_PARAMETERS)
def test_get_service_account_new_account(
self, sys_argv, expected_account_name
):
"""Tests if `get_service_account` creates a new account and permissions properly."""
flags.FLAGS(sys_argv)
mock_service_accounts = mock.Mock()
mock_service_accounts.list.return_value.execute.return_value = {}
mock_service_accounts.create.return_value.execute.return_value = None
mock_projects = mock.Mock()
mock_projects.serviceAccounts.return_value = mock_service_accounts
mock_projects.getIamPolicy.return_value.execute.return_value = {
'bindings': []
}
mock_discovery_build = mock.Mock()
mock_discovery_build.projects.return_value = mock_projects
with mock.patch.object(
auth, 'get_project_name', return_value='test-project'
), mock.patch.object(discovery, 'build', return_value=mock_discovery_build):
auth.get_service_account()
mock_service_accounts.create.assert_called_once_with(
name='projects/test-project',
body={
'accountId': expected_account_name,
'serviceAccount': {
'displayName': expected_account_name,
'description': 'XManager service account',
},
},
)
mock_projects.setIamPolicy.assert_called_once_with(
resource='test-project',
body={
'policy': {
'bindings': [
{
'role': 'roles/aiplatform.user',
'members': [
f'serviceAccount:{expected_account_name}'
'@test-project.iam.gserviceaccount.com'
],
},
{
'role': 'roles/storage.admin',
'members': [
f'serviceAccount:{expected_account_name}'
'@test-project.iam.gserviceaccount.com'
],
},
]
}
},
)
@parameterized.parameters(_SERVICE_ACCOUNT_FLAG_TEST_PARAMETERS)
def test_get_service_account_no_permissions(
self, sys_argv, expected_account_name
):
"""Tests if `get_service_account` creates permissions properly for an existing account with no permissions."""
flags.FLAGS(sys_argv)
mock_service_accounts = mock.Mock()
mock_service_accounts.list.return_value.execute.return_value = {
'accounts': [
{
'email': f'{expected_account_name}@test-project.iam.gserviceaccount.com'
}
]
}
mock_service_accounts.create.return_value.execute.return_value = None
mock_projects = mock.Mock()
mock_projects.serviceAccounts.return_value = mock_service_accounts
mock_projects.getIamPolicy.return_value.execute.return_value = {
'bindings': []
}
mock_discovery_build = mock.Mock()
mock_discovery_build.projects.return_value = mock_projects
with mock.patch.object(
auth, 'get_project_name', return_value='test-project'
), mock.patch.object(discovery, 'build', return_value=mock_discovery_build):
auth.get_service_account()
mock_service_accounts.create.assert_not_called()
mock_projects.setIamPolicy.assert_called_once_with(
resource='test-project',
body={
'policy': {
'bindings': [
{
'role': 'roles/aiplatform.user',
'members': [
f'serviceAccount:{expected_account_name}'
'@test-project.iam.gserviceaccount.com'
],
},
{
'role': 'roles/storage.admin',
'members': [
f'serviceAccount:{expected_account_name}'
'@test-project.iam.gserviceaccount.com'
],
},
]
}
},
)
@parameterized.parameters(_SERVICE_ACCOUNT_FLAG_TEST_PARAMETERS)
def test_get_service_account_some_permissions(
self, sys_argv, expected_account_name
):
"""Tests if `get_service_account` creates permissions properly for an existing account with some permissions."""
flags.FLAGS(sys_argv)
mock_service_accounts = mock.Mock()
mock_service_accounts.list.return_value.execute.return_value = {
'accounts': [
{
'email': (
f'{expected_account_name}'
'@test-project.iam.gserviceaccount.com'
)
},
{
# Used to test that permissions of other users are not affected
'email': 'someone_else@test-project.iam.gserviceaccount.com'
},
]
}
mock_service_accounts.create.return_value.execute.return_value = None
mock_projects = mock.Mock()
mock_projects.serviceAccounts.return_value = mock_service_accounts
mock_projects.getIamPolicy.return_value.execute.return_value = {
'bindings': [
{
'role': 'roles/aiplatform.user',
'members': [
f'serviceAccount:{expected_account_name}'
'@test-project.iam.gserviceaccount.com'
],
},
{
'role': 'roles/storage.admin',
'members': [
'someone-else@test-project.iam.gserviceaccount.com'
],
},
]
}
mock_discovery_build = mock.Mock()
mock_discovery_build.projects.return_value = mock_projects
with mock.patch.object(
auth, 'get_project_name', return_value='test-project'
), mock.patch.object(discovery, 'build', return_value=mock_discovery_build):
auth.get_service_account()
mock_service_accounts.create.assert_not_called()
mock_projects.setIamPolicy.assert_called_once_with(
resource='test-project',
body={
'policy': {
'bindings': [
{
'role': 'roles/aiplatform.user',
'members': [
f'serviceAccount:{expected_account_name}'
'@test-project.iam.gserviceaccount.com'
],
},
{
'role': 'roles/storage.admin',
'members': [
'someone-else@test-project.iam.gserviceaccount.com',
(
f'serviceAccount:{expected_account_name}'
'@test-project.iam.gserviceaccount.com'
),
],
},
]
}
},
)
if __name__ == '__main__':
unittest.main()
|
xmanager-main
|
xmanager/cloud/auth_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for xmanager.cloud.utils."""
import os
import tempfile
import unittest
from xmanager.cloud import utils
_CLUSTER_SPEC = """{
"cluster": {
"workerpool0": ["cmle-training-workerpool0-ab-0:2222"],
"workerpool1": ["cmle-training-workerpool1-ab-0:2222", "cmle-training-workerpool1-ab-1:2222"],
"workerpool2": ["cmle-training-workerpool2-ab-0:2222", "cmle-training-workerpool2-ab-1:2222"]
},
"environment": "cloud",
"task": {
"type": "workerpool1",
"index": 1,
"trial": ""
}
}""".replace(
'\n', ' '
)
class UtilsTest(unittest.TestCase):
def tearDown(self):
super(UtilsTest, self).tearDown()
os.environ['CLUSTER_SPEC'] = ''
def test_get_master_address_port(self):
os.environ['CLUSTER_SPEC'] = _CLUSTER_SPEC
address, port = utils.get_master_address_port()
self.assertEqual(address, 'cmle-training-workerpool0-ab-0')
self.assertEqual(port, '2222')
def test_get_master_address_port_default(self):
address, port = utils.get_master_address_port()
self.assertEqual(address, '127.0.0.1')
self.assertEqual(port, '29500')
def test_get_world_size_rank(self):
os.environ['CLUSTER_SPEC'] = _CLUSTER_SPEC
world_size, rank = utils.get_world_size_rank()
self.assertEqual(world_size, 5)
self.assertEqual(rank, 2)
def test_get_world_size_rank_default(self):
world_size, rank = utils.get_world_size_rank()
self.assertEqual(world_size, 1)
self.assertEqual(rank, 0)
def test_wrap_and_unwrap_addresses(self):
arg = '--master=' + utils.get_workerpool_address('workerpool0')
self.assertEqual(arg, '--master=%objectname(workerpool0)%')
os.environ['CLUSTER_SPEC'] = _CLUSTER_SPEC
self.assertEqual(
utils.map_workerpool_address_args([arg]),
['--master=cmle-training-workerpool0-ab-0:2222'],
)
def test_create_workerpool_address_env_vars_script(self):
os.environ['MY_WORKER'] = utils.get_workerpool_address('workerpool0')
os.environ['CLUSTER_SPEC'] = _CLUSTER_SPEC
t = tempfile.NamedTemporaryFile()
utils.create_workerpool_address_env_vars_script(t.name)
expected = """
#!/bin/bash
export MY_WORKER=cmle-training-workerpool0-ab-0:2222
"""
with open(t.name) as f:
self.assertEqual(f.read(), expected.strip())
if __name__ == '__main__':
unittest.main()
|
xmanager-main
|
xmanager/cloud/utils_test.py
|
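A hedged sketch of how a PyTorch job might consume the helpers exercised above; the `torch.distributed` calls are illustrative and not part of xmanager:

```python
import torch.distributed as dist

from xmanager.cloud import utils

# Both helpers fall back to single-process defaults when CLUSTER_SPEC is
# unset (127.0.0.1:29500, world size 1, rank 0).
address, port = utils.get_master_address_port()
world_size, rank = utils.get_world_size_rank()
dist.init_process_group(
    backend='gloo',
    init_method=f'tcp://{address}:{port}',
    world_size=world_size,
    rank=rank,
)
```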
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
xmanager-main
|
xmanager/cloud/__init__.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with Vertex AI.
https://cloud.google.com/vertex-ai/docs/reference/rest
"""
import asyncio
import logging
import math
import os
from typing import Any, Dict, List, Optional, Sequence, Tuple
import attr
from google.cloud import aiplatform
from google.cloud import aiplatform_v1 as aip_v1
from xmanager import xm
from xmanager.cloud import auth
from xmanager.xm import utils
from xmanager.xm_local import executables as local_executables
from xmanager.xm_local import execution as local_execution
from xmanager.xm_local import executors as local_executors
from xmanager.xm_local import status as local_status
_DEFAULT_LOCATION = 'us-central1'
# The only machines available on AI Platform are N1 machines and A2 machines.
# https://cloud.google.com/ai-platform-unified/docs/predictions/machine-types#machine_type_comparison
# TODO: Move lookup to a canonical place.
_MACHINE_TYPE_TO_CPU_RAM = {
'n1-standard-4': (4, 15 * xm.GiB),
'n1-standard-8': (8, 30 * xm.GiB),
'n1-standard-16': (16, 60 * xm.GiB),
'n1-standard-32': (32, 120 * xm.GiB),
'n1-standard-64': (64, 240 * xm.GiB),
'n1-standard-96': (96, 360 * xm.GiB),
'n1-highmem-2': (2, 13 * xm.GiB),
'n1-highmem-4': (4, 26 * xm.GiB),
'n1-highmem-8': (8, 52 * xm.GiB),
'n1-highmem-16': (16, 104 * xm.GiB),
'n1-highmem-32': (32, 208 * xm.GiB),
'n1-highmem-64': (64, 416 * xm.GiB),
'n1-highmem-96': (96, 624 * xm.GiB),
'n1-highcpu-16': (16, 14 * xm.GiB),
'n1-highcpu-32': (32, 28 * xm.GiB),
'n1-highcpu-64': (64, 57 * xm.GiB),
'n1-highcpu-96': (96, 86 * xm.GiB),
}
_A100_GPUS_TO_MACHINE_TYPE = {
1: 'a2-highgpu-1g',
2: 'a2-highgpu-2g',
4: 'a2-highgpu-4g',
8: 'a2-highgpu-8g',
16: 'a2-megagpu-16g',
}
_CLOUD_TPU_ACCELERATOR_TYPES = {
xm.ResourceType.TPU_V2: 'TPU_V2',
xm.ResourceType.TPU_V3: 'TPU_V3',
}
_STATE_TO_STATUS = {
aip_v1.JobState.JOB_STATE_SUCCEEDED: (
local_status.LocalWorkUnitStatusEnum.COMPLETED
),
aip_v1.JobState.JOB_STATE_CANCELLED: (
local_status.LocalWorkUnitStatusEnum.CANCELLED
),
aip_v1.JobState.JOB_STATE_QUEUED: (
local_status.LocalWorkUnitStatusEnum.RUNNING
),
aip_v1.JobState.JOB_STATE_PENDING: (
local_status.LocalWorkUnitStatusEnum.RUNNING
),
aip_v1.JobState.JOB_STATE_RUNNING: (
local_status.LocalWorkUnitStatusEnum.RUNNING
),
aip_v1.JobState.JOB_STATE_CANCELLING: (
local_status.LocalWorkUnitStatusEnum.RUNNING
),
aip_v1.JobState.JOB_STATE_PAUSED: (
local_status.LocalWorkUnitStatusEnum.RUNNING
),
aip_v1.JobState.JOB_STATE_FAILED: (
local_status.LocalWorkUnitStatusEnum.FAILED
),
}
# Hide noisy warning regarding:
# `file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth`
logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)
_default_client = None
def _vertex_job_predicate(job: xm.Job) -> bool:
return isinstance(job.executor, local_executors.Vertex)
class Client:
"""Client class for interacting with AI Platform (Unified)."""
def __init__(
self, project: Optional[str] = None, location: str = _DEFAULT_LOCATION
) -> None:
"""Create a client.
Args:
project: GCP project ID.
location: Location of the API endpoint. Defaults to region us-central1.
https://cloud.google.com/vertex-ai/docs/reference/rest#service-endpoint
"""
self.location = location
self.project = project or auth.get_project_name()
self.parent = f'projects/{self.project}/locations/{self.location}'
aiplatform.init(project=self.project, location=self.location)
def launch(self, name: str, jobs: Sequence[xm.Job]) -> str:
"""Launch jobs on AI Platform (Unified)."""
pools = []
tensorboard, output_dir = self.get_tensorboard_settings(jobs)
for i, job in enumerate(jobs):
executable = job.executable
if not isinstance(
executable, local_executables.GoogleContainerRegistryImage
):
raise ValueError(
'Executable {} has type {}. Executable must be of '
'type GoogleContainerRegistryImage'.format(
executable, type(executable)
)
)
args = xm.merge_args(executable.args, job.args).to_list(utils.ARG_ESCAPER)
env_vars = {**executable.env_vars, **job.env_vars}
env = [{'name': k, 'value': v} for k, v in env_vars.items()]
if i == 0 and job.executor.requirements.replicas > 1:
raise ValueError(
'The first job in a JobGroup using the Vertex AI '
'executor cannot have requirements.replicas > 1.'
)
pool = aip_v1.WorkerPoolSpec(
machine_spec=get_machine_spec(job),
container_spec=aip_v1.ContainerSpec(
image_uri=executable.image_path,
args=args,
env=env,
),
replica_count=job.executor.requirements.replicas,
)
pools.append(pool)
    # TODO(chenandrew): Vertex Training only allows for 4 worker pools.
    # If Vertex does not implement more worker pools, another work-around
    # is to simply put all jobs into the same worker pool as replicas.
    # Each replica would contain the same image, but would execute different
    # modules based on the replica index. A disadvantage of this
    # implementation is that every replica must have the same machine_spec.
if len(pools) > 4:
raise ValueError(
'Cloud Job for xm jobs {} contains {} worker types. '
'Only 4 worker types are supported'.format(jobs, len(pools))
)
custom_job = aiplatform.CustomJob(
project=self.project,
location=self.location,
display_name=name,
worker_pool_specs=pools,
base_output_dir=output_dir,
staging_bucket=f'gs://{auth.get_bucket()}',
)
    # TODO: Vertex AI can't use TPUs with a service account.
# https://github.com/deepmind/xmanager/issues/11
service_account = auth.get_service_account()
for job in jobs:
assert isinstance(job.executor, local_executors.Vertex)
if job.executor.requirements.accelerator in xm.TpuType:
service_account = None
break
custom_job.submit(
service_account=service_account,
tensorboard=tensorboard,
enable_web_access=True,
)
print(f'Job launched at: {custom_job._dashboard_uri()}') # pylint: disable=protected-access
return custom_job.resource_name
def get_tensorboard_settings(self, jobs: Sequence[xm.Job]) -> Tuple[str, str]:
"""Get the tensorboard settings for a sequence of Jobs."""
executors = []
for job in jobs:
assert isinstance(job.executor, local_executors.Vertex)
executors.append(job.executor)
if all(not executor.tensorboard for executor in executors):
return '', ''
if not executors[0].tensorboard:
raise ValueError(
'Jobs in this job group must have the same tensorboard settings. '
+ 'jobs[0] has no tensorboard settings.'
)
output_dir = executors[0].tensorboard.base_output_directory
tensorboard = executors[0].tensorboard.name
for i, executor in enumerate(executors):
      if not executor.tensorboard:
        raise ValueError(
            'Jobs in this job group must have the same tensorboard settings. '
            + f'jobs[{i}] has no tensorboard settings.'
        )
if (
executor.tensorboard.name != tensorboard
or executor.tensorboard.base_output_directory != output_dir
):
raise ValueError(
'Jobs in this job group must have the same tensorboard settings. '
            + f'jobs[0] has tensorboard = {tensorboard} and output_dir = '
            + f'{output_dir}. jobs[{i}] has tensorboard = '
+ f'{executor.tensorboard.name} and output_dir = '
f'{executor.tensorboard.base_output_directory}.'
)
if output_dir and not output_dir.startswith('gs://'):
output_dir = os.path.join('gs://', output_dir)
return tensorboard, output_dir
async def wait_for_job(self, job_name: str) -> None:
job = aiplatform.CustomJob.get(job_name)
loop = asyncio.get_event_loop()
return await loop.run_in_executor(None, job._block_until_complete) # pylint: disable=protected-access
def cancel(self, job_name: str) -> None:
aiplatform.CustomJob.get(job_name).cancel()
async def get_or_create_tensorboard(self, name: str) -> str:
"""Gets or creates a Vertex Tensorboard instance."""
tensorboard_client = aip_v1.TensorboardServiceAsyncClient(
client_options={
'api_endpoint': f'{self.location}-aiplatform.googleapis.com'
}
)
request = aip_v1.ListTensorboardsRequest(
parent=self.parent, filter=f'displayName={name}'
)
response = await tensorboard_client.list_tensorboards(request)
async for page in response.pages:
if page.tensorboards:
        return page.tensorboards[0].name
return await self.create_tensorboard(name)
async def create_tensorboard(self, name: str) -> str:
"""Creates a Vertex Tensorboard instance."""
tensorboard_client = aip_v1.TensorboardServiceAsyncClient(
client_options={
'api_endpoint': f'{self.location}-aiplatform.googleapis.com'
}
)
tensorboard = aip_v1.Tensorboard(display_name=name)
op = await tensorboard_client.create_tensorboard(
aip_v1.CreateTensorboardRequest(
parent=self.parent,
tensorboard=tensorboard,
)
)
return (await op.result()).name
def get_state(self, job_name: str) -> aip_v1.JobState:
return aiplatform.CustomJob.get(job_name).state
def set_default_client(client: Client) -> None:
global _default_client
_default_client = client
def get_default_client():
global _default_client
if _default_client is None:
_default_client = Client()
return _default_client
def get_machine_spec(job: xm.Job) -> Dict[str, Any]:
"""Get the GCP machine type that best matches the Job's requirements."""
assert isinstance(job.executor, local_executors.Vertex)
requirements = job.executor.requirements
spec = {}
for resource, value in requirements.task_requirements.items():
accelerator_type = None
if resource in xm.GpuType:
accelerator_type = 'NVIDIA_TESLA_' + str(resource).upper()
elif resource in xm.TpuType:
accelerator_type = _CLOUD_TPU_ACCELERATOR_TYPES[resource]
if accelerator_type:
spec['accelerator_type'] = aip_v1.AcceleratorType[accelerator_type]
spec['accelerator_count'] = int(value)
accelerator = spec.get('accelerator_type', None)
if accelerator and accelerator == aip_v1.AcceleratorType.NVIDIA_TESLA_A100:
for gpus, machine_type in sorted(_A100_GPUS_TO_MACHINE_TYPE.items()):
if spec['accelerator_count'] <= gpus:
spec['machine_type'] = machine_type
break
if not spec.get('machine_type', None):
raise ValueError(
'a100={} does not fit in any valid machine type'.format(
spec['accelerator_count']
)
)
elif (
accelerator == aip_v1.AcceleratorType.TPU_V2
or accelerator == aip_v1.AcceleratorType.TPU_V3
):
spec['machine_type'] = 'cloud-tpu'
else:
spec['machine_type'] = cpu_ram_to_machine_type(
requirements.task_requirements.get(xm.ResourceType.CPU),
requirements.task_requirements.get(xm.ResourceType.RAM),
)
return spec
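# For illustration (hypothetical job): a job requesting 8 A100 GPUs yields a
# spec along the lines of
#   {'accelerator_type': aip_v1.AcceleratorType.NVIDIA_TESLA_A100,
#    'accelerator_count': 8,
#    'machine_type': 'a2-highgpu-8g'}
# (the exact machine type depends on _A100_GPUS_TO_MACHINE_TYPE), while a
# CPU-only job falls through to cpu_ram_to_machine_type below.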
@attr.s(auto_attribs=True)
class VertexHandle(local_execution.ExecutionHandle):
"""A handle for referring to the launched container."""
job_name: str
async def wait(self) -> None:
await get_default_client().wait_for_job(self.job_name)
def stop(self) -> None:
get_default_client().cancel(self.job_name)
def get_status(self) -> local_status.LocalWorkUnitStatus:
state = get_default_client().get_state(self.job_name)
status = _STATE_TO_STATUS[int(state)]
return local_status.LocalWorkUnitStatus(status=status)
# Must act on all jobs with `local_executors.Vertex` executor.
def launch(
experiment_title: str, work_unit_name: str, job_group: xm.JobGroup
) -> List[VertexHandle]:
"""Launch Vertex jobs in the job_group and return a handler."""
jobs = xm.job_operators.collect_jobs_by_filter(
job_group, _vertex_job_predicate
)
# As client creation may throw, do not initiate it if there are no jobs.
if not jobs:
return []
job_name = get_default_client().launch(
name=f'{experiment_title}_{work_unit_name}',
jobs=jobs,
)
return [VertexHandle(job_name=job_name)]
def cpu_ram_to_machine_type(cpu: Optional[int], ram: Optional[int]) -> str:
"""Convert a cpu and memory spec into a machine type."""
cpu = cpu or 0
ram = ram or 0
if cpu + ram == 0:
return 'n1-standard-4'
optimal_machine_type = ''
optimal_excess_resources = math.inf
for machine_type, (
machine_cpu,
machine_ram,
) in _MACHINE_TYPE_TO_CPU_RAM.items():
if machine_cpu >= cpu and machine_ram >= ram:
excess = machine_cpu + machine_ram - cpu - ram
if excess < optimal_excess_resources:
optimal_machine_type = machine_type
optimal_excess_resources = excess
if optimal_machine_type:
return optimal_machine_type
raise ValueError(
'(cpu={}, ram={}) does not fit in any valid machine type'.format(cpu, ram)
)
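# Worked example (hypothetical machine table): if _MACHINE_TYPE_TO_CPU_RAM
# contained {'n1-standard-4': (4, 15), 'n1-standard-8': (8, 30)}, then
# cpu_ram_to_machine_type(6, 20) skips 'n1-standard-4' (too small) and
# returns 'n1-standard-8', the fitting type with the least excess capacity
# (excess = 8 + 30 - 6 - 20 = 12).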
|
xmanager-main
|
xmanager/cloud/vertex.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from xmanager import xm
from xmanager.cloud import build_image
class BuildImageTest(absltest.TestCase):
def create_container(self, entrypoint) -> xm.PythonContainer:
return xm.PythonContainer(entrypoint=entrypoint)
def test_get_entrypoint_commands_module_adds_suffix(self):
project = self.create_container(xm.ModuleName('some.python.module'))
entrypoint_commands = build_image._get_entrypoint_commands(project)
self.assertEndsWith(entrypoint_commands, ' "$@"')
def test_get_entrypoint_commands_adds_suffix(self):
commands = ['echo "aaa"']
project = self.create_container(xm.CommandList(commands))
entrypoint_commands = build_image._get_entrypoint_commands(project)
self.assertEndsWith(entrypoint_commands, ' "$@"')
def test_get_entrypoint_commands_no_dup_plain_suffix(self):
commands = ['echo "aaa" $@']
project = self.create_container(xm.CommandList(commands))
entrypoint_commands = build_image._get_entrypoint_commands(project)
self.assertEndsWith(entrypoint_commands, ' $@')
def test_get_entrypoint_commands_no_dup_quoted_suffix(self):
commands = ['echo "aaa" "$@"']
project = self.create_container(xm.CommandList(commands))
entrypoint_commands = build_image._get_entrypoint_commands(project)
self.assertEndsWith(entrypoint_commands, ' "$@"')
self.assertNotEndsWith(entrypoint_commands, ' "$@" "$@"')
def test_get_entrypoint_commands_dup_single_quoted_suffix(self):
commands = ['echo "aaa" \'$@\'']
project = self.create_container(xm.CommandList(commands))
entrypoint_commands = build_image._get_entrypoint_commands(project)
self.assertEndsWith(entrypoint_commands, ' \'$@\' "$@"')
if __name__ == '__main__':
absltest.main()
|
xmanager-main
|
xmanager/cloud/build_image_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods to find the cluster details when running on Vertex."""
import json
import os
import re
from typing import List, Sequence, Tuple
import urllib.request
from absl import logging
from google.cloud import storage
def get_master_address_port() -> Tuple[str, str]:
"""Get the master worker from CLUSTER_SPEC.
https://cloud.google.com/ai-platform/training/docs/distributed-training-containers#about-cluster-spec
Returns:
address string and port string
"""
cluster_spec = os.environ.get('CLUSTER_SPEC', None)
logging.info('CLUSTER_SPEC: %s', cluster_spec)
if not cluster_spec or 'workerpool0' not in cluster_spec:
return '127.0.0.1', '29500'
cluster_spec = json.loads(cluster_spec)
master = cluster_spec['cluster']['workerpool0'][0]
[addr, port] = master.split(':')
return addr, port
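# For illustration, a minimal CLUSTER_SPEC (address hypothetical) such as
#   {"cluster": {"workerpool0": ["10.0.0.2:2222"]},
#    "task": {"type": "workerpool0", "index": 0}}
# makes this function return ('10.0.0.2', '2222'); without a CLUSTER_SPEC,
# the single-process defaults ('127.0.0.1', '29500') are returned.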
def get_world_size_rank() -> Tuple[int, int]:
"""Get the world size and rank of the current replica from CLUSTER_SPEC."""
cluster_spec = os.environ.get('CLUSTER_SPEC', None)
if not cluster_spec:
return 1, 0
cluster_spec = json.loads(cluster_spec)
world_size = 0
for pool in cluster_spec['cluster']:
if pool == cluster_spec['task']['type']:
rank = world_size + cluster_spec['task']['index']
world_size += len(cluster_spec['cluster'][pool])
logging.info('WORLD SIZE: %d; RANK: %d', world_size, rank)
return world_size, rank
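# For example (hypothetical spec): with cluster
# {"workerpool0": ["a:1"], "workerpool1": ["b:1"]} and task
# {"type": "workerpool1", "index": 0}, the world size is 2 and the rank is 1;
# a replica's rank is the number of workers in earlier pools plus its index
# within its own pool.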
def create_cluster_specs(workers: Sequence[str]) -> List[str]:
"""Takes a list of domain names and constructs a CLUSTER_SPEC for each."""
cluster = {}
for i, domain in enumerate(workers):
cluster[f'workerpool{i}'] = [domain]
specs = []
for i in range(len(workers)):
spec = {
'cluster': cluster,
# Each workerpool created above holds exactly one worker, so the task's
# index within its own pool is always 0.
'task': {'type': f'workerpool{i}', 'index': 0},
}
specs.append(json.dumps(spec))
return specs
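# For example, create_cluster_specs(['a:1', 'b:1']) produces two JSON strings
# sharing the cluster {"workerpool0": ["a:1"], "workerpool1": ["b:1"]}, with
# tasks {"type": "workerpool0", "index": 0} and
# {"type": "workerpool1", "index": 0} respectively.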
def get_workerpool_address(workerpool: str) -> str:
"""Creates a late-binding that is mapped at runtime."""
return f'%objectname({workerpool})%'
def map_workerpool_address_args(args: List[str]) -> List[str]:
"""Maps late-binding to workerpool addresses at runtime."""
cluster_spec = os.environ.get('CLUSTER_SPEC')
if cluster_spec is None:
return args
# capture %objectname(<capture-group>)%
late_bind_regex = re.compile(r'\%objectname\((.*)\)\%')
cluster_spec = json.loads(cluster_spec)['cluster']
result = []
for arg in args:
match = late_bind_regex.search(arg)
if match is None:
result.append(arg)
else:
worker_type = match.group(1)
result.append(
arg.replace(
f'%objectname({worker_type})%', cluster_spec[worker_type][0]
)
)
return result
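# For example, with a CLUSTER_SPEC whose cluster contains
# {"workerpool0": ["10.0.0.2:2222"]} (address hypothetical), the argument
# '--master=%objectname(workerpool0)%' is rewritten to
# '--master=10.0.0.2:2222'; arguments without the placeholder pass through
# unchanged.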
def print_workerpool_address_args(argv: List[str]) -> None:
# Note that this method is called by
# third_party/py/xmanager/cloud/data/wrapped_entrypoint.sh
for arg in map_workerpool_address_args(argv[1:]):
print(arg)
def create_workerpool_address_env_vars_script(path: str) -> None:
"""Create a script to map late-binding env vars to their value at runtime."""
with open(path, 'w') as f:
f.write('#!/bin/bash\n\n')
cluster_spec = os.environ.get('CLUSTER_SPEC', None)
if cluster_spec is None:
return
content = []
# capture %objectname(<capture-group>)%
late_bind_regex = re.compile(r'\%objectname\((.*)\)\%')
cluster_spec = json.loads(cluster_spec)['cluster']
for key, value in os.environ.items():
match = late_bind_regex.match(value)
if match:
worker_type = match.group(1)
content.append(f'export {key}={cluster_spec[worker_type][0]}')
if content:
with open(path, 'a') as f:
f.write('\n'.join(content))
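# For example, if the environment contained (hypothetically)
# MASTER_ADDR='%objectname(workerpool0)%' and CLUSTER_SPEC mapped workerpool0
# to ['10.0.0.2:2222'], the generated script would contain:
#   export MASTER_ADDR=10.0.0.2:2222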
def get_region() -> str:
"""Get the region of the current GCE VM from metadata, e.g. us-central1."""
# Default VM instance metadata
# https://cloud.google.com/compute/docs/metadata/default-metadata-values#vm_instance_metadata
request = urllib.request.Request(
'http://metadata.google.internal/computeMetadata/v1/instance/zone'
)
request.add_header('Metadata-Flavor', 'Google')
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
# The metadata value has the form 'projects/<project-number>/zones/<zone>'.
zone = content.split('/')[-1]
# Drop the zone letter suffix (e.g. '-a') to obtain the region.
region = zone[:-2]
return region
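# For example, a metadata response of 'projects/12345/zones/us-central1-a'
# (project number hypothetical) yields the zone 'us-central1-a' and hence
# the region 'us-central1'.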
def get_closest_bucket(bucket_names: List[str]) -> str:
"""Get the closest bucket from a list of buckets."""
region = get_region()
for name in bucket_names:
b = storage.Bucket(client=storage.Client(), name=name)
b.reload()
# Only works for REGIONAL and MULTI_REGIONAL bucket locations.
if region.startswith(b.location.lower()):
return name
logging.info('None of the buckets are close. Picking the first one.')
return bucket_names[0]
|
xmanager-main
|
xmanager/cloud/utils.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with Kubernetes."""
import asyncio
import functools
from typing import Callable, Dict, List, Optional, Sequence
from absl import flags
import attr
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
from xmanager import xm
from xmanager.xm import utils
from xmanager.xm_local import executables as local_executables
from xmanager.xm_local import execution as local_execution
from xmanager.xm_local import executors as local_executors
from xmanager.xm_local import status as local_status
_K8S_SERVICE_ACCOUNT_NAME = flags.DEFINE_string(
'xm_k8s_service_account_name',
'default',
(
'Specifies the Kubernetes Service Account name to be used by XManager'
' in the pod specifications.'
),
)
@functools.lru_cache()
def client():
# Global singleton defers client creation until an actual launch.
# If the user only launches local jobs, they don't need to create a client.
return Client()
def _kubernetes_job_predicate(job: xm.Job) -> bool:
return isinstance(job.executor, local_executors.Kubernetes)
def convert_to_valid_label(label: str) -> str:
"""Kubernetes labels must be RFC 1123 format compliant.
https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/apps/validation/validation.go
A lowercase RFC 1123 label must consist of lower case alphanumeric characters
or '-', and must start and end with an alphanumeric character (e.g. 'my-name',
or '123-abc', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?')
Args:
label: Label that may or may not be RFC 1123 compliant.
Returns:
A RFC 1123 format compliant label.
"""
return label.replace('_', '-')
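# For example, convert_to_valid_label('my_train_job') returns 'my-train-job',
# which matches the RFC 1123 pattern above.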
def _load_k8s_config() -> None:
"""Loads K8s config based on where the client is running (inside cluster or not)."""
try:
k8s_config.load_incluster_config()
except k8s_config.ConfigException:
# Client is not running inside cluster
k8s_config.load_kube_config()
class Client:
"""Client class for interacting with Kubernetes."""
def __init__(self, api_client: Optional[k8s_client.ApiClient] = None) -> None:
if api_client is None:
_load_k8s_config()
api_client = k8s_client.ApiClient()
self.api_client = api_client
def launch(
self,
get_full_job_name: Callable[[str], str],
jobs: Sequence[xm.Job],
) -> List[k8s_client.V1Job]:
"""Launches jobs on Kubernetes."""
batch_jobs = []
service = 'experiments'
for job in jobs:
executable = job.executable
executor = job.executor
if not isinstance(
executable, local_executables.GoogleContainerRegistryImage
):
raise ValueError(
'Executable {} has type {}. Executable must be of '
'type GoogleContainerRegistryImage.'.format(
executable, type(executable)
)
)
all_env_vars = {**executable.env_vars, **job.env_vars}
env = [k8s_client.V1EnvVar(k, v) for k, v in all_env_vars.items()]
job_name = convert_to_valid_label(get_full_job_name(job.name))
container = k8s_client.V1Container(
name=job_name,
image=executable.image_path,
resources=requirements_from_executor(executor),
args=xm.merge_args(executable.args, job.args).to_list(
utils.ARG_ESCAPER
),
env=env,
)
k8s_job = k8s_client.V1Job()
k8s_job.metadata = k8s_client.V1ObjectMeta(name=job_name)
k8s_job.spec = k8s_client.V1JobSpec(
template=k8s_client.V1PodTemplateSpec(
metadata=k8s_client.V1ObjectMeta(
labels={'service': service},
annotations=annotations_from_executor(executor),
),
spec=k8s_client.V1PodSpec(
service_account=_K8S_SERVICE_ACCOUNT_NAME.value,
hostname=job_name,
subdomain=service,
restart_policy='Never',
containers=[container],
node_selector=node_selector_from_executor(executor),
),
),
backoff_limit=0,
)
batch_jobs.append(k8s_job)
self._create_service(service)
batch_api = k8s_client.BatchV1Api(self.api_client)
for k8s_job in batch_jobs:
batch_api.create_namespaced_job(namespace='default', body=k8s_job)
return batch_jobs
def _create_service(self, service: str) -> None:
"""Creates a K8s service with an `service: {service}` selector."""
core_api = k8s_client.CoreV1Api(self.api_client)
body = k8s_client.V1Service(
metadata=k8s_client.V1ObjectMeta(name=service),
spec=k8s_client.V1ServiceSpec(
selector={'service': service},
cluster_ip='None',
),
)
response = core_api.list_namespaced_service(namespace='default')
for item in response.items:
# service already exists
if item.metadata.name == service:
return
core_api.create_namespaced_service(namespace='default', body=body)
async def wait_for_job(self, job: k8s_client.V1Job) -> None:
batch_api = k8s_client.BatchV1Api(self.api_client)
backoff = 5 # seconds
while True:
await asyncio.sleep(backoff)
response = batch_api.read_namespaced_job_status(
namespace='default', name=job.metadata.name
)
if response.status.completion_time:
return
@attr.s(auto_attribs=True)
class KubernetesHandle(local_execution.ExecutionHandle):
"""A handle for referring to the launched container."""
jobs: List[k8s_client.V1Job]
async def wait(self) -> None:
await asyncio.gather(*[client().wait_for_job(job) for job in self.jobs])
def get_status(self) -> local_status.LocalWorkUnitStatus:
raise NotImplementedError
# Must act on all jobs with `local_executors.Kubernetes` executor.
def launch(
get_full_job_name: Callable[[str], str], job_group: xm.JobGroup
) -> List[KubernetesHandle]:
"""Launch K8s jobs in the job_group and return a handler."""
jobs = xm.job_operators.collect_jobs_by_filter(
job_group, _kubernetes_job_predicate
)
# As client creation may throw, do not initiate it if there are no jobs.
if not jobs:
return []
k8_jobs = client().launch(
get_full_job_name=get_full_job_name,
jobs=jobs,
)
return [KubernetesHandle(jobs=k8_jobs)]
def requirements_from_executor(
executor: local_executors.Kubernetes,
) -> k8s_client.V1ResourceRequirements:
"""Get resource limits from the executor."""
limits = {}
for resource, value in executor.requirements.task_requirements.items():
if resource in xm.GpuType:
# TODO: Implement detection of whether an accelerator is an Nvidia
# GPU. amd.com/gpu is another type of GPU that is not present in GCP.
limits['nvidia.com/gpu'] = f'{value:g}'
elif resource in xm.TpuType:
pass
else:
# Converts resource amount to a string accepted by Kubernetes:
# https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu
# https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory
limits[str(resource).lower()] = f'{value:.15g}'
return k8s_client.V1ResourceRequirements(limits=limits)
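# For illustration (hypothetical requirements): an executor requesting one
# V100 GPU yields limits of the form {'nvidia.com/gpu': '1'}; CPU and RAM
# amounts are added under their lower-cased resource-type names.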
def annotations_from_executor(
executor: local_executors.Kubernetes,
) -> Dict[str, str]:
"""Get Pod annotations from the executor for TPUs."""
if (
executor.cloud_provider
!= local_executors.GOOGLE_KUBERNETES_ENGINE_CLOUD_PROVIDER
):
return {}
if executor.requirements.accelerator in xm.TpuType:
tpu_runtime_version = 'nightly'
if executor.tpu_capability:
tpu_runtime_version = executor.tpu_capability.tpu_runtime_version
return {'tf-version.cloud-tpus.google.com': tpu_runtime_version}
return {}
def node_selector_from_executor(
executor: local_executors.Kubernetes,
) -> Dict[str, str]:
"""Get Pod annotations from the executor for TPUs."""
if (
executor.cloud_provider
!= local_executors.GOOGLE_KUBERNETES_ENGINE_CLOUD_PROVIDER
):
return {}
for resource in executor.requirements.task_requirements:
if resource in xm.GpuType:
return {
'cloud.google.com/gke-accelerator': (
'nvidia-tesla-' + str(resource).lower()
)
}
return {}
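# For example, a GKE executor requesting a V100 GPU (hypothetical job)
# yields the node selector
# {'cloud.google.com/gke-accelerator': 'nvidia-tesla-v100'}.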
|
xmanager-main
|
xmanager/cloud/kubernetes.py
|