Column schema for the rows below (as reported by the dataset viewer):

  column            dtype    range
  ----------------  -------  ----------------------------
  id                int32    values 0 .. 252k
  repo              string   lengths 7 .. 55
  path              string   lengths 4 .. 127
  func_name         string   lengths 1 .. 88
  original_string   string   lengths 75 .. 19.8k
  language          string   1 distinct value ("python")
  code              string   lengths 75 .. 19.8k
  code_tokens       list
  docstring         string   lengths 3 .. 17.3k
  docstring_tokens  list
  sha               string   lengths 40 .. 40
  url               string   lengths 87 .. 242
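The rows follow a CodeSearchNet-style schema, and every row here is Python. A minimal sketch of loading and inspecting such a corpus with the Hugging Face datasets library; the dataset identifier is an assumption, not something stated in this dump:

# Minimal sketch, assuming these rows come from the Python split of a
# CodeSearchNet-style corpus on the Hugging Face Hub (identifier hypothetical).
from datasets import load_dataset

ds = load_dataset("code_search_net", "python", split="train")  # hypothetical source
row = ds[0]
print(row["repo"], row["path"], row["func_name"])
print(row["docstring"])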
id: 22,500
repo: tensorflow/tensor2tensor
path: tensor2tensor/models/video/sv2p_params.py
func_name: next_frame_sv2p_tiny
original_string:
def next_frame_sv2p_tiny():
  """Tiny SV2P model."""
  hparams = next_frame_sv2p_atari_softmax()
  hparams.batch_size = 2
  hparams.tiny_mode = True
  hparams.num_masks = 1
  hparams.video_modality_loss_cutoff = 0.4
  hparams.video_num_input_frames = 4
  hparams.video_num_target_frames = 4
  return hparams
language: python
code: identical to original_string
code_tokens:
[ "def", "next_frame_sv2p_tiny", "(", ")", ":", "hparams", "=", "next_frame_sv2p_atari_softmax", "(", ")", "hparams", ".", "batch_size", "=", "2", "hparams", ".", "tiny_mode", "=", "True", "hparams", ".", "num_masks", "=", "1", "hparams", ".", "video_modality_loss_cutoff", "=", "0.4", "hparams", ".", "video_num_input_frames", "=", "4", "hparams", ".", "video_num_target_frames", "=", "4", "return", "hparams" ]
docstring:
Tiny SV2P model.
docstring_tokens:
[ "Tiny", "SV2P", "model", "." ]
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/sv2p_params.py#L124-L133
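In Tensor2Tensor, hparams-set functions like next_frame_sv2p_tiny are normally registered and then selected by name on the trainer command line, along the lines of `t2t-trainer --model=next_frame_sv2p --hparams_set=next_frame_sv2p_tiny`; the registration decorator and exact flags are not shown in this record, so treat that invocation as a hedged sketch.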
id: 22,501
repo: tensorflow/tensor2tensor
path: tensor2tensor/models/video/sv2p_params.py
func_name: next_frame_sv2p_cutoff
original_string:
def next_frame_sv2p_cutoff():
  """SV2P model with additional cutoff in L2 loss for environments like pong."""
  hparams = next_frame_sv2p()
  hparams.video_modality_loss_cutoff = 0.4
  hparams.video_num_input_frames = 4
  hparams.video_num_target_frames = 1
  return hparams
language: python
code: identical to original_string
code_tokens:
[ "def", "next_frame_sv2p_cutoff", "(", ")", ":", "hparams", "=", "next_frame_sv2p", "(", ")", "hparams", ".", "video_modality_loss_cutoff", "=", "0.4", "hparams", ".", "video_num_input_frames", "=", "4", "hparams", ".", "video_num_target_frames", "=", "1", "return", "hparams" ]
docstring:
SV2P model with additional cutoff in L2 loss for environments like pong.
docstring_tokens:
[ "SV2P", "model", "with", "additional", "cutoff", "in", "L2", "loss", "for", "environments", "like", "pong", "." ]
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/sv2p_params.py#L145-L151
id: 22,502
repo: tensorflow/tensor2tensor
path: tensor2tensor/data_generators/mscoco.py
func_name: _get_mscoco
original_string:
def _get_mscoco(directory):
  """Download and extract MSCOCO datasets to directory unless it is there."""
  for url in _MSCOCO_URLS:
    filename = os.path.basename(url)
    download_url = os.path.join(_MSCOCO_ROOT_URL, url)
    path = generator_utils.maybe_download(directory, filename, download_url)
    unzip_dir = os.path.join(directory, filename.strip(".zip"))
    if not tf.gfile.Exists(unzip_dir):
      zipfile.ZipFile(path, "r").extractall(directory)
language: python
code: identical to original_string
code_tokens:
[ "def", "_get_mscoco", "(", "directory", ")", ":", "for", "url", "in", "_MSCOCO_URLS", ":", "filename", "=", "os", ".", "path", ".", "basename", "(", "url", ")", "download_url", "=", "os", ".", "path", ".", "join", "(", "_MSCOCO_ROOT_URL", ",", "url", ")", "path", "=", "generator_utils", ".", "maybe_download", "(", "directory", ",", "filename", ",", "download_url", ")", "unzip_dir", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "filename", ".", "strip", "(", "\".zip\"", ")", ")", "if", "not", "tf", ".", "gfile", ".", "Exists", "(", "unzip_dir", ")", ":", "zipfile", ".", "ZipFile", "(", "path", ",", "\"r\"", ")", ".", "extractall", "(", "directory", ")" ]
docstring:
Download and extract MSCOCO datasets to directory unless it is there.
docstring_tokens:
[ "Download", "and", "extract", "MSCOCO", "datasets", "to", "directory", "unless", "it", "is", "there", "." ]
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/mscoco.py#L49-L57
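One detail worth flagging in the record above: str.strip(".zip") strips any of the characters '.', 'z', 'i', 'p' from both ends of the name rather than removing a ".zip" suffix, so "zip2014.zip".strip(".zip") yields "2014". For the actual MSCOCO archive names the result still happens to be a usable directory name, but a suffix-safe variant looks like this (illustrative helper, not part of the record):

# Illustrative suffix-safe alternative (hypothetical helper, not in the record):
def _strip_zip_suffix(filename):
  """Removes a trailing '.zip'; str.strip drops characters, not suffixes."""
  if filename.endswith(".zip"):
    return filename[:-len(".zip")]
  return filename

assert _strip_zip_suffix("zip2014.zip") == "zip2014"
assert "zip2014.zip".strip(".zip") == "2014"  # character-set stripping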
id: 22,503
repo: tensorflow/tensor2tensor
path: tensor2tensor/data_generators/mscoco.py
func_name: mscoco_generator
original_string:
def mscoco_generator(data_dir,
                     tmp_dir,
                     training,
                     how_many,
                     start_from=0,
                     eos_list=None,
                     vocab_filename=None):
  """Image generator for MSCOCO captioning problem with token-wise captions.

  Args:
    data_dir: path to the data directory.
    tmp_dir: path to temporary storage directory.
    training: a Boolean; if true, we use the train set, otherwise the test set.
    how_many: how many images and labels to generate.
    start_from: from which image to start.
    eos_list: optional list of end of sentence tokens, otherwise use default
      value `1`.
    vocab_filename: file within `tmp_dir` to read vocabulary from.

  Yields:
    A dictionary representing the images with the following fields:
    * image/encoded: the string encoding the image as JPEG,
    * image/format: the string "jpeg" representing image format,
    * image/class/label: a list of integers representing the caption,
    * image/height: an integer representing the height,
    * image/width: an integer representing the width.
    Every field is actually a list of the corresponding type.
  """
  eos_list = [1] if eos_list is None else eos_list

  def get_vocab():
    """Get vocab for caption text encoder."""
    if data_dir is not None and vocab_filename is not None:
      vocab_filepath = os.path.join(data_dir, vocab_filename)
      if tf.gfile.Exists(vocab_filepath):
        tf.logging.info("Found vocab file: %s", vocab_filepath)
        vocab_symbolizer = text_encoder.SubwordTextEncoder(vocab_filepath)
        return vocab_symbolizer
      else:
        raise ValueError("Vocab file does not exist: %s" % vocab_filepath)
    return None

  vocab_symbolizer = get_vocab()
  _get_mscoco(tmp_dir)
  caption_filepath = (
      _MSCOCO_TRAIN_CAPTION_FILE if training else _MSCOCO_EVAL_CAPTION_FILE)
  caption_filepath = os.path.join(tmp_dir, caption_filepath)
  prefix = _MSCOCO_TRAIN_PREFIX if training else _MSCOCO_EVAL_PREFIX
  caption_file = io.open(caption_filepath)
  caption_json = json.load(caption_file)
  # Dictionary from image_id to ((filename, height, width), captions).
  image_dict = {}
  for image in caption_json["images"]:
    image_dict[image["id"]] = [(image["file_name"], image["height"],
                                image["width"]), []]
  annotations = caption_json["annotations"]
  annotation_count = len(annotations)
  image_count = len(image_dict)
  tf.logging.info("Processing %d images and %d labels\n" %
                  (image_count, annotation_count))
  for annotation in annotations:
    image_id = annotation["image_id"]
    image_dict[image_id][1].append(annotation["caption"])

  data = list(image_dict.values())[start_from:start_from + how_many]
  random.shuffle(data)
  for image_info, labels in data:
    image_filename = image_info[0]
    image_filepath = os.path.join(tmp_dir, prefix, image_filename)
    with tf.gfile.Open(image_filepath, "rb") as f:
      encoded_image_data = f.read()
      height, width = image_info[1], image_info[2]
      for label in labels:
        if vocab_filename is None or vocab_symbolizer is None:
          label = [ord(c) for c in label] + eos_list
        else:
          label = vocab_symbolizer.encode(label) + eos_list
        yield {
            "image/encoded": [encoded_image_data],
            "image/format": ["jpeg"],
            "image/class/label": label,
            "image/height": [height],
            "image/width": [width]
        }
language: python
code: identical to original_string
code_tokens:
[ "def", "mscoco_generator", "(", "data_dir", ",", "tmp_dir", ",", "training", ",", "how_many", ",", "start_from", "=", "0", ",", "eos_list", "=", "None", ",", "vocab_filename", "=", "None", ")", ":", "eos_list", "=", "[", "1", "]", "if", "eos_list", "is", "None", "else", "eos_list", "def", "get_vocab", "(", ")", ":", "\"\"\"Get vocab for caption text encoder.\"\"\"", "if", "data_dir", "is", "not", "None", "and", "vocab_filename", "is", "not", "None", ":", "vocab_filepath", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "vocab_filename", ")", "if", "tf", ".", "gfile", ".", "Exists", "(", "vocab_filepath", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Found vocab file: %s\"", ",", "vocab_filepath", ")", "vocab_symbolizer", "=", "text_encoder", ".", "SubwordTextEncoder", "(", "vocab_filepath", ")", "return", "vocab_symbolizer", "else", ":", "raise", "ValueError", "(", "\"Vocab file does not exist: %s\"", "%", "vocab_filepath", ")", "return", "None", "vocab_symbolizer", "=", "get_vocab", "(", ")", "_get_mscoco", "(", "tmp_dir", ")", "caption_filepath", "=", "(", "_MSCOCO_TRAIN_CAPTION_FILE", "if", "training", "else", "_MSCOCO_EVAL_CAPTION_FILE", ")", "caption_filepath", "=", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "caption_filepath", ")", "prefix", "=", "_MSCOCO_TRAIN_PREFIX", "if", "training", "else", "_MSCOCO_EVAL_PREFIX", "caption_file", "=", "io", ".", "open", "(", "caption_filepath", ")", "caption_json", "=", "json", ".", "load", "(", "caption_file", ")", "# Dictionary from image_id to ((filename, height, width), captions).", "image_dict", "=", "{", "}", "for", "image", "in", "caption_json", "[", "\"images\"", "]", ":", "image_dict", "[", "image", "[", "\"id\"", "]", "]", "=", "[", "(", "image", "[", "\"file_name\"", "]", ",", "image", "[", "\"height\"", "]", ",", "image", "[", "\"width\"", "]", ")", ",", "[", "]", "]", "annotations", "=", "caption_json", "[", "\"annotations\"", "]", "annotation_count", "=", "len", "(", "annotations", ")", "image_count", "=", "len", "(", "image_dict", ")", "tf", ".", "logging", ".", "info", "(", "\"Processing %d images and %d labels\\n\"", "%", "(", "image_count", ",", "annotation_count", ")", ")", "for", "annotation", "in", "annotations", ":", "image_id", "=", "annotation", "[", "\"image_id\"", "]", "image_dict", "[", "image_id", "]", "[", "1", "]", ".", "append", "(", "annotation", "[", "\"caption\"", "]", ")", "data", "=", "list", "(", "image_dict", ".", "values", "(", ")", ")", "[", "start_from", ":", "start_from", "+", "how_many", "]", "random", ".", "shuffle", "(", "data", ")", "for", "image_info", ",", "labels", "in", "data", ":", "image_filename", "=", "image_info", "[", "0", "]", "image_filepath", "=", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "prefix", ",", "image_filename", ")", "with", "tf", ".", "gfile", ".", "Open", "(", "image_filepath", ",", "\"rb\"", ")", "as", "f", ":", "encoded_image_data", "=", "f", ".", "read", "(", ")", "height", ",", "width", "=", "image_info", "[", "1", "]", ",", "image_info", "[", "2", "]", "for", "label", "in", "labels", ":", "if", "vocab_filename", "is", "None", "or", "vocab_symbolizer", "is", "None", ":", "label", "=", "[", "ord", "(", "c", ")", "for", "c", "in", "label", "]", "+", "eos_list", "else", ":", "label", "=", "vocab_symbolizer", ".", "encode", "(", "label", ")", "+", "eos_list", "yield", "{", "\"image/encoded\"", ":", "[", "encoded_image_data", "]", ",", "\"image/format\"", ":", "[", "\"jpeg\"", "]", ",", "\"image/class/label\"", ":", "label", ",", 
"\"image/height\"", ":", "[", "height", "]", ",", "\"image/width\"", ":", "[", "width", "]", "}" ]
docstring:
Image generator for MSCOCO captioning problem with token-wise captions.

Args:
  data_dir: path to the data directory.
  tmp_dir: path to temporary storage directory.
  training: a Boolean; if true, we use the train set, otherwise the test set.
  how_many: how many images and labels to generate.
  start_from: from which image to start.
  eos_list: optional list of end of sentence tokens, otherwise use default
    value `1`.
  vocab_filename: file within `tmp_dir` to read vocabulary from.

Yields:
  A dictionary representing the images with the following fields:
  * image/encoded: the string encoding the image as JPEG,
  * image/format: the string "jpeg" representing image format,
  * image/class/label: a list of integers representing the caption,
  * image/height: an integer representing the height,
  * image/width: an integer representing the width.
  Every field is actually a list of the corresponding type.
docstring_tokens:
[ "Image", "generator", "for", "MSCOCO", "captioning", "problem", "with", "token", "-", "wise", "captions", "." ]
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/mscoco.py#L60-L142
id: 22,504
repo: tensorflow/tensor2tensor
path: tensor2tensor/utils/cloud_mlengine.py
func_name: flags_as_args
original_string:
def flags_as_args():
  """Convert FLAGS to list of args suitable for passing on cmd line."""
  if hasattr(FLAGS, "flag_values_dict"):
    args_dict = FLAGS.flag_values_dict()
  else:
    args_dict = dict(FLAGS.__dict__["__flags"])
  del args_dict["cloud_mlengine"]  # Configured later
  del args_dict["t2t_usr_dir"]
  args_dict.pop("h", None)
  args_dict.pop("helpfull", None)
  args_dict.pop("helpshort", None)
  args_dict.pop("help", None)
  args = []
  for name, val in args_dict.items():
    if val is None:
      continue
    if name.startswith("autotune"):
      continue
    args.extend(["--%s=%s" % (name, str(val))])
  return args
language: python
code: identical to original_string
code_tokens:
[ "def", "flags_as_args", "(", ")", ":", "if", "hasattr", "(", "FLAGS", ",", "\"flag_values_dict\"", ")", ":", "args_dict", "=", "FLAGS", ".", "flag_values_dict", "(", ")", "else", ":", "args_dict", "=", "dict", "(", "FLAGS", ".", "__dict__", "[", "\"__flags\"", "]", ")", "del", "args_dict", "[", "\"cloud_mlengine\"", "]", "# Configured later", "del", "args_dict", "[", "\"t2t_usr_dir\"", "]", "args_dict", ".", "pop", "(", "\"h\"", ",", "None", ")", "args_dict", ".", "pop", "(", "\"helpfull\"", ",", "None", ")", "args_dict", ".", "pop", "(", "\"helpshort\"", ",", "None", ")", "args_dict", ".", "pop", "(", "\"help\"", ",", "None", ")", "args", "=", "[", "]", "for", "name", ",", "val", "in", "args_dict", ".", "items", "(", ")", ":", "if", "val", "is", "None", ":", "continue", "if", "name", ".", "startswith", "(", "\"autotune\"", ")", ":", "continue", "args", ".", "extend", "(", "[", "\"--%s=%s\"", "%", "(", "name", ",", "str", "(", "val", ")", ")", "]", ")", "return", "args" ]
docstring:
Convert FLAGS to list of args suitable for passing on cmd line.
docstring_tokens:
[ "Convert", "FLAGS", "to", "list", "of", "args", "suitable", "for", "passing", "on", "cmd", "line", "." ]
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/cloud_mlengine.py#L93-L113
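A minimal illustration of the arg format the function above produces (flag names and values are hypothetical, the filtering mirrors the record: None values and autotune* flags are skipped):

args_dict = {"model": "transformer", "worker_gpu": 4,
             "autotune_objective": None, "autotune_max_trials": 10}
args = ["--%s=%s" % (name, str(val))
        for name, val in args_dict.items()
        if val is not None and not name.startswith("autotune")]
print(args)  # ['--model=transformer', '--worker_gpu=4']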
id: 22,505
repo: tensorflow/tensor2tensor
path: tensor2tensor/utils/cloud_mlengine.py
func_name: get_default_master_type
original_string:
def get_default_master_type(num_gpus=1):
  """Returns master_type for trainingInput."""
  gpus_to_master_map = {
      0: "standard",
      1: "standard_p100",
      4: "complex_model_m_p100",
      8: "complex_model_l_gpu",
  }
  if num_gpus not in gpus_to_master_map:
    raise ValueError("Num gpus must be in %s" %
                     str(sorted(list(gpus_to_master_map.keys()))))
  return gpus_to_master_map[num_gpus]
language: python
code: identical to original_string
code_tokens:
[ "def", "get_default_master_type", "(", "num_gpus", "=", "1", ")", ":", "gpus_to_master_map", "=", "{", "0", ":", "\"standard\"", ",", "1", ":", "\"standard_p100\"", ",", "4", ":", "\"complex_model_m_p100\"", ",", "8", ":", "\"complex_model_l_gpu\"", ",", "}", "if", "num_gpus", "not", "in", "gpus_to_master_map", ":", "raise", "ValueError", "(", "\"Num gpus must be in %s\"", "%", "str", "(", "sorted", "(", "list", "(", "gpus_to_master_map", ".", "keys", "(", ")", ")", ")", ")", ")", "return", "gpus_to_master_map", "[", "num_gpus", "]" ]
docstring:
Returns master_type for trainingInput.
docstring_tokens:
[ "Returns", "master_type", "for", "trainingInput", "." ]
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/cloud_mlengine.py#L116-L127
id: 22,506
repo: tensorflow/tensor2tensor
path: tensor2tensor/utils/cloud_mlengine.py
func_name: configure_job
original_string:
def configure_job():
  """Construct jobSpec for ML Engine job."""
  # See documentation:
  # https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#traininginput
  training_input = {
      "pythonModule": "tensor2tensor.bin.t2t_trainer",
      "args": flags_as_args(),
      "region": text_encoder.native_to_unicode(default_region()),
      "runtimeVersion": RUNTIME_VERSION,
      "pythonVersion": "3.5" if sys.version_info.major == 3 else "2.7",
      "jobDir": FLAGS.output_dir,
      "scaleTier": "CUSTOM",
      "masterType": FLAGS.cloud_mlengine_master_type or
                    get_default_master_type(num_gpus=FLAGS.worker_gpu)
  }
  if FLAGS.use_tpu:
    training_input["masterType"] = (FLAGS.cloud_mlengine_master_type or
                                    "standard")
    training_input["workerType"] = "cloud_tpu"
    training_input["workerCount"] = 1
  if FLAGS.hparams_range:
    tf.logging.info("Configuring hyperparameter tuning.")
    training_input["hyperparameters"] = configure_autotune(
        FLAGS.hparams_range,
        FLAGS.autotune_objective,
        FLAGS.autotune_maximize,
        FLAGS.autotune_max_trials,
        FLAGS.autotune_parallel_trials,
    )
  timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
  job_spec = {
      "jobId": "%s_%s_t2t_%s" % (FLAGS.model, FLAGS.problem, timestamp),
      "labels": {
          "model": FLAGS.model,
          "problem": FLAGS.problem,
          "hparams": FLAGS.hparams_set
      },
      "trainingInput": training_input,
  }
  return job_spec
language: python
code: identical to original_string
code_tokens:
[ "def", "configure_job", "(", ")", ":", "# See documentation:", "# https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#traininginput", "training_input", "=", "{", "\"pythonModule\"", ":", "\"tensor2tensor.bin.t2t_trainer\"", ",", "\"args\"", ":", "flags_as_args", "(", ")", ",", "\"region\"", ":", "text_encoder", ".", "native_to_unicode", "(", "default_region", "(", ")", ")", ",", "\"runtimeVersion\"", ":", "RUNTIME_VERSION", ",", "\"pythonVersion\"", ":", "\"3.5\"", "if", "sys", ".", "version_info", ".", "major", "==", "3", "else", "\"2.7\"", ",", "\"jobDir\"", ":", "FLAGS", ".", "output_dir", ",", "\"scaleTier\"", ":", "\"CUSTOM\"", ",", "\"masterType\"", ":", "FLAGS", ".", "cloud_mlengine_master_type", "or", "get_default_master_type", "(", "num_gpus", "=", "FLAGS", ".", "worker_gpu", ")", "}", "if", "FLAGS", ".", "use_tpu", ":", "training_input", "[", "\"masterType\"", "]", "=", "(", "FLAGS", ".", "cloud_mlengine_master_type", "or", "\"standard\"", ")", "training_input", "[", "\"workerType\"", "]", "=", "\"cloud_tpu\"", "training_input", "[", "\"workerCount\"", "]", "=", "1", "if", "FLAGS", ".", "hparams_range", ":", "tf", ".", "logging", ".", "info", "(", "\"Configuring hyperparameter tuning.\"", ")", "training_input", "[", "\"hyperparameters\"", "]", "=", "configure_autotune", "(", "FLAGS", ".", "hparams_range", ",", "FLAGS", ".", "autotune_objective", ",", "FLAGS", ".", "autotune_maximize", ",", "FLAGS", ".", "autotune_max_trials", ",", "FLAGS", ".", "autotune_parallel_trials", ",", ")", "timestamp", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%Y%m%d_%H%M%S\"", ")", "job_spec", "=", "{", "\"jobId\"", ":", "\"%s_%s_t2t_%s\"", "%", "(", "FLAGS", ".", "model", ",", "FLAGS", ".", "problem", ",", "timestamp", ")", ",", "\"labels\"", ":", "{", "\"model\"", ":", "FLAGS", ".", "model", ",", "\"problem\"", ":", "FLAGS", ".", "problem", ",", "\"hparams\"", ":", "FLAGS", ".", "hparams_set", "}", ",", "\"trainingInput\"", ":", "training_input", ",", "}", "return", "job_spec" ]
docstring:
Construct jobSpec for ML Engine job.
docstring_tokens:
[ "Construct", "jobSpec", "for", "ML", "Engine", "job", "." ]
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/cloud_mlengine.py#L130-L170
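Illustrative shape of the job_spec built by the function above; every value here is hypothetical, only the keys and jobId pattern come from the record:

example_job_spec = {
    "jobId": "transformer_translate_ende_wmt32k_t2t_20190101_120000",
    "labels": {
        "model": "transformer",
        "problem": "translate_ende_wmt32k",
        "hparams": "transformer_base",
    },
    "trainingInput": {
        "pythonModule": "tensor2tensor.bin.t2t_trainer",
        "args": ["--model=transformer", "--problem=translate_ende_wmt32k"],
        "region": "us-central1",
        "scaleTier": "CUSTOM",
        "masterType": "standard_p100",
    },
}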
id: 22,507
repo: tensorflow/tensor2tensor
path: tensor2tensor/utils/cloud_mlengine.py
func_name: launch_job
original_string:
def launch_job(job_spec):
  """Launch job on ML Engine."""
  project_id = "projects/{}".format(
      text_encoder.native_to_unicode(default_project()))
  credentials = GoogleCredentials.get_application_default()
  cloudml = discovery.build("ml", "v1", credentials=credentials,
                            cache_discovery=False)
  request = cloudml.projects().jobs().create(body=job_spec, parent=project_id)
  request.execute()
language: python
code: identical to original_string
code_tokens:
[ "def", "launch_job", "(", "job_spec", ")", ":", "project_id", "=", "\"projects/{}\"", ".", "format", "(", "text_encoder", ".", "native_to_unicode", "(", "default_project", "(", ")", ")", ")", "credentials", "=", "GoogleCredentials", ".", "get_application_default", "(", ")", "cloudml", "=", "discovery", ".", "build", "(", "\"ml\"", ",", "\"v1\"", ",", "credentials", "=", "credentials", ",", "cache_discovery", "=", "False", ")", "request", "=", "cloudml", ".", "projects", "(", ")", ".", "jobs", "(", ")", ".", "create", "(", "body", "=", "job_spec", ",", "parent", "=", "project_id", ")", "request", ".", "execute", "(", ")" ]
docstring:
Launch job on ML Engine.
docstring_tokens:
[ "Launch", "job", "on", "ML", "Engine", "." ]
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/cloud_mlengine.py#L173-L181
id: 22,508
repo: tensorflow/tensor2tensor
path: tensor2tensor/utils/cloud_mlengine.py
func_name: _tar_and_copy
original_string:
def _tar_and_copy(src_dir, target_dir):
  """Tar and gzip src_dir and copy to GCS target_dir."""
  src_dir = src_dir.rstrip("/")
  target_dir = target_dir.rstrip("/")
  tmp_dir = tempfile.gettempdir().rstrip("/")
  src_base = os.path.basename(src_dir)
  shell_run(
      "tar --exclude=.git -zcf {tmp_dir}/{src_base}.tar.gz -C {src_dir} .",
      src_dir=src_dir,
      src_base=src_base,
      tmp_dir=tmp_dir)
  final_destination = "%s/%s.tar.gz" % (target_dir, src_base)
  shell_run(
      ("gsutil cp {tmp_dir}/{src_base}.tar.gz "
       "{final_destination}"),
      tmp_dir=tmp_dir,
      src_base=src_base,
      final_destination=final_destination)
  return final_destination
language: python
code: identical to original_string
code_tokens:
[ "def", "_tar_and_copy", "(", "src_dir", ",", "target_dir", ")", ":", "src_dir", "=", "src_dir", ".", "rstrip", "(", "\"/\"", ")", "target_dir", "=", "target_dir", ".", "rstrip", "(", "\"/\"", ")", "tmp_dir", "=", "tempfile", ".", "gettempdir", "(", ")", ".", "rstrip", "(", "\"/\"", ")", "src_base", "=", "os", ".", "path", ".", "basename", "(", "src_dir", ")", "shell_run", "(", "\"tar --exclude=.git -zcf {tmp_dir}/{src_base}.tar.gz -C {src_dir} .\"", ",", "src_dir", "=", "src_dir", ",", "src_base", "=", "src_base", ",", "tmp_dir", "=", "tmp_dir", ")", "final_destination", "=", "\"%s/%s.tar.gz\"", "%", "(", "target_dir", ",", "src_base", ")", "shell_run", "(", "(", "\"gsutil cp {tmp_dir}/{src_base}.tar.gz \"", "\"{final_destination}\"", ")", ",", "tmp_dir", "=", "tmp_dir", ",", "src_base", "=", "src_base", ",", "final_destination", "=", "final_destination", ")", "return", "final_destination" ]
docstring:
Tar and gzip src_dir and copy to GCS target_dir.
docstring_tokens:
[ "Tar", "and", "gzip", "src_dir", "and", "copy", "to", "GCS", "target_dir", "." ]
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/cloud_mlengine.py#L184-L202
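For clarity, the two shell_run templates in the record above expand to a tar command and a gsutil copy. A runnable sketch with illustrative paths (all values hypothetical):

# Expanded form of the two shell_run templates, with illustrative values.
tmp_dir, src_base, src_dir = "/tmp", "my_pkg", "/path/to/my_pkg"
target_dir = "gs://bucket/train_dir"
tar_cmd = ("tar --exclude=.git -zcf {tmp_dir}/{src_base}.tar.gz -C {src_dir} ."
           .format(tmp_dir=tmp_dir, src_base=src_base, src_dir=src_dir))
copy_cmd = "gsutil cp {}/{}.tar.gz {}/{}.tar.gz".format(
    tmp_dir, src_base, target_dir, src_base)
print(tar_cmd)   # tar --exclude=.git -zcf /tmp/my_pkg.tar.gz -C /path/to/my_pkg .
print(copy_cmd)  # gsutil cp /tmp/my_pkg.tar.gz gs://bucket/train_dir/my_pkg.tar.gz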
id: 22,509
repo: tensorflow/tensor2tensor
path: tensor2tensor/utils/cloud_mlengine.py
func_name: tar_and_copy_t2t
original_string:
def tar_and_copy_t2t(train_dir):
  """Tar Tensor2Tensor and cp to train_dir."""
  tf.logging.info("Tarring and pushing local Tensor2Tensor package.")

  output = text_encoder.native_to_unicode(shell_output(
      "pip show tensor2tensor")).split("\n")
  assert output[1].startswith("Version")
  assert output[7].startswith("Location")
  t2t_version = output[1].split(":")[1].strip()
  t2t_dir = output[7].split(":")[1].strip()

  # A local installation cloned from GitHub will have a setup.py file and a docs
  # folder
  is_local_t2t = all([
      tf.gfile.Exists(os.path.join(t2t_dir, fname))
      for fname in ["setup.py", "docs/cloud_mlengine.md"]
  ])

  if is_local_t2t:
    tf.logging.info("Found local T2T installation. Tarring directory %s",
                    t2t_dir)
  else:
    # PyPI installation
    # Create a folder with just a setup.py file pointing to the right version
    tf.logging.info("Found PyPI T2T installation. Launching tensor2tensor==%s",
                    t2t_version)
    t2t_dir = os.path.join(tempfile.gettempdir(), "tensor2tensor_tmp")
    shutil.rmtree(t2t_dir, ignore_errors=True)
    os.mkdir(t2t_dir)
    setup_fname = os.path.join(t2t_dir, "setup.py")
    setup_file_str = get_setup_file(
        name="DummyT2TPackage",
        packages=["tensor2tensor==%s" % t2t_version]
    )
    with tf.gfile.Open(setup_fname, "w") as f:
      f.write(setup_file_str)

  t2t_tar = _tar_and_copy(t2t_dir, train_dir)
  return t2t_tar
language: python
code: identical to original_string
code_tokens:
[ "def", "tar_and_copy_t2t", "(", "train_dir", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Tarring and pushing local Tensor2Tensor package.\"", ")", "output", "=", "text_encoder", ".", "native_to_unicode", "(", "shell_output", "(", "\"pip show tensor2tensor\"", ")", ")", ".", "split", "(", "\"\\n\"", ")", "assert", "output", "[", "1", "]", ".", "startswith", "(", "\"Version\"", ")", "assert", "output", "[", "7", "]", ".", "startswith", "(", "\"Location\"", ")", "t2t_version", "=", "output", "[", "1", "]", ".", "split", "(", "\":\"", ")", "[", "1", "]", ".", "strip", "(", ")", "t2t_dir", "=", "output", "[", "7", "]", ".", "split", "(", "\":\"", ")", "[", "1", "]", ".", "strip", "(", ")", "# A local installation cloned from GitHub will have a setup.py file and a docs", "# folder", "is_local_t2t", "=", "all", "(", "[", "tf", ".", "gfile", ".", "Exists", "(", "os", ".", "path", ".", "join", "(", "t2t_dir", ",", "fname", ")", ")", "for", "fname", "in", "[", "\"setup.py\"", ",", "\"docs/cloud_mlengine.md\"", "]", "]", ")", "if", "is_local_t2t", ":", "tf", ".", "logging", ".", "info", "(", "\"Found local T2T installation. Tarring directory %s\"", ",", "t2t_dir", ")", "else", ":", "# PyPI installation", "# Create a folder with just a setup.py file pointing to the right version", "tf", ".", "logging", ".", "info", "(", "\"Found PyPI T2T installation. Launching tensor2tensor==%s\"", ",", "t2t_version", ")", "t2t_dir", "=", "os", ".", "path", ".", "join", "(", "tempfile", ".", "gettempdir", "(", ")", ",", "\"tensor2tensor_tmp\"", ")", "shutil", ".", "rmtree", "(", "t2t_dir", ",", "ignore_errors", "=", "True", ")", "os", ".", "mkdir", "(", "t2t_dir", ")", "setup_fname", "=", "os", ".", "path", ".", "join", "(", "t2t_dir", ",", "\"setup.py\"", ")", "setup_file_str", "=", "get_setup_file", "(", "name", "=", "\"DummyT2TPackage\"", ",", "packages", "=", "[", "\"tensor2tensor==%s\"", "%", "t2t_version", "]", ")", "with", "tf", ".", "gfile", ".", "Open", "(", "setup_fname", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "setup_file_str", ")", "t2t_tar", "=", "_tar_and_copy", "(", "t2t_dir", ",", "train_dir", ")", "return", "t2t_tar" ]
docstring:
Tar Tensor2Tensor and cp to train_dir.
docstring_tokens:
[ "Tar", "Tensor2Tensor", "and", "cp", "to", "train_dir", "." ]
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/cloud_mlengine.py#L205-L242
id: 22,510
repo: tensorflow/tensor2tensor
path: tensor2tensor/utils/cloud_mlengine.py
func_name: tar_and_copy_usr_dir
original_string:
def tar_and_copy_usr_dir(usr_dir, train_dir):
  """Package, tar, and copy usr_dir to GCS train_dir."""
  tf.logging.info("Tarring and pushing t2t_usr_dir.")
  usr_dir = os.path.abspath(os.path.expanduser(usr_dir))
  # Copy usr dir to a temp location
  top_dir = os.path.join(tempfile.gettempdir(), "t2t_usr_container")
  tmp_usr_dir = os.path.join(top_dir, usr_dir_lib.INTERNAL_USR_DIR_PACKAGE)
  shutil.rmtree(top_dir, ignore_errors=True)
  shutil.copytree(usr_dir, tmp_usr_dir)
  # Insert setup.py if one does not exist
  top_setup_fname = os.path.join(top_dir, "setup.py")
  setup_file_str = get_setup_file(
      name="DummyUsrDirPackage",
      packages=get_requirements(usr_dir)
  )
  with tf.gfile.Open(top_setup_fname, "w") as f:
    f.write(setup_file_str)
  usr_tar = _tar_and_copy(top_dir, train_dir)
  return usr_tar
language: python
code: identical to original_string
code_tokens:
[ "def", "tar_and_copy_usr_dir", "(", "usr_dir", ",", "train_dir", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Tarring and pushing t2t_usr_dir.\"", ")", "usr_dir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "usr_dir", ")", ")", "# Copy usr dir to a temp location", "top_dir", "=", "os", ".", "path", ".", "join", "(", "tempfile", ".", "gettempdir", "(", ")", ",", "\"t2t_usr_container\"", ")", "tmp_usr_dir", "=", "os", ".", "path", ".", "join", "(", "top_dir", ",", "usr_dir_lib", ".", "INTERNAL_USR_DIR_PACKAGE", ")", "shutil", ".", "rmtree", "(", "top_dir", ",", "ignore_errors", "=", "True", ")", "shutil", ".", "copytree", "(", "usr_dir", ",", "tmp_usr_dir", ")", "# Insert setup.py if one does not exist", "top_setup_fname", "=", "os", ".", "path", ".", "join", "(", "top_dir", ",", "\"setup.py\"", ")", "setup_file_str", "=", "get_setup_file", "(", "name", "=", "\"DummyUsrDirPackage\"", ",", "packages", "=", "get_requirements", "(", "usr_dir", ")", ")", "with", "tf", ".", "gfile", ".", "Open", "(", "top_setup_fname", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "setup_file_str", ")", "usr_tar", "=", "_tar_and_copy", "(", "top_dir", ",", "train_dir", ")", "return", "usr_tar" ]
docstring:
Package, tar, and copy usr_dir to GCS train_dir.
docstring_tokens:
[ "Package", "tar", "and", "copy", "usr_dir", "to", "GCS", "train_dir", "." ]
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/cloud_mlengine.py#L245-L263
id: 22,511
repo: tensorflow/tensor2tensor
path: tensor2tensor/utils/cloud_mlengine.py
func_name: validate_flags
original_string:
def validate_flags():
  """Validates flags are set to acceptable values for CloudML Engine runs."""
  assert not job_dir()
  assert FLAGS.output_dir.startswith("gs://")
  assert FLAGS.data_dir.startswith("gs://")
  assert FLAGS.worker_replicas <= 1
  assert FLAGS.ps_replicas <= 0
  if FLAGS.hparams_range:
    assert FLAGS.autotune_objective
  if FLAGS.worker_gpu:
    assert FLAGS.worker_gpu in [1, 4, 8]
  if FLAGS.cloud_mlengine_master_type:
    if FLAGS.worker_gpu:
      if FLAGS.worker_gpu == 1:
        assert FLAGS.cloud_mlengine_master_type in ["standard_gpu",
                                                    "standard_p100"]
      elif FLAGS.worker_gpu == 4:
        assert FLAGS.cloud_mlengine_master_type in ["complex_model_m_gpu",
                                                    "complex_model_m_p100"]
      else:
        assert FLAGS.cloud_mlengine_master_type == "complex_model_l_gpu"
    else:
      assert FLAGS.cloud_mlengine_master_type in ["standard", "large_model",
                                                  "complex_model_s",
                                                  "complex_model_m",
                                                  "complex_model_l"]
language: python
code: identical to original_string
code_tokens:
[ "def", "validate_flags", "(", ")", ":", "assert", "not", "job_dir", "(", ")", "assert", "FLAGS", ".", "output_dir", ".", "startswith", "(", "\"gs://\"", ")", "assert", "FLAGS", ".", "data_dir", ".", "startswith", "(", "\"gs://\"", ")", "assert", "FLAGS", ".", "worker_replicas", "<=", "1", "assert", "FLAGS", ".", "ps_replicas", "<=", "0", "if", "FLAGS", ".", "hparams_range", ":", "assert", "FLAGS", ".", "autotune_objective", "if", "FLAGS", ".", "worker_gpu", ":", "assert", "FLAGS", ".", "worker_gpu", "in", "[", "1", ",", "4", ",", "8", "]", "if", "FLAGS", ".", "cloud_mlengine_master_type", ":", "if", "FLAGS", ".", "worker_gpu", ":", "if", "FLAGS", ".", "worker_gpu", "==", "1", ":", "assert", "FLAGS", ".", "cloud_mlengine_master_type", "in", "[", "\"standard_gpu\"", ",", "\"standard_p100\"", "]", "elif", "FLAGS", ".", "worker_gpu", "==", "4", ":", "assert", "FLAGS", ".", "cloud_mlengine_master_type", "in", "[", "\"complex_model_m_gpu\"", ",", "\"complex_model_m_p100\"", "]", "else", ":", "assert", "FLAGS", ".", "cloud_mlengine_master_type", "==", "\"complex_model_l_gpu\"", "else", ":", "assert", "FLAGS", ".", "cloud_mlengine_master_type", "in", "[", "\"standard\"", ",", "\"large_model\"", ",", "\"complex_model_s\"", ",", "\"complex_model_m\"", ",", "\"complex_model_l\"", "]" ]
docstring:
Validates flags are set to acceptable values for CloudML Engine runs.
docstring_tokens:
[ "Validates", "flags", "are", "set", "to", "acceptable", "values", "for", "CloudML", "Engine", "runs", "." ]
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/cloud_mlengine.py#L298-L323
id: 22,512
repo: tensorflow/tensor2tensor
path: tensor2tensor/utils/cloud_mlengine.py
func_name: launch
original_string:
def launch():
  """Launch t2t_trainer on Cloud ML Engine."""
  validate_flags()
  job_spec = configure_job()
  job_name = job_spec["jobId"]
  tf.logging.info("Launching job %s with ML Engine spec:\n%s", job_name,
                  pprint.pformat(job_spec))
  assert confirm()
  train_dir = FLAGS.output_dir
  t2t_tar = tar_and_copy_t2t(train_dir)
  configure_trainer_package(job_spec, t2t_tar)
  if FLAGS.t2t_usr_dir:
    usr_tar = tar_and_copy_usr_dir(FLAGS.t2t_usr_dir, train_dir)
    configure_usr_dir(job_spec, usr_tar)
  launch_job(job_spec)
  tf.logging.info("Launched %s. See console to track: %s.", job_name,
                  CONSOLE_URL)
  tf.logging.info("Interact with the training job from the command line:")
  tf.logging.info("Abort job: gcloud ml-engine jobs cancel %s", job_name)
  tf.logging.info("Stream logs: gcloud ml-engine jobs stream-logs %s",
                  job_name)
  tf.logging.info("Open tensorboard: tensorboard --logdir %s", train_dir)
language: python
code: identical to original_string
code_tokens:
[ "def", "launch", "(", ")", ":", "validate_flags", "(", ")", "job_spec", "=", "configure_job", "(", ")", "job_name", "=", "job_spec", "[", "\"jobId\"", "]", "tf", ".", "logging", ".", "info", "(", "\"Launching job %s with ML Engine spec:\\n%s\"", ",", "job_name", ",", "pprint", ".", "pformat", "(", "job_spec", ")", ")", "assert", "confirm", "(", ")", "train_dir", "=", "FLAGS", ".", "output_dir", "t2t_tar", "=", "tar_and_copy_t2t", "(", "train_dir", ")", "configure_trainer_package", "(", "job_spec", ",", "t2t_tar", ")", "if", "FLAGS", ".", "t2t_usr_dir", ":", "usr_tar", "=", "tar_and_copy_usr_dir", "(", "FLAGS", ".", "t2t_usr_dir", ",", "train_dir", ")", "configure_usr_dir", "(", "job_spec", ",", "usr_tar", ")", "launch_job", "(", "job_spec", ")", "tf", ".", "logging", ".", "info", "(", "\"Launched %s. See console to track: %s.\"", ",", "job_name", ",", "CONSOLE_URL", ")", "tf", ".", "logging", ".", "info", "(", "\"Interact with the training job from the command line:\"", ")", "tf", ".", "logging", ".", "info", "(", "\"Abort job: gcloud ml-engine jobs cancel %s\"", ",", "job_name", ")", "tf", ".", "logging", ".", "info", "(", "\"Stream logs: gcloud ml-engine jobs stream-logs %s\"", ",", "job_name", ")", "tf", ".", "logging", ".", "info", "(", "\"Open tensorboard: tensorboard --logdir %s\"", ",", "train_dir", ")" ]
docstring:
Launch t2t_trainer on Cloud ML Engine.
docstring_tokens:
[ "Launch", "t2t_trainer", "on", "Cloud", "ML", "Engine", "." ]
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/cloud_mlengine.py#L331-L351
id: 22,513
repo: tensorflow/tensor2tensor
path: tensor2tensor/layers/bayes.py
func_name: add_weight
original_string:
def add_weight(cls):
  """Decorator for Layers, overriding add_weight for trainable initializers."""
  @functools.wraps(cls.add_weight)
  def _add_weight(self,
                  name=None,
                  shape=None,
                  dtype=None,
                  initializer=None,
                  regularizer=None,
                  **kwargs):
    """Adds weight."""
    if isinstance(initializer, tf.keras.layers.Layer):
      weight = initializer(shape, dtype)
      self._trainable_weights.extend(initializer.trainable_weights)  # pylint: disable=protected-access
      self._non_trainable_weights.extend(initializer.non_trainable_weights)  # pylint: disable=protected-access
      if regularizer is not None:
        # TODO(trandustin): Replace need for this with
        # Layer._handle_weight_regularization. For Eager compatibility, random
        # variable __init__s cannot apply TF ops (cl/220898007).
        def loss_fn():
          """Creates a regularization loss `Tensor`."""
          with tf.name_scope(name + '/Regularizer'):
            return regularizer(initializer(shape, dtype))
        self.add_loss(loss_fn)
      return weight
    return super(cls, self).add_weight(name=name,
                                       shape=shape,
                                       dtype=dtype,
                                       initializer=initializer,
                                       regularizer=regularizer,
                                       **kwargs)
  cls.add_weight = _add_weight
  return cls
language: python
code: identical to original_string
code_tokens:
[ "def", "add_weight", "(", "cls", ")", ":", "@", "functools", ".", "wraps", "(", "cls", ".", "add_weight", ")", "def", "_add_weight", "(", "self", ",", "name", "=", "None", ",", "shape", "=", "None", ",", "dtype", "=", "None", ",", "initializer", "=", "None", ",", "regularizer", "=", "None", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Adds weight.\"\"\"", "if", "isinstance", "(", "initializer", ",", "tf", ".", "keras", ".", "layers", ".", "Layer", ")", ":", "weight", "=", "initializer", "(", "shape", ",", "dtype", ")", "self", ".", "_trainable_weights", ".", "extend", "(", "initializer", ".", "trainable_weights", ")", "# pylint: disable=protected-access", "self", ".", "_non_trainable_weights", ".", "extend", "(", "initializer", ".", "non_trainable_weights", ")", "# pylint: disable=protected-access", "if", "regularizer", "is", "not", "None", ":", "# TODO(trandustin): Replace need for this with", "# Layer._handle_weight_regularization. For Eager compatibility, random", "# variable __init__s cannot apply TF ops (cl/220898007).", "def", "loss_fn", "(", ")", ":", "\"\"\"Creates a regularization loss `Tensor`.\"\"\"", "with", "tf", ".", "name_scope", "(", "name", "+", "'/Regularizer'", ")", ":", "return", "regularizer", "(", "initializer", "(", "shape", ",", "dtype", ")", ")", "self", ".", "add_loss", "(", "loss_fn", ")", "return", "weight", "return", "super", "(", "cls", ",", "self", ")", ".", "add_weight", "(", "name", "=", "name", ",", "shape", "=", "shape", ",", "dtype", "=", "dtype", ",", "initializer", "=", "initializer", ",", "regularizer", "=", "regularizer", ",", "*", "*", "kwargs", ")", "cls", ".", "add_weight", "=", "_add_weight", "return", "cls" ]
docstring:
Decorator for Layers, overriding add_weight for trainable initializers.
docstring_tokens:
[ "Decorator", "for", "Layers", "overriding", "add_weight", "for", "trainable", "initializers", "." ]
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/bayes.py#L32-L64
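A minimal sketch of how a class decorator like the one above is applied; DenseVariational is a hypothetical layer name, not something this record defines:

# Hypothetical usage sketch: patch a Keras layer class so that Layer-valued
# initializers are invoked to build the weight and their sub-weights tracked.
@add_weight
class DenseVariational(tf.keras.layers.Dense):
  pass  # inherits Dense behavior; add_weight is now initializer-aware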
id: 22,514
repo: tensorflow/tensor2tensor
path: tensor2tensor/models/video/base_vae.py
func_name: NextFrameBaseVae.get_beta
original_string:
def get_beta(self, kl_loss=0.0):
  """Get the KL multiplier, either dynamically or schedule based.

  If hparams.latent_loss_multiplier_dynamic is set to true, then beta
  is being adjusted to keep KL under hparams.latent_loss_multiplier_epsilon.
  In order to do so, the beta is being updated at each iteration
  by taking steps of size hparams.latent_loss_multiplier_alpha.
  The same formulation can be retrieved by solving the Lagrangian
  with KL < epsilon as a constraint.

  Args:
    kl_loss: KL loss. Only used for dynamic adjustment.

  Returns:
    beta: the final value of beta.
  """
  if self.hparams.latent_loss_multiplier_dynamic:
    beta = tf.Variable(self.hparams.latent_loss_multiplier,
                       trainable=False, dtype=tf.float32)
    alpha = self.hparams.latent_loss_multiplier_alpha
    epsilon = self.hparams.latent_loss_multiplier_epsilon
    shadow_beta = beta + alpha * (kl_loss - epsilon)
    # Capping the beta between 0 and 1. May need to change this later on.
    shadow_beta = tf.maximum(shadow_beta, 0.0)
    shadow_beta = tf.minimum(shadow_beta, 1.0)
    update_op = tf.assign(beta, shadow_beta)
  else:
    beta = common_video.beta_schedule(
        schedule=self.hparams.latent_loss_multiplier_schedule,
        global_step=self.get_iteration_num(),
        final_beta=self.hparams.latent_loss_multiplier,
        decay_start=(self.hparams.num_iterations_1st_stage +
                     self.hparams.num_iterations_2nd_stage),
        decay_end=self.hparams.anneal_end)
    update_op = tf.identity(beta)  # fake update for regular beta.
  with tf.control_dependencies([update_op]):
    tf.summary.scalar("beta", beta)
  return beta
language: python
code: identical to original_string
code_tokens:
[ "def", "get_beta", "(", "self", ",", "kl_loss", "=", "0.0", ")", ":", "if", "self", ".", "hparams", ".", "latent_loss_multiplier_dynamic", ":", "beta", "=", "tf", ".", "Variable", "(", "self", ".", "hparams", ".", "latent_loss_multiplier", ",", "trainable", "=", "False", ",", "dtype", "=", "tf", ".", "float32", ")", "alpha", "=", "self", ".", "hparams", ".", "latent_loss_multiplier_alpha", "epsilon", "=", "self", ".", "hparams", ".", "latent_loss_multiplier_epsilon", "shadow_beta", "=", "beta", "+", "alpha", "*", "(", "kl_loss", "-", "epsilon", ")", "# Caping the beta between 0 and 1. May need to change this later on.", "shadow_beta", "=", "tf", ".", "maximum", "(", "shadow_beta", ",", "0.0", ")", "shadow_beta", "=", "tf", ".", "minimum", "(", "shadow_beta", ",", "1.0", ")", "update_op", "=", "tf", ".", "assign", "(", "beta", ",", "shadow_beta", ")", "else", ":", "beta", "=", "common_video", ".", "beta_schedule", "(", "schedule", "=", "self", ".", "hparams", ".", "latent_loss_multiplier_schedule", ",", "global_step", "=", "self", ".", "get_iteration_num", "(", ")", ",", "final_beta", "=", "self", ".", "hparams", ".", "latent_loss_multiplier", ",", "decay_start", "=", "(", "self", ".", "hparams", ".", "num_iterations_1st_stage", "+", "self", ".", "hparams", ".", "num_iterations_2nd_stage", ")", ",", "decay_end", "=", "self", ".", "hparams", ".", "anneal_end", ")", "update_op", "=", "tf", ".", "identity", "(", "beta", ")", "# fake update for regular beta.", "with", "tf", ".", "control_dependencies", "(", "[", "update_op", "]", ")", ":", "tf", ".", "summary", ".", "scalar", "(", "\"beta\"", ",", "beta", ")", "return", "beta" ]
docstring:
Get the KL multiplier, either dynamically or schedule based.

If hparams.latent_loss_multiplier_dynamic is set to true, then beta is being
adjusted to keep KL under hparams.latent_loss_multiplier_epsilon. In order to
do so, the beta is being updated at each iteration by taking steps of size
hparams.latent_loss_multiplier_alpha. The same formulation can be retrieved
by solving the Lagrangian with KL < epsilon as a constraint.

Args:
  kl_loss: KL loss. Only used for dynamic adjustment.

Returns:
  beta: the final value of beta.
docstring_tokens:
[ "Get", "the", "KL", "multiplier", "either", "dynamically", "or", "schedule", "based", "." ]
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/base_vae.py#L34-L72
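Restating the dynamic branch of the code above as an update rule: beta_{t+1} = clip(beta_t + alpha * (KL_t - epsilon), 0, 1). Beta therefore grows while the KL term exceeds epsilon and shrinks toward zero once the constraint KL < epsilon is satisfied, which is the gradient step on the Lagrange multiplier that the docstring alludes to.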
id: 22,515
repo: tensorflow/tensor2tensor
path: tensor2tensor/models/video/base_vae.py
func_name: NextFrameBaseVae.get_kl_loss
original_string:
def get_kl_loss(self, means, log_vars, means_p=None, log_vars_p=None):
  """Get KL loss for all the predicted Gaussians."""
  kl_loss = 0.0
  if means_p is None:
    means_p = tf.unstack(tf.zeros_like(means))
  if log_vars_p is None:
    log_vars_p = tf.unstack(tf.zeros_like(log_vars))
  enumerated_inputs = enumerate(zip(means, log_vars, means_p, log_vars_p))
  if self.is_training and self.hparams.stochastic_model:
    for i, (mean, log_var, mean_p, log_var_p) in enumerated_inputs:
      kl_loss += common_layers.kl_divergence(mean, log_var, mean_p, log_var_p)
      tf.summary.histogram("posterior_mean_%d" % i, mean)
      tf.summary.histogram("posterior_log_var_%d" % i, log_var)
      tf.summary.histogram("prior_mean_%d" % i, mean_p)
      tf.summary.histogram("prior_log_var_%d" % i, log_var_p)
    tf.summary.scalar("kl_raw", tf.reduce_mean(kl_loss))

  beta = self.get_beta(kl_loss)
  # information capacity from "Understanding disentangling in beta-VAE"
  if self.hparams.information_capacity > 0.0:
    kl_loss = tf.abs(kl_loss - self.hparams.information_capacity)
  return beta * kl_loss
language: python
code: identical to original_string
code_tokens:
[ "def", "get_kl_loss", "(", "self", ",", "means", ",", "log_vars", ",", "means_p", "=", "None", ",", "log_vars_p", "=", "None", ")", ":", "kl_loss", "=", "0.0", "if", "means_p", "is", "None", ":", "means_p", "=", "tf", ".", "unstack", "(", "tf", ".", "zeros_like", "(", "means", ")", ")", "if", "log_vars_p", "is", "None", ":", "log_vars_p", "=", "tf", ".", "unstack", "(", "tf", ".", "zeros_like", "(", "log_vars", ")", ")", "enumerated_inputs", "=", "enumerate", "(", "zip", "(", "means", ",", "log_vars", ",", "means_p", ",", "log_vars_p", ")", ")", "if", "self", ".", "is_training", "and", "self", ".", "hparams", ".", "stochastic_model", ":", "for", "i", ",", "(", "mean", ",", "log_var", ",", "mean_p", ",", "log_var_p", ")", "in", "enumerated_inputs", ":", "kl_loss", "+=", "common_layers", ".", "kl_divergence", "(", "mean", ",", "log_var", ",", "mean_p", ",", "log_var_p", ")", "tf", ".", "summary", ".", "histogram", "(", "\"posterior_mean_%d\"", "%", "i", ",", "mean", ")", "tf", ".", "summary", ".", "histogram", "(", "\"posterior_log_var_%d\"", "%", "i", ",", "log_var", ")", "tf", ".", "summary", ".", "histogram", "(", "\"prior_mean_%d\"", "%", "i", ",", "mean_p", ")", "tf", ".", "summary", ".", "histogram", "(", "\"prior_log_var_%d\"", "%", "i", ",", "log_var_p", ")", "tf", ".", "summary", ".", "scalar", "(", "\"kl_raw\"", ",", "tf", ".", "reduce_mean", "(", "kl_loss", ")", ")", "beta", "=", "self", ".", "get_beta", "(", "kl_loss", ")", "# information capacity from \"Understanding disentangling in beta-VAE\"", "if", "self", ".", "hparams", ".", "information_capacity", ">", "0.0", ":", "kl_loss", "=", "tf", ".", "abs", "(", "kl_loss", "-", "self", ".", "hparams", ".", "information_capacity", ")", "return", "beta", "*", "kl_loss" ]
Get KL loss for all the predicted Gaussians.
[ "Get", "KL", "loss", "for", "all", "the", "predicted", "Gaussians", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/base_vae.py#L74-L95
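The record above accumulates `common_layers.kl_divergence` over matched posterior/prior Gaussians, applies the beta weighting, and optionally the |KL - C| information-capacity trick from "Understanding disentangling in beta-VAE". For reference, the KL between two diagonal Gaussians parameterized by means and log-variances has a closed form; the NumPy sketch below is a minimal illustration of it, not the tensor2tensor implementation, and the reduction axes are an assumption.

import numpy as np

def diag_gaussian_kl(mean_q, log_var_q, mean_p, log_var_p):
  # Closed-form KL(q || p) for diagonal Gaussians, summed over latent dims
  # and averaged over the batch (axis choices are illustrative).
  kl_per_dim = 0.5 * (
      log_var_p - log_var_q
      + (np.exp(log_var_q) + (mean_q - mean_p) ** 2) / np.exp(log_var_p)
      - 1.0)
  return kl_per_dim.sum(axis=-1).mean()

# With the zeros_like defaults above (standard-normal prior), this reduces to
# 0.5 * sum(exp(log_var_q) + mean_q**2 - 1 - log_var_q).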
22,516
tensorflow/tensor2tensor
tensor2tensor/models/video/base_vae.py
NextFrameBaseVae.construct_latent_tower
def construct_latent_tower(self, images, time_axis):
  """Create the latent tower."""
  # No latent in the first phase
  first_phase = tf.less(
      self.get_iteration_num(), self.hparams.num_iterations_1st_stage)

  # use all frames by default but this allows more
  # predicted frames at inference time
  latent_num_frames = self.hparams.latent_num_frames
  tf.logging.info("Creating latent tower with %d frames." % latent_num_frames)
  if latent_num_frames > 0:
    images = images[:, :latent_num_frames]

  return common_video.conv_latent_tower(
      images=images,
      time_axis=time_axis,
      latent_channels=self.hparams.latent_channels,
      min_logvar=self.hparams.latent_std_min,
      is_training=self.is_training,
      random_latent=first_phase,
      tiny_mode=self.hparams.tiny_mode,
      small_mode=self.hparams.small_mode)
python
def construct_latent_tower(self, images, time_axis):
  """Create the latent tower."""
  # No latent in the first phase
  first_phase = tf.less(
      self.get_iteration_num(), self.hparams.num_iterations_1st_stage)

  # use all frames by default but this allows more
  # predicted frames at inference time
  latent_num_frames = self.hparams.latent_num_frames
  tf.logging.info("Creating latent tower with %d frames." % latent_num_frames)
  if latent_num_frames > 0:
    images = images[:, :latent_num_frames]

  return common_video.conv_latent_tower(
      images=images,
      time_axis=time_axis,
      latent_channels=self.hparams.latent_channels,
      min_logvar=self.hparams.latent_std_min,
      is_training=self.is_training,
      random_latent=first_phase,
      tiny_mode=self.hparams.tiny_mode,
      small_mode=self.hparams.small_mode)
[ "def", "construct_latent_tower", "(", "self", ",", "images", ",", "time_axis", ")", ":", "# No latent in the first phase", "first_phase", "=", "tf", ".", "less", "(", "self", ".", "get_iteration_num", "(", ")", ",", "self", ".", "hparams", ".", "num_iterations_1st_stage", ")", "# use all frames by default but this allows more", "# predicted frames at inference time", "latent_num_frames", "=", "self", ".", "hparams", ".", "latent_num_frames", "tf", ".", "logging", ".", "info", "(", "\"Creating latent tower with %d frames.\"", "%", "latent_num_frames", ")", "if", "latent_num_frames", ">", "0", ":", "images", "=", "images", "[", ":", ",", ":", "latent_num_frames", "]", "return", "common_video", ".", "conv_latent_tower", "(", "images", "=", "images", ",", "time_axis", "=", "time_axis", ",", "latent_channels", "=", "self", ".", "hparams", ".", "latent_channels", ",", "min_logvar", "=", "self", ".", "hparams", ".", "latent_std_min", ",", "is_training", "=", "self", ".", "is_training", ",", "random_latent", "=", "first_phase", ",", "tiny_mode", "=", "self", ".", "hparams", ".", "tiny_mode", ",", "small_mode", "=", "self", ".", "hparams", ".", "small_mode", ")" ]
Create the latent tower.
[ "Create", "the", "latent", "tower", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/base_vae.py#L97-L118
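The `first_phase` predicate above makes the tower return a random latent while the iteration count is still below `num_iterations_1st_stage`. A minimal TF1-style sketch of that kind of scheduled gate follows; all sizes and names here are stand-ins, not the tensor2tensor internals.

import tensorflow as tf

# Warm-up gate: sample from the prior during the first-stage iterations,
# switch to the inferred latent afterwards.
global_step = tf.train.get_or_create_global_step()
num_iterations_1st_stage = 10000  # illustrative hparam value
random_latent = tf.less(global_step, num_iterations_1st_stage)

latent = tf.cond(
    random_latent,
    lambda: tf.random_normal([8, 16]),  # prior sample during warm-up
    lambda: tf.zeros([8, 16]))          # placeholder for an inferred latent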
22,517
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_encode
def transformer_encode(encoder_function, inputs, target_space, hparams,
                       attention_weights=None, features=None, losses=None,
                       **kwargs):
  """Encode transformer inputs.

  Args:
    encoder_function: the encoder function
    inputs: Transformer inputs [batch_size, input_length, 1, hidden_dim] which
      will be flattened along the two spatial dimensions.
    target_space: scalar, target space ID.
    hparams: hyperparameters for model.
    attention_weights: weight to store attention to.
    features: optionally pass the entire features dictionary as well. This is
      needed now for "packed" datasets.
    losses: optional list onto which to append extra training losses
    **kwargs: additional arguments to pass to encoder_function

  Returns:
    Tuple of:
        encoder_output: Encoder representation.
            [batch_size, input_length, hidden_dim]
        encoder_decoder_attention_bias: Bias and mask weights for
            encoder-decoder attention. [batch_size, input_length]
  """
  inputs = common_layers.flatten4d3d(inputs)

  encoder_input, self_attention_bias, encoder_decoder_attention_bias = (
      transformer_prepare_encoder(
          inputs, target_space, hparams, features=features))

  mlperf_log.transformer_print(
      key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
      value=hparams.layer_prepostprocess_dropout,
      hparams=hparams)

  encoder_input = tf.nn.dropout(encoder_input,
                                1.0 - hparams.layer_prepostprocess_dropout)

  attn_bias_for_padding = None
  # Otherwise the encoder will just use encoder_self_attention_bias.
  if hparams.unidirectional_encoder:
    attn_bias_for_padding = encoder_decoder_attention_bias

  encoder_output = encoder_function(
      encoder_input,
      self_attention_bias,
      hparams,
      nonpadding=features_to_nonpadding(features, "inputs"),
      save_weights_to=attention_weights,
      make_image_summary=not common_layers.is_xla_compiled(),
      losses=losses,
      attn_bias_for_padding=attn_bias_for_padding,
      **kwargs)

  return encoder_output, encoder_decoder_attention_bias
python
def transformer_encode(encoder_function, inputs, target_space, hparams,
                       attention_weights=None, features=None, losses=None,
                       **kwargs):
  """Encode transformer inputs.

  Args:
    encoder_function: the encoder function
    inputs: Transformer inputs [batch_size, input_length, 1, hidden_dim] which
      will be flattened along the two spatial dimensions.
    target_space: scalar, target space ID.
    hparams: hyperparameters for model.
    attention_weights: weight to store attention to.
    features: optionally pass the entire features dictionary as well. This is
      needed now for "packed" datasets.
    losses: optional list onto which to append extra training losses
    **kwargs: additional arguments to pass to encoder_function

  Returns:
    Tuple of:
        encoder_output: Encoder representation.
            [batch_size, input_length, hidden_dim]
        encoder_decoder_attention_bias: Bias and mask weights for
            encoder-decoder attention. [batch_size, input_length]
  """
  inputs = common_layers.flatten4d3d(inputs)

  encoder_input, self_attention_bias, encoder_decoder_attention_bias = (
      transformer_prepare_encoder(
          inputs, target_space, hparams, features=features))

  mlperf_log.transformer_print(
      key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
      value=hparams.layer_prepostprocess_dropout,
      hparams=hparams)

  encoder_input = tf.nn.dropout(encoder_input,
                                1.0 - hparams.layer_prepostprocess_dropout)

  attn_bias_for_padding = None
  # Otherwise the encoder will just use encoder_self_attention_bias.
  if hparams.unidirectional_encoder:
    attn_bias_for_padding = encoder_decoder_attention_bias

  encoder_output = encoder_function(
      encoder_input,
      self_attention_bias,
      hparams,
      nonpadding=features_to_nonpadding(features, "inputs"),
      save_weights_to=attention_weights,
      make_image_summary=not common_layers.is_xla_compiled(),
      losses=losses,
      attn_bias_for_padding=attn_bias_for_padding,
      **kwargs)

  return encoder_output, encoder_decoder_attention_bias
[ "def", "transformer_encode", "(", "encoder_function", ",", "inputs", ",", "target_space", ",", "hparams", ",", "attention_weights", "=", "None", ",", "features", "=", "None", ",", "losses", "=", "None", ",", "*", "*", "kwargs", ")", ":", "inputs", "=", "common_layers", ".", "flatten4d3d", "(", "inputs", ")", "encoder_input", ",", "self_attention_bias", ",", "encoder_decoder_attention_bias", "=", "(", "transformer_prepare_encoder", "(", "inputs", ",", "target_space", ",", "hparams", ",", "features", "=", "features", ")", ")", "mlperf_log", ".", "transformer_print", "(", "key", "=", "mlperf_log", ".", "MODEL_HP_LAYER_POSTPROCESS_DROPOUT", ",", "value", "=", "hparams", ".", "layer_prepostprocess_dropout", ",", "hparams", "=", "hparams", ")", "encoder_input", "=", "tf", ".", "nn", ".", "dropout", "(", "encoder_input", ",", "1.0", "-", "hparams", ".", "layer_prepostprocess_dropout", ")", "attn_bias_for_padding", "=", "None", "# Otherwise the encoder will just use encoder_self_attention_bias.", "if", "hparams", ".", "unidirectional_encoder", ":", "attn_bias_for_padding", "=", "encoder_decoder_attention_bias", "encoder_output", "=", "encoder_function", "(", "encoder_input", ",", "self_attention_bias", ",", "hparams", ",", "nonpadding", "=", "features_to_nonpadding", "(", "features", ",", "\"inputs\"", ")", ",", "save_weights_to", "=", "attention_weights", ",", "make_image_summary", "=", "not", "common_layers", ".", "is_xla_compiled", "(", ")", ",", "losses", "=", "losses", ",", "attn_bias_for_padding", "=", "attn_bias_for_padding", ",", "*", "*", "kwargs", ")", "return", "encoder_output", ",", "encoder_decoder_attention_bias" ]
Encode transformer inputs.

Args:
  encoder_function: the encoder function
  inputs: Transformer inputs [batch_size, input_length, 1, hidden_dim] which
    will be flattened along the two spatial dimensions.
  target_space: scalar, target space ID.
  hparams: hyperparameters for model.
  attention_weights: weight to store attention to.
  features: optionally pass the entire features dictionary as well. This is
    needed now for "packed" datasets.
  losses: optional list onto which to append extra training losses
  **kwargs: additional arguments to pass to encoder_function

Returns:
  Tuple of:
      encoder_output: Encoder representation.
          [batch_size, input_length, hidden_dim]
      encoder_decoder_attention_bias: Bias and mask weights for
          encoder-decoder attention. [batch_size, input_length]
[ "Encode", "transformer", "inputs", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L57-L111
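As the docstring above notes, `transformer_encode` first flattens its 4-D inputs to 3-D. For the [batch_size, input_length, 1, hidden_dim] shape it documents, `common_layers.flatten4d3d` amounts to merging the two middle axes. A quick runnable check of that shape contract (sizes are illustrative, and the equivalence to a plain reshape holds for this static shape, which is an assumption worth stating):

import tensorflow as tf

x = tf.zeros([8, 50, 1, 512])           # [batch, length, 1, hidden]
x3d = tf.reshape(x, [8, 50 * 1, 512])   # what flatten4d3d amounts to here
assert x3d.shape.as_list() == [8, 50, 512]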
22,518
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_decode
def transformer_decode(decoder_function,
                       decoder_input,
                       encoder_output,
                       encoder_decoder_attention_bias,
                       decoder_self_attention_bias,
                       hparams,
                       attention_weights=None,
                       cache=None,
                       decode_loop_step=None,
                       nonpadding=None,
                       losses=None,
                       **kwargs):
  """Decode Transformer outputs from encoder representation.

  Args:
    decoder_function: the decoder function
    decoder_input: inputs to bottom of the model. [batch_size, decoder_length,
      hidden_dim]
    encoder_output: Encoder representation. [batch_size, input_length,
      hidden_dim]
    encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder
      attention. [batch_size, input_length]
    decoder_self_attention_bias: Bias and mask weights for decoder
      self-attention. [batch_size, decoder_length]
    hparams: hyperparameters for model.
    attention_weights: weight to store attention to.
    cache: dict, containing tensors which are the results of previous
      attentions, used for fast decoding.
    decode_loop_step: An integer, step number of the decoding loop. Only used
      for inference on TPU.
    nonpadding: optional Tensor with shape [batch_size, decoder_length]
    losses: optional list onto which to append extra training losses
    **kwargs: additional arguments to pass to decoder_function

  Returns:
    Final decoder representation. [batch_size, decoder_length, hidden_dim]
  """
  mlperf_log.transformer_print(
      key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
      value=hparams.layer_prepostprocess_dropout,
      hparams=hparams)
  decoder_input = tf.nn.dropout(decoder_input,
                                1.0 - hparams.layer_prepostprocess_dropout)

  decoder_output = decoder_function(
      decoder_input,
      encoder_output,
      decoder_self_attention_bias,
      encoder_decoder_attention_bias,
      hparams,
      cache=cache,
      decode_loop_step=decode_loop_step,
      nonpadding=nonpadding,
      save_weights_to=attention_weights,
      losses=losses,
      **kwargs)

  if (common_layers.is_xla_compiled() and
      hparams.mode == tf.estimator.ModeKeys.TRAIN):
    # TPU does not react kindly to extra dimensions.
    # TODO(noam): remove this once TPU is more forgiving of extra dims.
    return decoder_output
  else:
    # Expand since t2t expects 4d tensors.
    return tf.expand_dims(decoder_output, axis=2)
python
def transformer_decode(decoder_function,
                       decoder_input,
                       encoder_output,
                       encoder_decoder_attention_bias,
                       decoder_self_attention_bias,
                       hparams,
                       attention_weights=None,
                       cache=None,
                       decode_loop_step=None,
                       nonpadding=None,
                       losses=None,
                       **kwargs):
  """Decode Transformer outputs from encoder representation.

  Args:
    decoder_function: the decoder function
    decoder_input: inputs to bottom of the model. [batch_size, decoder_length,
      hidden_dim]
    encoder_output: Encoder representation. [batch_size, input_length,
      hidden_dim]
    encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder
      attention. [batch_size, input_length]
    decoder_self_attention_bias: Bias and mask weights for decoder
      self-attention. [batch_size, decoder_length]
    hparams: hyperparameters for model.
    attention_weights: weight to store attention to.
    cache: dict, containing tensors which are the results of previous
      attentions, used for fast decoding.
    decode_loop_step: An integer, step number of the decoding loop. Only used
      for inference on TPU.
    nonpadding: optional Tensor with shape [batch_size, decoder_length]
    losses: optional list onto which to append extra training losses
    **kwargs: additional arguments to pass to decoder_function

  Returns:
    Final decoder representation. [batch_size, decoder_length, hidden_dim]
  """
  mlperf_log.transformer_print(
      key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
      value=hparams.layer_prepostprocess_dropout,
      hparams=hparams)
  decoder_input = tf.nn.dropout(decoder_input,
                                1.0 - hparams.layer_prepostprocess_dropout)

  decoder_output = decoder_function(
      decoder_input,
      encoder_output,
      decoder_self_attention_bias,
      encoder_decoder_attention_bias,
      hparams,
      cache=cache,
      decode_loop_step=decode_loop_step,
      nonpadding=nonpadding,
      save_weights_to=attention_weights,
      losses=losses,
      **kwargs)

  if (common_layers.is_xla_compiled() and
      hparams.mode == tf.estimator.ModeKeys.TRAIN):
    # TPU does not react kindly to extra dimensions.
    # TODO(noam): remove this once TPU is more forgiving of extra dims.
    return decoder_output
  else:
    # Expand since t2t expects 4d tensors.
    return tf.expand_dims(decoder_output, axis=2)
[ "def", "transformer_decode", "(", "decoder_function", ",", "decoder_input", ",", "encoder_output", ",", "encoder_decoder_attention_bias", ",", "decoder_self_attention_bias", ",", "hparams", ",", "attention_weights", "=", "None", ",", "cache", "=", "None", ",", "decode_loop_step", "=", "None", ",", "nonpadding", "=", "None", ",", "losses", "=", "None", ",", "*", "*", "kwargs", ")", ":", "mlperf_log", ".", "transformer_print", "(", "key", "=", "mlperf_log", ".", "MODEL_HP_LAYER_POSTPROCESS_DROPOUT", ",", "value", "=", "hparams", ".", "layer_prepostprocess_dropout", ",", "hparams", "=", "hparams", ")", "decoder_input", "=", "tf", ".", "nn", ".", "dropout", "(", "decoder_input", ",", "1.0", "-", "hparams", ".", "layer_prepostprocess_dropout", ")", "decoder_output", "=", "decoder_function", "(", "decoder_input", ",", "encoder_output", ",", "decoder_self_attention_bias", ",", "encoder_decoder_attention_bias", ",", "hparams", ",", "cache", "=", "cache", ",", "decode_loop_step", "=", "decode_loop_step", ",", "nonpadding", "=", "nonpadding", ",", "save_weights_to", "=", "attention_weights", ",", "losses", "=", "losses", ",", "*", "*", "kwargs", ")", "if", "(", "common_layers", ".", "is_xla_compiled", "(", ")", "and", "hparams", ".", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ")", ":", "# TPU does not react kindly to extra dimensions.", "# TODO(noam): remove this once TPU is more forgiving of extra dims.", "return", "decoder_output", "else", ":", "# Expand since t2t expects 4d tensors.", "return", "tf", ".", "expand_dims", "(", "decoder_output", ",", "axis", "=", "2", ")" ]
Decode Transformer outputs from encoder representation.

Args:
  decoder_function: the decoder function
  decoder_input: inputs to bottom of the model. [batch_size, decoder_length,
    hidden_dim]
  encoder_output: Encoder representation. [batch_size, input_length,
    hidden_dim]
  encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder
    attention. [batch_size, input_length]
  decoder_self_attention_bias: Bias and mask weights for decoder
    self-attention. [batch_size, decoder_length]
  hparams: hyperparameters for model.
  attention_weights: weight to store attention to.
  cache: dict, containing tensors which are the results of previous
    attentions, used for fast decoding.
  decode_loop_step: An integer, step number of the decoding loop. Only used
    for inference on TPU.
  nonpadding: optional Tensor with shape [batch_size, decoder_length]
  losses: optional list onto which to append extra training losses
  **kwargs: additional arguments to pass to decoder_function

Returns:
  Final decoder representation. [batch_size, decoder_length, hidden_dim]
[ "Decode", "Transformer", "outputs", "from", "encoder", "representation", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L114-L178
22,519
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
_init_transformer_cache
def _init_transformer_cache(cache, hparams, batch_size, attention_init_length,
                            encoder_output, encoder_decoder_attention_bias,
                            scope_prefix):
  """Create the initial cache for Transformer fast decoding."""
  key_channels = hparams.attention_key_channels or hparams.hidden_size
  value_channels = hparams.attention_value_channels or hparams.hidden_size
  num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers
  vars_3d_num_heads = (
      hparams.num_heads if hparams.get("attention_variables_3d") else 0)

  if cache is None:
    cache = {}
  cache.update({
      "layer_%d" % layer: {  # pylint: disable=g-complex-comprehension
          "k":
              common_attention.split_heads(
                  tf.zeros([batch_size, attention_init_length, key_channels]),
                  hparams.num_heads),
          "v":
              common_attention.split_heads(
                  tf.zeros([batch_size, attention_init_length, value_channels]),
                  hparams.num_heads),
      } for layer in range(num_layers)
  })

  # If `ffn_layer` is in `["dense_relu_dense" or "conv_hidden_relu"]`, then the
  # cache key "f" won't be used, which means that the` shape of cache["f"]`
  # won't be changed to
  # `[beamsize*batch_size, decode_length, hparams.hidden_size]` and may cause
  # error when applying `nest.map reshape function` on it.
  if hparams.ffn_layer not in ["dense_relu_dense", "conv_hidden_relu"]:
    for layer in range(num_layers):
      cache["layer_%d" % layer]["f"] = tf.zeros(
          [batch_size, 0, hparams.hidden_size])

  if encoder_output is not None:
    for layer in range(num_layers):
      layer_name = "layer_%d" % layer
      with tf.variable_scope(
          "%sdecoder/%s/encdec_attention/multihead_attention" %
          (scope_prefix, layer_name)):
        k_encdec = common_attention.compute_attention_component(
            encoder_output, key_channels, name="k",
            vars_3d_num_heads=vars_3d_num_heads)
        k_encdec = common_attention.split_heads(k_encdec, hparams.num_heads)
        v_encdec = common_attention.compute_attention_component(
            encoder_output, value_channels, name="v",
            vars_3d_num_heads=vars_3d_num_heads)
        v_encdec = common_attention.split_heads(v_encdec, hparams.num_heads)
      cache[layer_name]["k_encdec"] = k_encdec
      cache[layer_name]["v_encdec"] = v_encdec

    cache["encoder_output"] = encoder_output
    cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias
  return cache
python
def _init_transformer_cache(cache, hparams, batch_size, attention_init_length,
                            encoder_output, encoder_decoder_attention_bias,
                            scope_prefix):
  """Create the initial cache for Transformer fast decoding."""
  key_channels = hparams.attention_key_channels or hparams.hidden_size
  value_channels = hparams.attention_value_channels or hparams.hidden_size
  num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers
  vars_3d_num_heads = (
      hparams.num_heads if hparams.get("attention_variables_3d") else 0)

  if cache is None:
    cache = {}
  cache.update({
      "layer_%d" % layer: {  # pylint: disable=g-complex-comprehension
          "k":
              common_attention.split_heads(
                  tf.zeros([batch_size, attention_init_length, key_channels]),
                  hparams.num_heads),
          "v":
              common_attention.split_heads(
                  tf.zeros([batch_size, attention_init_length, value_channels]),
                  hparams.num_heads),
      } for layer in range(num_layers)
  })

  # If `ffn_layer` is in `["dense_relu_dense" or "conv_hidden_relu"]`, then the
  # cache key "f" won't be used, which means that the` shape of cache["f"]`
  # won't be changed to
  # `[beamsize*batch_size, decode_length, hparams.hidden_size]` and may cause
  # error when applying `nest.map reshape function` on it.
  if hparams.ffn_layer not in ["dense_relu_dense", "conv_hidden_relu"]:
    for layer in range(num_layers):
      cache["layer_%d" % layer]["f"] = tf.zeros(
          [batch_size, 0, hparams.hidden_size])

  if encoder_output is not None:
    for layer in range(num_layers):
      layer_name = "layer_%d" % layer
      with tf.variable_scope(
          "%sdecoder/%s/encdec_attention/multihead_attention" %
          (scope_prefix, layer_name)):
        k_encdec = common_attention.compute_attention_component(
            encoder_output, key_channels, name="k",
            vars_3d_num_heads=vars_3d_num_heads)
        k_encdec = common_attention.split_heads(k_encdec, hparams.num_heads)
        v_encdec = common_attention.compute_attention_component(
            encoder_output, value_channels, name="v",
            vars_3d_num_heads=vars_3d_num_heads)
        v_encdec = common_attention.split_heads(v_encdec, hparams.num_heads)
      cache[layer_name]["k_encdec"] = k_encdec
      cache[layer_name]["v_encdec"] = v_encdec

    cache["encoder_output"] = encoder_output
    cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias
  return cache
[ "def", "_init_transformer_cache", "(", "cache", ",", "hparams", ",", "batch_size", ",", "attention_init_length", ",", "encoder_output", ",", "encoder_decoder_attention_bias", ",", "scope_prefix", ")", ":", "key_channels", "=", "hparams", ".", "attention_key_channels", "or", "hparams", ".", "hidden_size", "value_channels", "=", "hparams", ".", "attention_value_channels", "or", "hparams", ".", "hidden_size", "num_layers", "=", "hparams", ".", "num_decoder_layers", "or", "hparams", ".", "num_hidden_layers", "vars_3d_num_heads", "=", "(", "hparams", ".", "num_heads", "if", "hparams", ".", "get", "(", "\"attention_variables_3d\"", ")", "else", "0", ")", "if", "cache", "is", "None", ":", "cache", "=", "{", "}", "cache", ".", "update", "(", "{", "\"layer_%d\"", "%", "layer", ":", "{", "# pylint: disable=g-complex-comprehension", "\"k\"", ":", "common_attention", ".", "split_heads", "(", "tf", ".", "zeros", "(", "[", "batch_size", ",", "attention_init_length", ",", "key_channels", "]", ")", ",", "hparams", ".", "num_heads", ")", ",", "\"v\"", ":", "common_attention", ".", "split_heads", "(", "tf", ".", "zeros", "(", "[", "batch_size", ",", "attention_init_length", ",", "value_channels", "]", ")", ",", "hparams", ".", "num_heads", ")", ",", "}", "for", "layer", "in", "range", "(", "num_layers", ")", "}", ")", "# If `ffn_layer` is in `[\"dense_relu_dense\" or \"conv_hidden_relu\"]`, then the", "# cache key \"f\" won't be used, which means that the` shape of cache[\"f\"]`", "# won't be changed to", "# `[beamsize*batch_size, decode_length, hparams.hidden_size]` and may cause", "# error when applying `nest.map reshape function` on it.", "if", "hparams", ".", "ffn_layer", "not", "in", "[", "\"dense_relu_dense\"", ",", "\"conv_hidden_relu\"", "]", ":", "for", "layer", "in", "range", "(", "num_layers", ")", ":", "cache", "[", "\"layer_%d\"", "%", "layer", "]", "[", "\"f\"", "]", "=", "tf", ".", "zeros", "(", "[", "batch_size", ",", "0", ",", "hparams", ".", "hidden_size", "]", ")", "if", "encoder_output", "is", "not", "None", ":", "for", "layer", "in", "range", "(", "num_layers", ")", ":", "layer_name", "=", "\"layer_%d\"", "%", "layer", "with", "tf", ".", "variable_scope", "(", "\"%sdecoder/%s/encdec_attention/multihead_attention\"", "%", "(", "scope_prefix", ",", "layer_name", ")", ")", ":", "k_encdec", "=", "common_attention", ".", "compute_attention_component", "(", "encoder_output", ",", "key_channels", ",", "name", "=", "\"k\"", ",", "vars_3d_num_heads", "=", "vars_3d_num_heads", ")", "k_encdec", "=", "common_attention", ".", "split_heads", "(", "k_encdec", ",", "hparams", ".", "num_heads", ")", "v_encdec", "=", "common_attention", ".", "compute_attention_component", "(", "encoder_output", ",", "value_channels", ",", "name", "=", "\"v\"", ",", "vars_3d_num_heads", "=", "vars_3d_num_heads", ")", "v_encdec", "=", "common_attention", ".", "split_heads", "(", "v_encdec", ",", "hparams", ".", "num_heads", ")", "cache", "[", "layer_name", "]", "[", "\"k_encdec\"", "]", "=", "k_encdec", "cache", "[", "layer_name", "]", "[", "\"v_encdec\"", "]", "=", "v_encdec", "cache", "[", "\"encoder_output\"", "]", "=", "encoder_output", "cache", "[", "\"encoder_decoder_attention_bias\"", "]", "=", "encoder_decoder_attention_bias", "return", "cache" ]
Create the initial cache for Transformer fast decoding.
[ "Create", "the", "initial", "cache", "for", "Transformer", "fast", "decoding", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L832-L892
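The cache built above holds, per decoder layer, empty key/value tensors that grow one step at a time during fast decoding, plus precomputed encoder-decoder keys/values when an encoder output is present. Below is a plain-NumPy mock of that layout with hypothetical sizes; it relies on `split_heads` turning [batch, length, channels] into [batch, num_heads, length, channels // num_heads], and is only an illustration of the structure.

import numpy as np

batch, heads, channels, init_len, num_layers = 2, 4, 64, 0, 2
cache = {
    "layer_%d" % layer: {
        # Per-layer key/value buffers; decoding appends along axis 2.
        "k": np.zeros([batch, heads, init_len, channels // heads]),
        "v": np.zeros([batch, heads, init_len, channels // heads]),
    }
    for layer in range(num_layers)
}
print(cache["layer_0"]["k"].shape)  # (2, 4, 0, 16)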
22,520
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_base_vq_ada_32ex_packed
def transformer_base_vq_ada_32ex_packed():
  """Set of hyperparameters for lm1b packed following tpu params."""
  hparams = transformer_base_v2()
  expert_utils.update_hparams_for_vq_gating(hparams)
  hparams.moe_num_experts = 32
  hparams.gating_type = "vq"
  # this gives us a batch size of 16 because each seq is len 256
  hparams.batch_size = 5072
  hparams.ffn_layer = "local_moe"
  hparams.shared_embedding_and_softmax_weights = False
  hparams.learning_rate_warmup_steps = 10000
  # one epoch for languagemodel_lm1b32k_packed = 27200 steps w/ bsize 128
  hparams.learning_rate_decay_steps = 27200
  hparams.num_heads = 4
  hparams.num_blocks = 1
  hparams.moe_k = 1
  hparams.num_decoder_layers = 6
  hparams.label_smoothing = 0.
  hparams.layer_prepostprocess_dropout = 0.1
  hparams.layer_postprocess_sequence = "dan"
  hparams.layer_preprocess_sequence = "none"
  hparams.weight_decay = 1e-06
  hparams.attention_dropout = 0.1
  hparams.optimizer = "Adafactor"
  hparams.learning_rate_schedule = "linear_warmup*rsqrt_decay*linear_decay"
  hparams.activation_dtype = "float32"
  hparams.learning_rate = 0.1
  hparams.learning_rate_constant = 1.0
  return hparams
python
def transformer_base_vq_ada_32ex_packed():
  """Set of hyperparameters for lm1b packed following tpu params."""
  hparams = transformer_base_v2()
  expert_utils.update_hparams_for_vq_gating(hparams)
  hparams.moe_num_experts = 32
  hparams.gating_type = "vq"
  # this gives us a batch size of 16 because each seq is len 256
  hparams.batch_size = 5072
  hparams.ffn_layer = "local_moe"
  hparams.shared_embedding_and_softmax_weights = False
  hparams.learning_rate_warmup_steps = 10000
  # one epoch for languagemodel_lm1b32k_packed = 27200 steps w/ bsize 128
  hparams.learning_rate_decay_steps = 27200
  hparams.num_heads = 4
  hparams.num_blocks = 1
  hparams.moe_k = 1
  hparams.num_decoder_layers = 6
  hparams.label_smoothing = 0.
  hparams.layer_prepostprocess_dropout = 0.1
  hparams.layer_postprocess_sequence = "dan"
  hparams.layer_preprocess_sequence = "none"
  hparams.weight_decay = 1e-06
  hparams.attention_dropout = 0.1
  hparams.optimizer = "Adafactor"
  hparams.learning_rate_schedule = "linear_warmup*rsqrt_decay*linear_decay"
  hparams.activation_dtype = "float32"
  hparams.learning_rate = 0.1
  hparams.learning_rate_constant = 1.0
  return hparams
[ "def", "transformer_base_vq_ada_32ex_packed", "(", ")", ":", "hparams", "=", "transformer_base_v2", "(", ")", "expert_utils", ".", "update_hparams_for_vq_gating", "(", "hparams", ")", "hparams", ".", "moe_num_experts", "=", "32", "hparams", ".", "gating_type", "=", "\"vq\"", "# this gives us a batch size of 16 because each seq is len 256", "hparams", ".", "batch_size", "=", "5072", "hparams", ".", "ffn_layer", "=", "\"local_moe\"", "hparams", ".", "shared_embedding_and_softmax_weights", "=", "False", "hparams", ".", "learning_rate_warmup_steps", "=", "10000", "# one epoch for languagemodel_lm1b32k_packed = 27200 steps w/ bsize 128", "hparams", ".", "learning_rate_decay_steps", "=", "27200", "hparams", ".", "num_heads", "=", "4", "hparams", ".", "num_blocks", "=", "1", "hparams", ".", "moe_k", "=", "1", "hparams", ".", "num_decoder_layers", "=", "6", "hparams", ".", "label_smoothing", "=", "0.", "hparams", ".", "layer_prepostprocess_dropout", "=", "0.1", "hparams", ".", "layer_postprocess_sequence", "=", "\"dan\"", "hparams", ".", "layer_preprocess_sequence", "=", "\"none\"", "hparams", ".", "weight_decay", "=", "1e-06", "hparams", ".", "attention_dropout", "=", "0.1", "hparams", ".", "optimizer", "=", "\"Adafactor\"", "hparams", ".", "learning_rate_schedule", "=", "\"linear_warmup*rsqrt_decay*linear_decay\"", "hparams", ".", "activation_dtype", "=", "\"float32\"", "hparams", ".", "learning_rate", "=", "0.1", "hparams", ".", "learning_rate_constant", "=", "1.0", "return", "hparams" ]
Set of hyperparameters for lm1b packed following tpu params.
[ "Set", "of", "hyperparameters", "for", "lm1b", "packed", "following", "tpu", "params", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1651-L1679
22,521
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_base_v3
def transformer_base_v3():
  """Base parameters for Transformer model."""
  # Update parameters here, then occasionally cut a versioned set, e.g.
  # transformer_base_v2.
  hparams = transformer_base_v2()
  hparams.optimizer_adam_beta2 = 0.997
  # New way of specifying learning rate schedule.
  # Equivalent to previous version.
  hparams.learning_rate_schedule = (
      "constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size")
  hparams.learning_rate_constant = 2.0
  return hparams
python
def transformer_base_v3():
  """Base parameters for Transformer model."""
  # Update parameters here, then occasionally cut a versioned set, e.g.
  # transformer_base_v2.
  hparams = transformer_base_v2()
  hparams.optimizer_adam_beta2 = 0.997
  # New way of specifying learning rate schedule.
  # Equivalent to previous version.
  hparams.learning_rate_schedule = (
      "constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size")
  hparams.learning_rate_constant = 2.0
  return hparams
[ "def", "transformer_base_v3", "(", ")", ":", "# Update parameters here, then occasionally cut a versioned set, e.g.", "# transformer_base_v2.", "hparams", "=", "transformer_base_v2", "(", ")", "hparams", ".", "optimizer_adam_beta2", "=", "0.997", "# New way of specifying learning rate schedule.", "# Equivalent to previous version.", "hparams", ".", "learning_rate_schedule", "=", "(", "\"constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size\"", ")", "hparams", ".", "learning_rate_constant", "=", "2.0", "return", "hparams" ]
Base parameters for Transformer model.
[ "Base", "parameters", "for", "Transformer", "model", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1754-L1765
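The schedule string above composes named factors by multiplication. The sketch below spells out the factor semantics as a plain function; the per-factor formulas are assumptions based on the factor names and tensor2tensor's conventions, and the constants are illustrative rather than taken from any specific hparams set.

import math

def lr_at_step(step, constant=2.0, warmup_steps=8000, hidden_size=512):
  # Each name in "constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size"
  # contributes one multiplicative term.
  linear_warmup = min(1.0, step / warmup_steps)
  rsqrt_decay = 1.0 / math.sqrt(max(step, warmup_steps))
  rsqrt_hidden_size = 1.0 / math.sqrt(hidden_size)
  return constant * linear_warmup * rsqrt_decay * rsqrt_hidden_size

print(lr_at_step(1000))   # still warming up linearly
print(lr_at_step(20000))  # decaying as 1/sqrt(step)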
22,522
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_big
def transformer_big():
  """HParams for transformer big model on WMT."""
  hparams = transformer_base()
  hparams.hidden_size = 1024
  hparams.filter_size = 4096
  # Reduce batch size to 2048 from 4096 to be able to train the model on a GPU
  # with 12 GB memory. For example, NVIDIA TITAN V GPU.
  hparams.batch_size = 2048
  hparams.num_heads = 16
  hparams.layer_prepostprocess_dropout = 0.3
  return hparams
python
def transformer_big():
  """HParams for transformer big model on WMT."""
  hparams = transformer_base()
  hparams.hidden_size = 1024
  hparams.filter_size = 4096
  # Reduce batch size to 2048 from 4096 to be able to train the model on a GPU
  # with 12 GB memory. For example, NVIDIA TITAN V GPU.
  hparams.batch_size = 2048
  hparams.num_heads = 16
  hparams.layer_prepostprocess_dropout = 0.3
  return hparams
[ "def", "transformer_big", "(", ")", ":", "hparams", "=", "transformer_base", "(", ")", "hparams", ".", "hidden_size", "=", "1024", "hparams", ".", "filter_size", "=", "4096", "# Reduce batch size to 2048 from 4096 to be able to train the model on a GPU", "# with 12 GB memory. For example, NVIDIA TITAN V GPU.", "hparams", ".", "batch_size", "=", "2048", "hparams", ".", "num_heads", "=", "16", "hparams", ".", "layer_prepostprocess_dropout", "=", "0.3", "return", "hparams" ]
HParams for transformer big model on WMT.
[ "HParams", "for", "transformer", "big", "model", "on", "WMT", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1776-L1786
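Hparams sets like the one above are registered by name and looked up at training time. A hedged usage sketch follows; the registry lookup and the comma-separated override string are assumed from tensor2tensor's public API, and the override values are illustrative.

from tensor2tensor.utils import registry

hparams = registry.hparams("transformer_big")
# Individual fields can be overridden without defining a new set.
hparams.parse("batch_size=4096,learning_rate_warmup_steps=8000")
print(hparams.hidden_size, hparams.num_heads)  # 1024 16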
22,523
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_tall_finetune_textclass
def transformer_tall_finetune_textclass():
  """Hparams for transformer on LM for finetuning on text class problems."""
  hparams = transformer_tall()
  hparams.learning_rate_constant = 6.25e-5
  hparams.learning_rate_schedule = ("linear_warmup*constant*linear_decay")
  hparams.multiproblem_schedule_max_examples = 0
  hparams.multiproblem_target_eval_only = True
  hparams.learning_rate_warmup_steps = 50
  # Set train steps to learning_rate_decay_steps or less
  hparams.learning_rate_decay_steps = 25000
  hparams.multiproblem_reweight_label_loss = True
  hparams.multiproblem_label_weight = 0.95
  return hparams
python
def transformer_tall_finetune_textclass():
  """Hparams for transformer on LM for finetuning on text class problems."""
  hparams = transformer_tall()
  hparams.learning_rate_constant = 6.25e-5
  hparams.learning_rate_schedule = ("linear_warmup*constant*linear_decay")
  hparams.multiproblem_schedule_max_examples = 0
  hparams.multiproblem_target_eval_only = True
  hparams.learning_rate_warmup_steps = 50
  # Set train steps to learning_rate_decay_steps or less
  hparams.learning_rate_decay_steps = 25000
  hparams.multiproblem_reweight_label_loss = True
  hparams.multiproblem_label_weight = 0.95
  return hparams
[ "def", "transformer_tall_finetune_textclass", "(", ")", ":", "hparams", "=", "transformer_tall", "(", ")", "hparams", ".", "learning_rate_constant", "=", "6.25e-5", "hparams", ".", "learning_rate_schedule", "=", "(", "\"linear_warmup*constant*linear_decay\"", ")", "hparams", ".", "multiproblem_schedule_max_examples", "=", "0", "hparams", ".", "multiproblem_target_eval_only", "=", "True", "hparams", ".", "learning_rate_warmup_steps", "=", "50", "# Set train steps to learning_rate_decay_steps or less", "hparams", ".", "learning_rate_decay_steps", "=", "25000", "hparams", ".", "multiproblem_reweight_label_loss", "=", "True", "hparams", ".", "multiproblem_label_weight", "=", "0.95", "return", "hparams" ]
Hparams for transformer on LM for finetuning on text class problems.
[ "Hparams", "for", "transformer", "on", "LM", "for", "finetuning", "on", "text", "class", "problems", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1875-L1887
22,524
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_tall_pretrain_lm_tpu_adafactor_large
def transformer_tall_pretrain_lm_tpu_adafactor_large():
  """Hparams for transformer on LM pretraining on TPU, large model."""
  hparams = transformer_tall_pretrain_lm_tpu_adafactor()
  hparams.hidden_size = 1024
  hparams.num_heads = 16
  hparams.filter_size = 32768  # max fitting in 16G memory is 49152, batch 2
  hparams.batch_size = 4
  hparams.multiproblem_mixing_schedule = "constant"
  # Task order: lm/en-de/en-fr/en-ro/de-en/fr-en/ro-en/cnndm/mnli/squad.
  hparams.multiproblem_per_task_threshold = "320,80,160,1,80,160,2,20,10,5"
  return hparams
python
def transformer_tall_pretrain_lm_tpu_adafactor_large():
  """Hparams for transformer on LM pretraining on TPU, large model."""
  hparams = transformer_tall_pretrain_lm_tpu_adafactor()
  hparams.hidden_size = 1024
  hparams.num_heads = 16
  hparams.filter_size = 32768  # max fitting in 16G memory is 49152, batch 2
  hparams.batch_size = 4
  hparams.multiproblem_mixing_schedule = "constant"
  # Task order: lm/en-de/en-fr/en-ro/de-en/fr-en/ro-en/cnndm/mnli/squad.
  hparams.multiproblem_per_task_threshold = "320,80,160,1,80,160,2,20,10,5"
  return hparams
[ "def", "transformer_tall_pretrain_lm_tpu_adafactor_large", "(", ")", ":", "hparams", "=", "transformer_tall_pretrain_lm_tpu_adafactor", "(", ")", "hparams", ".", "hidden_size", "=", "1024", "hparams", ".", "num_heads", "=", "16", "hparams", ".", "filter_size", "=", "32768", "# max fitting in 16G memory is 49152, batch 2", "hparams", ".", "batch_size", "=", "4", "hparams", ".", "multiproblem_mixing_schedule", "=", "\"constant\"", "# Task order: lm/en-de/en-fr/en-ro/de-en/fr-en/ro-en/cnndm/mnli/squad.", "hparams", ".", "multiproblem_per_task_threshold", "=", "\"320,80,160,1,80,160,2,20,10,5\"", "return", "hparams" ]
Hparams for transformer on LM pretraining on TPU, large model.
[ "Hparams", "for", "transformer", "on", "LM", "pretraining", "on", "TPU", "large", "model", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1921-L1931
22,525
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_tall_pretrain_lm_tpu
def transformer_tall_pretrain_lm_tpu():
  """Hparams for transformer on LM pretraining on TPU with AdamW."""
  hparams = transformer_tall_pretrain_lm_tpu_adafactor()
  # Optimizer gets reset in update_hparams_for_tpu so we set it again here.
  hparams.learning_rate_constant = 2e-4
  hparams.learning_rate_schedule = ("linear_warmup * constant * cosdecay")
  hparams.optimizer = "adam_w"
  return hparams
python
def transformer_tall_pretrain_lm_tpu():
  """Hparams for transformer on LM pretraining on TPU with AdamW."""
  hparams = transformer_tall_pretrain_lm_tpu_adafactor()
  # Optimizer gets reset in update_hparams_for_tpu so we set it again here.
  hparams.learning_rate_constant = 2e-4
  hparams.learning_rate_schedule = ("linear_warmup * constant * cosdecay")
  hparams.optimizer = "adam_w"
  return hparams
[ "def", "transformer_tall_pretrain_lm_tpu", "(", ")", ":", "hparams", "=", "transformer_tall_pretrain_lm_tpu_adafactor", "(", ")", "# Optimizer gets reset in update_hparams_for_tpu so we set it again here.", "hparams", ".", "learning_rate_constant", "=", "2e-4", "hparams", ".", "learning_rate_schedule", "=", "(", "\"linear_warmup * constant * cosdecay\"", ")", "hparams", ".", "optimizer", "=", "\"adam_w\"", "return", "hparams" ]
Hparams for transformer on LM pretraining on TPU with AdamW.
[ "Hparams", "for", "transformer", "on", "LM", "pretraining", "on", "TPU", "with", "AdamW", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1935-L1942
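The "adam_w" optimizer named above refers to decoupled weight decay (Loshchilov & Hutter, "Decoupled Weight Decay Regularization"). A minimal NumPy sketch of one update step follows; all names and hyperparameter values are local to the sketch, and the exact decay scaling in tensor2tensor's implementation is not guaranteed to match.

import numpy as np

def adam_w_step(w, grad, m, v, t, lr=2e-4, beta1=0.9, beta2=0.999,
                eps=1e-8, weight_decay=0.01):
  # Standard Adam moment updates with bias correction.
  m = beta1 * m + (1 - beta1) * grad
  v = beta2 * v + (1 - beta2) * grad ** 2
  m_hat = m / (1 - beta1 ** t)
  v_hat = v / (1 - beta2 ** t)
  # Decoupled decay: the weight-decay term bypasses the moment estimates,
  # unlike L2 regularization folded into the gradient.
  w = w - lr * (m_hat / (np.sqrt(v_hat) + eps) + weight_decay * w)
  return w, m, v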
22,526
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_base_single_gpu
def transformer_base_single_gpu():
  """HParams for transformer base model for single GPU."""
  hparams = transformer_base()
  hparams.batch_size = 1024
  hparams.learning_rate_schedule = "constant*linear_warmup*rsqrt_decay"
  hparams.learning_rate_constant = 0.1
  hparams.learning_rate_warmup_steps = 16000
  return hparams
python
def transformer_base_single_gpu():
  """HParams for transformer base model for single GPU."""
  hparams = transformer_base()
  hparams.batch_size = 1024
  hparams.learning_rate_schedule = "constant*linear_warmup*rsqrt_decay"
  hparams.learning_rate_constant = 0.1
  hparams.learning_rate_warmup_steps = 16000
  return hparams
[ "def", "transformer_base_single_gpu", "(", ")", ":", "hparams", "=", "transformer_base", "(", ")", "hparams", ".", "batch_size", "=", "1024", "hparams", ".", "learning_rate_schedule", "=", "\"constant*linear_warmup*rsqrt_decay\"", "hparams", ".", "learning_rate_constant", "=", "0.1", "hparams", ".", "learning_rate_warmup_steps", "=", "16000", "return", "hparams" ]
HParams for transformer base model for single GPU.
[ "HParams", "for", "transformer", "base", "model", "for", "single", "GPU", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1963-L1970
22,527
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_parsing_base
def transformer_parsing_base():
  """HParams for parsing on WSJ only."""
  hparams = transformer_base()
  hparams.attention_dropout = 0.2
  hparams.layer_prepostprocess_dropout = 0.2
  hparams.max_length = 512
  hparams.learning_rate_warmup_steps = 16000
  hparams.hidden_size = 1024
  hparams.learning_rate = 0.05
  hparams.shared_embedding_and_softmax_weights = False
  return hparams
python
def transformer_parsing_base():
  """HParams for parsing on WSJ only."""
  hparams = transformer_base()
  hparams.attention_dropout = 0.2
  hparams.layer_prepostprocess_dropout = 0.2
  hparams.max_length = 512
  hparams.learning_rate_warmup_steps = 16000
  hparams.hidden_size = 1024
  hparams.learning_rate = 0.05
  hparams.shared_embedding_and_softmax_weights = False
  return hparams
[ "def", "transformer_parsing_base", "(", ")", ":", "hparams", "=", "transformer_base", "(", ")", "hparams", ".", "attention_dropout", "=", "0.2", "hparams", ".", "layer_prepostprocess_dropout", "=", "0.2", "hparams", ".", "max_length", "=", "512", "hparams", ".", "learning_rate_warmup_steps", "=", "16000", "hparams", ".", "hidden_size", "=", "1024", "hparams", ".", "learning_rate", "=", "0.05", "hparams", ".", "shared_embedding_and_softmax_weights", "=", "False", "return", "hparams" ]
HParams for parsing on WSJ only.
[ "HParams", "for", "parsing", "on", "WSJ", "only", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1983-L1993
22,528
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_parsing_big
def transformer_parsing_big():
  """HParams for parsing on WSJ semi-supervised."""
  hparams = transformer_big()
  hparams.max_length = 512
  hparams.shared_source_target_embedding = False
  hparams.learning_rate_warmup_steps = 4000
  hparams.layer_prepostprocess_dropout = 0.1
  hparams.batch_size = 2048
  hparams.learning_rate = 0.05
  return hparams
python
def transformer_parsing_big():
  """HParams for parsing on WSJ semi-supervised."""
  hparams = transformer_big()
  hparams.max_length = 512
  hparams.shared_source_target_embedding = False
  hparams.learning_rate_warmup_steps = 4000
  hparams.layer_prepostprocess_dropout = 0.1
  hparams.batch_size = 2048
  hparams.learning_rate = 0.05
  return hparams
[ "def", "transformer_parsing_big", "(", ")", ":", "hparams", "=", "transformer_big", "(", ")", "hparams", ".", "max_length", "=", "512", "hparams", ".", "shared_source_target_embedding", "=", "False", "hparams", ".", "learning_rate_warmup_steps", "=", "4000", "hparams", ".", "layer_prepostprocess_dropout", "=", "0.1", "hparams", ".", "batch_size", "=", "2048", "hparams", ".", "learning_rate", "=", "0.05", "return", "hparams" ]
HParams for parsing on WSJ semi-supervised.
[ "HParams", "for", "parsing", "on", "WSJ", "semi", "-", "supervised", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1997-L2006
22,529
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_relative
def transformer_relative():
  """Use relative position embeddings instead of absolute position encodings."""
  hparams = transformer_base()
  hparams.pos = None
  hparams.self_attention_type = "dot_product_relative"
  hparams.max_relative_position = 20
  return hparams
python
def transformer_relative():
  """Use relative position embeddings instead of absolute position encodings."""
  hparams = transformer_base()
  hparams.pos = None
  hparams.self_attention_type = "dot_product_relative"
  hparams.max_relative_position = 20
  return hparams
[ "def", "transformer_relative", "(", ")", ":", "hparams", "=", "transformer_base", "(", ")", "hparams", ".", "pos", "=", "None", "hparams", ".", "self_attention_type", "=", "\"dot_product_relative\"", "hparams", ".", "max_relative_position", "=", "20", "return", "hparams" ]
Use relative position embeddings instead of absolute position encodings.
[ "Use", "relative", "position", "embeddings", "instead", "of", "absolute", "position", "encodings", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2263-L2269
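With `self_attention_type = "dot_product_relative"`, attention logits incorporate learned embeddings of clipped pairwise offsets (Shaw et al. 2018, "Self-Attention with Relative Position Representations"). The NumPy sketch below illustrates the clipping that `max_relative_position = 20` implies; the index arithmetic is assumed from the paper, not copied from tensor2tensor.

import numpy as np

max_relative_position = 20
length = 5
# Pairwise offsets j - i, clipped to [-max, max], then shifted so they can
# index an embedding table of size 2 * max + 1.
distance = np.arange(length)[None, :] - np.arange(length)[:, None]
clipped = np.clip(distance, -max_relative_position, max_relative_position)
embedding_index = clipped + max_relative_position  # values in [0, 2*max]
print(embedding_index)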
22,530
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_mlperf_tpu
def transformer_mlperf_tpu():
  """HParams for Transformer model on TPU for MLPerf on TPU 2x2."""
  hparams = transformer_base_v3()
  hparams.mlperf_mode = True
  hparams.symbol_modality_num_shards = 1
  hparams.max_length = 256  # ignored when using "_packed" problems
  hparams.batch_size = 2048  # per-chip batch size matches the reference model
  hparams.hidden_size = 1024
  hparams.filter_size = 4096
  hparams.num_heads = 16
  hparams.attention_dropout_broadcast_dims = "0,1"  # batch, heads
  hparams.relu_dropout_broadcast_dims = "1"  # length
  hparams.layer_prepostprocess_dropout_broadcast_dims = "1"  # length
  return hparams
python
def transformer_mlperf_tpu():
  """HParams for Transformer model on TPU for MLPerf on TPU 2x2."""
  hparams = transformer_base_v3()
  hparams.mlperf_mode = True
  hparams.symbol_modality_num_shards = 1
  hparams.max_length = 256  # ignored when using "_packed" problems
  hparams.batch_size = 2048  # per-chip batch size matches the reference model
  hparams.hidden_size = 1024
  hparams.filter_size = 4096
  hparams.num_heads = 16
  hparams.attention_dropout_broadcast_dims = "0,1"  # batch, heads
  hparams.relu_dropout_broadcast_dims = "1"  # length
  hparams.layer_prepostprocess_dropout_broadcast_dims = "1"  # length
  return hparams
[ "def", "transformer_mlperf_tpu", "(", ")", ":", "hparams", "=", "transformer_base_v3", "(", ")", "hparams", ".", "mlperf_mode", "=", "True", "hparams", ".", "symbol_modality_num_shards", "=", "1", "hparams", ".", "max_length", "=", "256", "# ignored when using \"_packed\" problems", "hparams", ".", "batch_size", "=", "2048", "# per-chip batch size matches the reference model", "hparams", ".", "hidden_size", "=", "1024", "hparams", ".", "filter_size", "=", "4096", "hparams", ".", "num_heads", "=", "16", "hparams", ".", "attention_dropout_broadcast_dims", "=", "\"0,1\"", "# batch, heads", "hparams", ".", "relu_dropout_broadcast_dims", "=", "\"1\"", "# length", "hparams", ".", "layer_prepostprocess_dropout_broadcast_dims", "=", "\"1\"", "# length", "return", "hparams" ]
HParams for Transformer model on TPU for MLPerf on TPU 2x2.
[ "HParams", "for", "Transformer", "model", "on", "TPU", "for", "MLPerf", "on", "TPU", "2x2", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2300-L2313
22,531
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
update_hparams_for_tpu
def update_hparams_for_tpu(hparams):
  """Change hparams to be compatible with TPU training."""

  # Adafactor uses less memory than Adam.
  # switch to Adafactor with its recommended learning rate scheme.
  hparams.optimizer = "Adafactor"
  hparams.learning_rate_schedule = "rsqrt_decay"
  hparams.learning_rate_warmup_steps = 10000

  # Avoid an expensive concat on TPU.
  # >1 shards helps with faster parameter distribution on multi-GPU machines
  hparams.symbol_modality_num_shards = 1

  # Adaptive batch sizes and sequence lengths are not supported on TPU.
  # Instead, every batch has the same sequence length and the same batch size.
  # Longer sequences are dropped and shorter ones are padded.
  #
  # It is therefore suggested to use a problem where examples have been combined
  # to a longer length, e.g. the "_packed" problems.
  #
  # For problems with variable sequence lengths, this parameter controls the
  # maximum sequence length. Shorter sequences are dropped and longer ones
  # are padded.
  #
  # For problems with fixed sequence lengths - e.g. the "_packed" problems,
  # this hyperparameter is ignored.
  hparams.max_length = 64

  # TPUs have less memory than GPUs, so decrease the batch size
  hparams.batch_size = 2048

  # Using noise broadcast in the dropout layers saves memory during training.
  hparams.attention_dropout_broadcast_dims = "0,1"  # batch, heads
  hparams.relu_dropout_broadcast_dims = "1"  # length
  hparams.layer_prepostprocess_dropout_broadcast_dims = "1"  # length
  return hparams
python
def update_hparams_for_tpu(hparams):
  """Change hparams to be compatible with TPU training."""

  # Adafactor uses less memory than Adam.
  # switch to Adafactor with its recommended learning rate scheme.
  hparams.optimizer = "Adafactor"
  hparams.learning_rate_schedule = "rsqrt_decay"
  hparams.learning_rate_warmup_steps = 10000

  # Avoid an expensive concat on TPU.
  # >1 shards helps with faster parameter distribution on multi-GPU machines
  hparams.symbol_modality_num_shards = 1

  # Adaptive batch sizes and sequence lengths are not supported on TPU.
  # Instead, every batch has the same sequence length and the same batch size.
  # Longer sequences are dropped and shorter ones are padded.
  #
  # It is therefore suggested to use a problem where examples have been combined
  # to a longer length, e.g. the "_packed" problems.
  #
  # For problems with variable sequence lengths, this parameter controls the
  # maximum sequence length. Shorter sequences are dropped and longer ones
  # are padded.
  #
  # For problems with fixed sequence lengths - e.g. the "_packed" problems,
  # this hyperparameter is ignored.
  hparams.max_length = 64

  # TPUs have less memory than GPUs, so decrease the batch size
  hparams.batch_size = 2048

  # Using noise broadcast in the dropout layers saves memory during training.
  hparams.attention_dropout_broadcast_dims = "0,1"  # batch, heads
  hparams.relu_dropout_broadcast_dims = "1"  # length
  hparams.layer_prepostprocess_dropout_broadcast_dims = "1"  # length
  return hparams
[ "def", "update_hparams_for_tpu", "(", "hparams", ")", ":", "# Adafactor uses less memory than Adam.", "# switch to Adafactor with its recommended learning rate scheme.", "hparams", ".", "optimizer", "=", "\"Adafactor\"", "hparams", ".", "learning_rate_schedule", "=", "\"rsqrt_decay\"", "hparams", ".", "learning_rate_warmup_steps", "=", "10000", "# Avoid an expensive concat on TPU.", "# >1 shards helps with faster parameter distribution on multi-GPU machines", "hparams", ".", "symbol_modality_num_shards", "=", "1", "# Adaptive batch sizes and sequence lengths are not supported on TPU.", "# Instead, every batch has the same sequence length and the same batch size.", "# Longer sequences are dropped and shorter ones are padded.", "#", "# It is therefore suggested to use a problem where examples have been combined", "# to a longer length, e.g. the \"_packed\" problems.", "#", "# For problems with variable sequence lengths, this parameter controls the", "# maximum sequence length. Shorter sequences are dropped and longer ones", "# are padded.", "#", "# For problems with fixed sequence lengths - e.g. the \"_packed\" problems,", "# this hyperparameter is ignored.", "hparams", ".", "max_length", "=", "64", "# TPUs have less memory than GPUs, so decrease the batch size", "hparams", ".", "batch_size", "=", "2048", "# Using noise broadcast in the dropout layers saves memory during training.", "hparams", ".", "attention_dropout_broadcast_dims", "=", "\"0,1\"", "# batch, heads", "hparams", ".", "relu_dropout_broadcast_dims", "=", "\"1\"", "# length", "hparams", ".", "layer_prepostprocess_dropout_broadcast_dims", "=", "\"1\"", "# length", "return", "hparams" ]
Change hparams to be compatible with TPU training.
[ "Change", "hparams", "to", "be", "compatible", "with", "TPU", "training", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2316-L2351
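The broadcast-dims strings set above shrink the dropout noise tensors by sharing one mask across the named axes, which is where the memory saving comes from. A minimal TF1-style sketch for attention weights of shape [batch, heads, length, length] with dims "0,1" (batch and heads) shared; the sizes are illustrative and this stands in for what `common_layers` does internally rather than reproducing it.

import tensorflow as tf

weights = tf.zeros([32, 16, 64, 64])  # [batch, heads, length, length]
# One mask of shape [1, 1, 64, 64] is broadcast across batch and heads,
# instead of materializing a full [32, 16, 64, 64] noise tensor.
dropped = tf.nn.dropout(weights, keep_prob=0.9, noise_shape=[1, 1, 64, 64])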
22,532
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_clean
def transformer_clean():
  """No dropout, label smoothing, max_length."""
  hparams = transformer_base_v2()
  hparams.label_smoothing = 0.0
  hparams.layer_prepostprocess_dropout = 0.0
  hparams.attention_dropout = 0.0
  hparams.relu_dropout = 0.0
  hparams.max_length = 0
  return hparams
python
def transformer_clean():
  """No dropout, label smoothing, max_length."""
  hparams = transformer_base_v2()
  hparams.label_smoothing = 0.0
  hparams.layer_prepostprocess_dropout = 0.0
  hparams.attention_dropout = 0.0
  hparams.relu_dropout = 0.0
  hparams.max_length = 0
  return hparams
[ "def", "transformer_clean", "(", ")", ":", "hparams", "=", "transformer_base_v2", "(", ")", "hparams", ".", "label_smoothing", "=", "0.0", "hparams", ".", "layer_prepostprocess_dropout", "=", "0.0", "hparams", ".", "attention_dropout", "=", "0.0", "hparams", ".", "relu_dropout", "=", "0.0", "hparams", ".", "max_length", "=", "0", "return", "hparams" ]
No dropout, label smoothing, max_length.
[ "No", "dropout", "label", "smoothing", "max_length", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2441-L2449
22,533
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_lm_tpu_0
def transformer_lm_tpu_0():
  """HParams for training languagemodel_lm1b8k on tpu.  92M Params."""
  hparams = transformer_clean_big()
  update_hparams_for_tpu(hparams)
  hparams.num_heads = 4  # Heads are expensive on TPUs.
  hparams.batch_size = 4096
  hparams.shared_embedding_and_softmax_weights = False
  hparams.layer_prepostprocess_dropout = 0.1
  return hparams
python
def transformer_lm_tpu_0():
  """HParams for training languagemodel_lm1b8k on tpu.  92M Params."""
  hparams = transformer_clean_big()
  update_hparams_for_tpu(hparams)
  hparams.num_heads = 4  # Heads are expensive on TPUs.
  hparams.batch_size = 4096
  hparams.shared_embedding_and_softmax_weights = False
  hparams.layer_prepostprocess_dropout = 0.1
  return hparams
[ "def", "transformer_lm_tpu_0", "(", ")", ":", "hparams", "=", "transformer_clean_big", "(", ")", "update_hparams_for_tpu", "(", "hparams", ")", "hparams", ".", "num_heads", "=", "4", "# Heads are expensive on TPUs.", "hparams", ".", "batch_size", "=", "4096", "hparams", ".", "shared_embedding_and_softmax_weights", "=", "False", "hparams", ".", "layer_prepostprocess_dropout", "=", "0.1", "return", "hparams" ]
HParams for training languagemodel_lm1b8k on tpu. 92M Params.
[ "HParams", "for", "training", "languagemodel_lm1b8k", "on", "tpu", ".", "92M", "Params", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2477-L2485
22,534
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_librispeech_v1
def transformer_librispeech_v1():
  """HParams for training ASR model on LibriSpeech V1."""
  hparams = transformer_base()

  hparams.num_heads = 4
  hparams.filter_size = 1024
  hparams.hidden_size = 256
  hparams.num_encoder_layers = 5
  hparams.num_decoder_layers = 3
  hparams.learning_rate = 0.15
  hparams.batch_size = 6000000

  librispeech.set_librispeech_length_hparams(hparams)
  return hparams
python
def transformer_librispeech_v1():
  """HParams for training ASR model on LibriSpeech V1."""
  hparams = transformer_base()

  hparams.num_heads = 4
  hparams.filter_size = 1024
  hparams.hidden_size = 256
  hparams.num_encoder_layers = 5
  hparams.num_decoder_layers = 3
  hparams.learning_rate = 0.15
  hparams.batch_size = 6000000

  librispeech.set_librispeech_length_hparams(hparams)
  return hparams
[ "def", "transformer_librispeech_v1", "(", ")", ":", "hparams", "=", "transformer_base", "(", ")", "hparams", ".", "num_heads", "=", "4", "hparams", ".", "filter_size", "=", "1024", "hparams", ".", "hidden_size", "=", "256", "hparams", ".", "num_encoder_layers", "=", "5", "hparams", ".", "num_decoder_layers", "=", "3", "hparams", ".", "learning_rate", "=", "0.15", "hparams", ".", "batch_size", "=", "6000000", "librispeech", ".", "set_librispeech_length_hparams", "(", "hparams", ")", "return", "hparams" ]
HParams for training ASR model on LibriSpeech V1.
[ "HParams", "for", "training", "ASR", "model", "on", "LibriSpeech", "V1", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2498-L2511
22,535
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_librispeech_v2
def transformer_librispeech_v2():
  """HParams for training ASR model on LibriSpeech V2."""
  hparams = transformer_base()

  hparams.max_length = 1240000
  hparams.max_input_seq_length = 1550
  hparams.max_target_seq_length = 350
  hparams.batch_size = 16
  hparams.num_decoder_layers = 4
  hparams.num_encoder_layers = 6
  hparams.hidden_size = 384
  hparams.learning_rate = 0.15
  hparams.daisy_chain_variables = False
  hparams.filter_size = 1536
  hparams.num_heads = 2
  hparams.ffn_layer = "conv_relu_conv"
  hparams.conv_first_kernel = 9
  hparams.weight_decay = 0
  hparams.layer_prepostprocess_dropout = 0.2
  hparams.relu_dropout = 0.2

  return hparams
python
def transformer_librispeech_v2():
  """HParams for training ASR model on LibriSpeech V2."""
  hparams = transformer_base()

  hparams.max_length = 1240000
  hparams.max_input_seq_length = 1550
  hparams.max_target_seq_length = 350
  hparams.batch_size = 16
  hparams.num_decoder_layers = 4
  hparams.num_encoder_layers = 6
  hparams.hidden_size = 384
  hparams.learning_rate = 0.15
  hparams.daisy_chain_variables = False
  hparams.filter_size = 1536
  hparams.num_heads = 2
  hparams.ffn_layer = "conv_relu_conv"
  hparams.conv_first_kernel = 9
  hparams.weight_decay = 0
  hparams.layer_prepostprocess_dropout = 0.2
  hparams.relu_dropout = 0.2

  return hparams
[ "def", "transformer_librispeech_v2", "(", ")", ":", "hparams", "=", "transformer_base", "(", ")", "hparams", ".", "max_length", "=", "1240000", "hparams", ".", "max_input_seq_length", "=", "1550", "hparams", ".", "max_target_seq_length", "=", "350", "hparams", ".", "batch_size", "=", "16", "hparams", ".", "num_decoder_layers", "=", "4", "hparams", ".", "num_encoder_layers", "=", "6", "hparams", ".", "hidden_size", "=", "384", "hparams", ".", "learning_rate", "=", "0.15", "hparams", ".", "daisy_chain_variables", "=", "False", "hparams", ".", "filter_size", "=", "1536", "hparams", ".", "num_heads", "=", "2", "hparams", ".", "ffn_layer", "=", "\"conv_relu_conv\"", "hparams", ".", "conv_first_kernel", "=", "9", "hparams", ".", "weight_decay", "=", "0", "hparams", ".", "layer_prepostprocess_dropout", "=", "0.2", "hparams", ".", "relu_dropout", "=", "0.2", "return", "hparams" ]
HParams for training ASR model on LibriSpeech V2.
[ "HParams", "for", "training", "ASR", "model", "on", "LibriSpeech", "V2", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2515-L2536
22,536
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_librispeech_tpu_v1
def transformer_librispeech_tpu_v1(): """HParams for training ASR model on Librispeech on TPU v1.""" hparams = transformer_librispeech_v1() update_hparams_for_tpu(hparams) hparams.batch_size = 16 librispeech.set_librispeech_length_hparams(hparams) return hparams
python
def transformer_librispeech_tpu_v1(): """HParams for training ASR model on Librispeech on TPU v1.""" hparams = transformer_librispeech_v1() update_hparams_for_tpu(hparams) hparams.batch_size = 16 librispeech.set_librispeech_length_hparams(hparams) return hparams
[ "def", "transformer_librispeech_tpu_v1", "(", ")", ":", "hparams", "=", "transformer_librispeech_v1", "(", ")", "update_hparams_for_tpu", "(", "hparams", ")", "hparams", ".", "batch_size", "=", "16", "librispeech", ".", "set_librispeech_length_hparams", "(", "hparams", ")", "return", "hparams" ]
HParams for training ASR model on Librispeech on TPU v1.
[ "HParams", "for", "training", "ASR", "model", "on", "Librispeech", "on", "TPU", "v1", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2540-L2547
22,537
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_librispeech_tpu_v2
def transformer_librispeech_tpu_v2(): """HParams for training ASR model on Librispeech on TPU v2.""" hparams = transformer_librispeech_v2() update_hparams_for_tpu(hparams) hparams.batch_size = 16 librispeech.set_librispeech_length_hparams(hparams) return hparams
python
def transformer_librispeech_tpu_v2(): """HParams for training ASR model on Librispeech on TPU v2.""" hparams = transformer_librispeech_v2() update_hparams_for_tpu(hparams) hparams.batch_size = 16 librispeech.set_librispeech_length_hparams(hparams) return hparams
[ "def", "transformer_librispeech_tpu_v2", "(", ")", ":", "hparams", "=", "transformer_librispeech_v2", "(", ")", "update_hparams_for_tpu", "(", "hparams", ")", "hparams", ".", "batch_size", "=", "16", "librispeech", ".", "set_librispeech_length_hparams", "(", "hparams", ")", "return", "hparams" ]
HParams for training ASR model on Librispeech on TPU v2.
[ "HParams", "for", "training", "ASR", "model", "on", "Librispeech", "on", "TPU", "v2", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2551-L2558
22,538
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_tpu_1b
def transformer_tpu_1b(): """Hparams for machine translation with ~1.1B parameters.""" hparams = transformer_tpu() hparams.hidden_size = 2048 hparams.filter_size = 8192 hparams.num_hidden_layers = 8 # smaller batch size to avoid OOM hparams.batch_size = 1024 hparams.activation_dtype = "bfloat16" hparams.weight_dtype = "bfloat16" # maximize number of parameters relative to computation by not sharing. hparams.shared_embedding_and_softmax_weights = False return hparams
python
def transformer_tpu_1b(): """Hparams for machine translation with ~1.1B parameters.""" hparams = transformer_tpu() hparams.hidden_size = 2048 hparams.filter_size = 8192 hparams.num_hidden_layers = 8 # smaller batch size to avoid OOM hparams.batch_size = 1024 hparams.activation_dtype = "bfloat16" hparams.weight_dtype = "bfloat16" # maximize number of parameters relative to computation by not sharing. hparams.shared_embedding_and_softmax_weights = False return hparams
[ "def", "transformer_tpu_1b", "(", ")", ":", "hparams", "=", "transformer_tpu", "(", ")", "hparams", ".", "hidden_size", "=", "2048", "hparams", ".", "filter_size", "=", "8192", "hparams", ".", "num_hidden_layers", "=", "8", "# smaller batch size to avoid OOM", "hparams", ".", "batch_size", "=", "1024", "hparams", ".", "activation_dtype", "=", "\"bfloat16\"", "hparams", ".", "weight_dtype", "=", "\"bfloat16\"", "# maximize number of parameters relative to computation by not sharing.", "hparams", ".", "shared_embedding_and_softmax_weights", "=", "False", "return", "hparams" ]
Hparams for machine translation with ~1.1B parameters.
[ "Hparams", "for", "machine", "translation", "with", "~1", ".", "1B", "parameters", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2599-L2611
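Back-of-envelope memory arithmetic behind the bfloat16 weight choice in this set (a sketch; the ~1.1B figure comes from the docstring above):

params = 1.1e9
print("float32 weights:  %.1f GiB" % (params * 4 / 2**30))  # ~4.1 GiB
print("bfloat16 weights: %.1f GiB" % (params * 2 / 2**30))  # ~2.0 GiB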
22,539
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_wikitext103_l4k_v0
def transformer_wikitext103_l4k_v0(): """HParams for training languagemodel_wikitext103_l4k.""" hparams = transformer_big() # Adafactor uses less memory than Adam. # switch to Adafactor with its recommended learning rate scheme. hparams.optimizer = "Adafactor" hparams.learning_rate_schedule = "rsqrt_decay" hparams.learning_rate_warmup_steps = 10000 hparams.num_heads = 4 hparams.max_length = 4096 hparams.batch_size = 4096 hparams.shared_embedding_and_softmax_weights = False hparams.num_hidden_layers = 8 hparams.attention_dropout = 0.1 hparams.layer_prepostprocess_dropout = 0.2 hparams.relu_dropout = 0.1 hparams.label_smoothing = 0.0 # Using noise broadcast in the dropout layers saves memory during training. hparams.attention_dropout_broadcast_dims = "0,1" # batch, heads hparams.relu_dropout_broadcast_dims = "1" # length hparams.layer_prepostprocess_dropout_broadcast_dims = "1" # length # Avoid an expensive concat on TPU. # >1 shards helps with faster parameter distribution on multi-GPU machines hparams.symbol_modality_num_shards = 1 return hparams
python
def transformer_wikitext103_l4k_v0(): """HParams for training languagemodel_wikitext103_l4k.""" hparams = transformer_big() # Adafactor uses less memory than Adam. # switch to Adafactor with its recommended learning rate scheme. hparams.optimizer = "Adafactor" hparams.learning_rate_schedule = "rsqrt_decay" hparams.learning_rate_warmup_steps = 10000 hparams.num_heads = 4 hparams.max_length = 4096 hparams.batch_size = 4096 hparams.shared_embedding_and_softmax_weights = False hparams.num_hidden_layers = 8 hparams.attention_dropout = 0.1 hparams.layer_prepostprocess_dropout = 0.2 hparams.relu_dropout = 0.1 hparams.label_smoothing = 0.0 # Using noise broadcast in the dropout layers saves memory during training. hparams.attention_dropout_broadcast_dims = "0,1" # batch, heads hparams.relu_dropout_broadcast_dims = "1" # length hparams.layer_prepostprocess_dropout_broadcast_dims = "1" # length # Avoid an expensive concat on TPU. # >1 shards helps with faster parameter distribution on multi-GPU machines hparams.symbol_modality_num_shards = 1 return hparams
[ "def", "transformer_wikitext103_l4k_v0", "(", ")", ":", "hparams", "=", "transformer_big", "(", ")", "# Adafactor uses less memory than Adam.", "# switch to Adafactor with its recommended learning rate scheme.", "hparams", ".", "optimizer", "=", "\"Adafactor\"", "hparams", ".", "learning_rate_schedule", "=", "\"rsqrt_decay\"", "hparams", ".", "learning_rate_warmup_steps", "=", "10000", "hparams", ".", "num_heads", "=", "4", "hparams", ".", "max_length", "=", "4096", "hparams", ".", "batch_size", "=", "4096", "hparams", ".", "shared_embedding_and_softmax_weights", "=", "False", "hparams", ".", "num_hidden_layers", "=", "8", "hparams", ".", "attention_dropout", "=", "0.1", "hparams", ".", "layer_prepostprocess_dropout", "=", "0.2", "hparams", ".", "relu_dropout", "=", "0.1", "hparams", ".", "label_smoothing", "=", "0.0", "# Using noise broadcast in the dropout layers saves memory during training.", "hparams", ".", "attention_dropout_broadcast_dims", "=", "\"0,1\"", "# batch, heads", "hparams", ".", "relu_dropout_broadcast_dims", "=", "\"1\"", "# length", "hparams", ".", "layer_prepostprocess_dropout_broadcast_dims", "=", "\"1\"", "# length", "# Avoid an expensive concat on TPU.", "# >1 shards helps with faster parameter distribution on multi-GPU machines", "hparams", ".", "symbol_modality_num_shards", "=", "1", "return", "hparams" ]
HParams for training languagemodel_wikitext103_l4k.
[ "HParams", "for", "training", "languagemodel_wikitext103_l4k", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2615-L2645
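A sketch of what the dropout broadcast dims buy, assuming (as in t2t's common_layers) that they map onto the noise_shape argument of tf.nn.dropout: broadcasting the noise over the length dimension stores one mask value per (batch, hidden) pair instead of one per position.

import tensorflow as tf

x = tf.ones([8, 4096, 1024])  # (batch, length, hidden)
# layer_prepostprocess_dropout_broadcast_dims = "1" -> share noise along length
y = tf.nn.dropout(x, keep_prob=0.8, noise_shape=[8, 1, 1024])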
22,540
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_wikitext103_l4k_memory_v0
def transformer_wikitext103_l4k_memory_v0(): """HParams for training languagemodel_wikitext103_l4k with memory.""" hparams = transformer_wikitext103_l4k_v0() hparams.split_targets_chunk_length = 64 hparams.split_targets_max_chunks = 64 hparams.split_targets_strided_training = True hparams.add_hparam("memory_type", "transformer_xl") # The hparams specify batch size *before* chunking, but we want to have a # consistent 4K batch size *after* chunking to fully utilize the hardware. target_tokens_per_batch = 4096 hparams.batch_size = int(target_tokens_per_batch * ( hparams.max_length / hparams.split_targets_chunk_length)) # 262144 hparams.pos = None hparams.self_attention_type = "dot_product_relative" hparams.max_relative_position = 2 * hparams.split_targets_chunk_length hparams.add_hparam("unconditional", True) hparams.add_hparam("recurrent_memory_batch_size", 0) # 0 = try to guess # By default, cache one chunk only (like Transformer-XL) hparams.add_hparam("num_memory_items", hparams.split_targets_chunk_length) return hparams
python
def transformer_wikitext103_l4k_memory_v0(): """HParams for training languagemodel_wikitext103_l4k with memory.""" hparams = transformer_wikitext103_l4k_v0() hparams.split_targets_chunk_length = 64 hparams.split_targets_max_chunks = 64 hparams.split_targets_strided_training = True hparams.add_hparam("memory_type", "transformer_xl") # The hparams specify batch size *before* chunking, but we want to have a # consistent 4K batch size *after* chunking to fully utilize the hardware. target_tokens_per_batch = 4096 hparams.batch_size = int(target_tokens_per_batch * ( hparams.max_length / hparams.split_targets_chunk_length)) # 262144 hparams.pos = None hparams.self_attention_type = "dot_product_relative" hparams.max_relative_position = 2 * hparams.split_targets_chunk_length hparams.add_hparam("unconditional", True) hparams.add_hparam("recurrent_memory_batch_size", 0) # 0 = try to guess # By default, cache one chunk only (like Transformer-XL) hparams.add_hparam("num_memory_items", hparams.split_targets_chunk_length) return hparams
[ "def", "transformer_wikitext103_l4k_memory_v0", "(", ")", ":", "hparams", "=", "transformer_wikitext103_l4k_v0", "(", ")", "hparams", ".", "split_targets_chunk_length", "=", "64", "hparams", ".", "split_targets_max_chunks", "=", "64", "hparams", ".", "split_targets_strided_training", "=", "True", "hparams", ".", "add_hparam", "(", "\"memory_type\"", ",", "\"transformer_xl\"", ")", "# The hparams specify batch size *before* chunking, but we want to have a", "# consistent 4K batch size *after* chunking to fully utilize the hardware.", "target_tokens_per_batch", "=", "4096", "hparams", ".", "batch_size", "=", "int", "(", "target_tokens_per_batch", "*", "(", "hparams", ".", "max_length", "/", "hparams", ".", "split_targets_chunk_length", ")", ")", "# 262144", "hparams", ".", "pos", "=", "None", "hparams", ".", "self_attention_type", "=", "\"dot_product_relative\"", "hparams", ".", "max_relative_position", "=", "2", "*", "hparams", ".", "split_targets_chunk_length", "hparams", ".", "add_hparam", "(", "\"unconditional\"", ",", "True", ")", "hparams", ".", "add_hparam", "(", "\"recurrent_memory_batch_size\"", ",", "0", ")", "# 0 = try to guess", "# By default, cache one chunk only (like Transformer-XL)", "hparams", ".", "add_hparam", "(", "\"num_memory_items\"", ",", "hparams", ".", "split_targets_chunk_length", ")", "return", "hparams" ]
HParams for training languagemodel_wikitext103_l4k with memory.
[ "HParams", "for", "training", "languagemodel_wikitext103_l4k", "with", "memory", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2649-L2673
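Checking the chunking arithmetic in this set (pure Python; it matches the inline # 262144 comment in the code above):

target_tokens_per_batch = 4096
max_length = 4096
split_targets_chunk_length = 64
batch_size = int(target_tokens_per_batch * (max_length / split_targets_chunk_length))
assert batch_size == 262144  # batch size *before* chunking, 4096 tokens after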
22,541
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_wikitext103_l16k_memory_v0
def transformer_wikitext103_l16k_memory_v0(): """HParams for training languagemodel_wikitext103_l16k with memory.""" hparams = transformer_wikitext103_l4k_memory_v0() hparams.max_length = 16384 hparams.split_targets_chunk_length = 64 hparams.split_targets_max_chunks = int( hparams.max_length / hparams.split_targets_chunk_length) # The hparams specify batch size *before* chunking, but we want to have a # consistent 4K batch size *after* chunking to fully utilize the hardware. target_tokens_per_batch = 4096 hparams.batch_size = int(target_tokens_per_batch * ( hparams.max_length / hparams.split_targets_chunk_length)) hparams.max_relative_position = 2 * hparams.split_targets_chunk_length return hparams
python
def transformer_wikitext103_l16k_memory_v0(): """HParams for training languagemodel_wikitext103_l16k with memory.""" hparams = transformer_wikitext103_l4k_memory_v0() hparams.max_length = 16384 hparams.split_targets_chunk_length = 64 hparams.split_targets_max_chunks = int( hparams.max_length / hparams.split_targets_chunk_length) # The hparams specify batch size *before* chunking, but we want to have a # consistent 4K batch size *after* chunking to fully utilize the hardware. target_tokens_per_batch = 4096 hparams.batch_size = int(target_tokens_per_batch * ( hparams.max_length / hparams.split_targets_chunk_length)) hparams.max_relative_position = 2 * hparams.split_targets_chunk_length return hparams
[ "def", "transformer_wikitext103_l16k_memory_v0", "(", ")", ":", "hparams", "=", "transformer_wikitext103_l4k_memory_v0", "(", ")", "hparams", ".", "max_length", "=", "16384", "hparams", ".", "split_targets_chunk_length", "=", "64", "hparams", ".", "split_targets_max_chunks", "=", "int", "(", "hparams", ".", "max_length", "/", "hparams", ".", "split_targets_chunk_length", ")", "# The hparams specify batch size *before* chunking, but we want to have a", "# consistent 4K batch size *after* chunking to fully utilize the hardware.", "target_tokens_per_batch", "=", "4096", "hparams", ".", "batch_size", "=", "int", "(", "target_tokens_per_batch", "*", "(", "hparams", ".", "max_length", "/", "hparams", ".", "split_targets_chunk_length", ")", ")", "hparams", ".", "max_relative_position", "=", "2", "*", "hparams", ".", "split_targets_chunk_length", "return", "hparams" ]
HParams for training languagemodel_wikitext103_l16k with memory.
[ "HParams", "for", "training", "languagemodel_wikitext103_l16k", "with", "memory", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2677-L2694
22,542
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_cifar10_memory_v0
def transformer_cifar10_memory_v0(): """HParams for training image_cifar10_plain_gen_flat_rev with memory.""" hparams = transformer_wikitext103_l4k_memory_v0() hparams.num_hidden_layers = 6 hparams.max_length = 32 * 32 * 3 hparams.split_targets_chunk_length = 64 * 3 hparams.split_targets_max_chunks = int( hparams.max_length / hparams.split_targets_chunk_length) hparams.num_memory_items = 128 * 3 # Since this is an image problem, batch size refers to examples (not tokens) target_images_per_batch = 4 hparams.batch_size = int(target_images_per_batch * ( hparams.max_length / hparams.split_targets_chunk_length)) # The recurrent memory needs to know the actual batch size (in sequences) hparams.recurrent_memory_batch_size = hparams.batch_size hparams.max_relative_position = ( hparams.num_memory_items + hparams.split_targets_chunk_length) return hparams
python
def transformer_cifar10_memory_v0(): """HParams for training image_cifar10_plain_gen_flat_rev with memory.""" hparams = transformer_wikitext103_l4k_memory_v0() hparams.num_hidden_layers = 6 hparams.max_length = 32 * 32 * 3 hparams.split_targets_chunk_length = 64 * 3 hparams.split_targets_max_chunks = int( hparams.max_length / hparams.split_targets_chunk_length) hparams.num_memory_items = 128 * 3 # Since this is an image problem, batch size refers to examples (not tokens) target_images_per_batch = 4 hparams.batch_size = int(target_images_per_batch * ( hparams.max_length / hparams.split_targets_chunk_length)) # The recurrent memory needs to know the actual batch size (in sequences) hparams.recurrent_memory_batch_size = hparams.batch_size hparams.max_relative_position = ( hparams.num_memory_items + hparams.split_targets_chunk_length) return hparams
[ "def", "transformer_cifar10_memory_v0", "(", ")", ":", "hparams", "=", "transformer_wikitext103_l4k_memory_v0", "(", ")", "hparams", ".", "num_hidden_layers", "=", "6", "hparams", ".", "max_length", "=", "32", "*", "32", "*", "3", "hparams", ".", "split_targets_chunk_length", "=", "64", "*", "3", "hparams", ".", "split_targets_max_chunks", "=", "int", "(", "hparams", ".", "max_length", "/", "hparams", ".", "split_targets_chunk_length", ")", "hparams", ".", "num_memory_items", "=", "128", "*", "3", "# Since this is an image problem, batch size refers to examples (not tokens)", "target_images_per_batch", "=", "4", "hparams", ".", "batch_size", "=", "int", "(", "target_images_per_batch", "*", "(", "hparams", ".", "max_length", "/", "hparams", ".", "split_targets_chunk_length", ")", ")", "# The recurrent memory needs to know the actual batch size (in sequences)", "hparams", ".", "recurrent_memory_batch_size", "=", "hparams", ".", "batch_size", "hparams", ".", "max_relative_position", "=", "(", "hparams", ".", "num_memory_items", "+", "hparams", ".", "split_targets_chunk_length", ")", "return", "hparams" ]
HParams for training image_cifar10_plain_gen_flat_rev with memory.
[ "HParams", "for", "training", "image_cifar10_plain_gen_flat_rev", "with", "memory", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2698-L2721
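The same arithmetic for the CIFAR-10 set, where batch size counts images rather than tokens (pure Python):

max_length = 32 * 32 * 3             # 3072 pixel-channels per image
split_targets_chunk_length = 64 * 3  # 192
chunks_per_image = max_length // split_targets_chunk_length  # 16
target_images_per_batch = 4
batch_size = target_images_per_batch * chunks_per_image
assert batch_size == 64  # sequences per batch after chunking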
22,543
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
transformer_imagenet64_memory_v0
def transformer_imagenet64_memory_v0(): """HParams for training image_imagenet64_gen_flat_rev with memory.""" hparams = transformer_cifar10_memory_v0() hparams.max_length = 64 * 64 * 3 hparams.split_targets_chunk_length = 64 * 3 hparams.split_targets_max_chunks = int( hparams.max_length / hparams.split_targets_chunk_length) hparams.num_memory_items = 128 * 3 # Since this is an image problem, batch size refers to examples (not tokens) target_images_per_batch = 2 hparams.batch_size = int(target_images_per_batch * ( hparams.max_length / hparams.split_targets_chunk_length)) # The recurrent memory needs to know the actual batch size (in sequences) hparams.recurrent_memory_batch_size = hparams.batch_size hparams.max_relative_position = 3072 return hparams
python
def transformer_imagenet64_memory_v0(): """HParams for training image_imagenet64_gen_flat_rev with memory.""" hparams = transformer_cifar10_memory_v0() hparams.max_length = 64 * 64 * 3 hparams.split_targets_chunk_length = 64 * 3 hparams.split_targets_max_chunks = int( hparams.max_length / hparams.split_targets_chunk_length) hparams.num_memory_items = 128 * 3 # Since this is an image problem, batch size refers to examples (not tokens) target_images_per_batch = 2 hparams.batch_size = int(target_images_per_batch * ( hparams.max_length / hparams.split_targets_chunk_length)) # The recurrent memory needs to know the actual batch size (in sequences) hparams.recurrent_memory_batch_size = hparams.batch_size hparams.max_relative_position = 3072 return hparams
[ "def", "transformer_imagenet64_memory_v0", "(", ")", ":", "hparams", "=", "transformer_cifar10_memory_v0", "(", ")", "hparams", ".", "max_length", "=", "64", "*", "64", "*", "3", "hparams", ".", "split_targets_chunk_length", "=", "64", "*", "3", "hparams", ".", "split_targets_max_chunks", "=", "int", "(", "hparams", ".", "max_length", "/", "hparams", ".", "split_targets_chunk_length", ")", "hparams", ".", "num_memory_items", "=", "128", "*", "3", "# Since this is an image problem, batch size refers to examples (not tokens)", "target_images_per_batch", "=", "2", "hparams", ".", "batch_size", "=", "int", "(", "target_images_per_batch", "*", "(", "hparams", ".", "max_length", "/", "hparams", ".", "split_targets_chunk_length", ")", ")", "# The recurrent memory needs to know the actual batch size (in sequences)", "hparams", ".", "recurrent_memory_batch_size", "=", "hparams", ".", "batch_size", "hparams", ".", "max_relative_position", "=", "3072", "return", "hparams" ]
HParams for training image_imagenet64_gen_flat_rev with memory.
[ "HParams", "for", "training", "image_imagenet64_gen_flat_rev", "with", "memory", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2725-L2745
22,544
tensorflow/tensor2tensor
tensor2tensor/layers/common_image_attention.py
maybe_reshape_4d_to_3d
def maybe_reshape_4d_to_3d(x): """Reshape input from 4D to 3D if necessary.""" x_shape = common_layers.shape_list(x) is_4d = False if len(x_shape) == 4: x = tf.reshape(x, [x_shape[0], x_shape[1]*x_shape[2], x_shape[3]]) is_4d = True return x, x_shape, is_4d
python
def maybe_reshape_4d_to_3d(x): """Reshape input from 4D to 3D if necessary.""" x_shape = common_layers.shape_list(x) is_4d = False if len(x_shape) == 4: x = tf.reshape(x, [x_shape[0], x_shape[1]*x_shape[2], x_shape[3]]) is_4d = True return x, x_shape, is_4d
[ "def", "maybe_reshape_4d_to_3d", "(", "x", ")", ":", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "is_4d", "=", "False", "if", "len", "(", "x_shape", ")", "==", "4", ":", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "x_shape", "[", "0", "]", ",", "x_shape", "[", "1", "]", "*", "x_shape", "[", "2", "]", ",", "x_shape", "[", "3", "]", "]", ")", "is_4d", "=", "True", "return", "x", ",", "x_shape", ",", "is_4d" ]
Reshape input from 4D to 3D if necessary.
[ "Reshape", "input", "from", "4D", "to", "3D", "if", "necessary", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L72-L79
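A self-contained numpy sketch of the same reshape contract (the real helper uses common_layers.shape_list so it also works with dynamic TF shapes):

import numpy as np

def maybe_reshape_4d_to_3d_np(x):
    x_shape = list(x.shape)
    is_4d = len(x_shape) == 4
    if is_4d:
        b, h, w, c = x_shape
        x = x.reshape(b, h * w, c)  # fold the two spatial dims into one length dim
    return x, x_shape, is_4d

x, shape, was_4d = maybe_reshape_4d_to_3d_np(np.zeros((2, 8, 8, 16)))
assert x.shape == (2, 64, 16) and was_4d and shape == [2, 8, 8, 16]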
22,545
tensorflow/tensor2tensor
tensor2tensor/layers/common_image_attention.py
local_attention_2d
def local_attention_2d(x, hparams, attention_type="local_attention_2d"): """Local 2d, self attention layer.""" # self-attention with tf.variable_scope("local_2d_self_att"): y = common_attention.multihead_attention_2d( x, None, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, attention_type=attention_type, query_shape=hparams.query_shape, memory_flange=hparams.memory_flange, name="self_attention") return y
python
def local_attention_2d(x, hparams, attention_type="local_attention_2d"): """Local 2d, self attention layer.""" # self-attention with tf.variable_scope("local_2d_self_att"): y = common_attention.multihead_attention_2d( x, None, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, attention_type=attention_type, query_shape=hparams.query_shape, memory_flange=hparams.memory_flange, name="self_attention") return y
[ "def", "local_attention_2d", "(", "x", ",", "hparams", ",", "attention_type", "=", "\"local_attention_2d\"", ")", ":", "# self-attention", "with", "tf", ".", "variable_scope", "(", "\"local_2d_self_att\"", ")", ":", "y", "=", "common_attention", ".", "multihead_attention_2d", "(", "x", ",", "None", ",", "hparams", ".", "attention_key_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "attention_value_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "num_heads", ",", "attention_type", "=", "attention_type", ",", "query_shape", "=", "hparams", ".", "query_shape", ",", "memory_flange", "=", "hparams", ".", "memory_flange", ",", "name", "=", "\"self_attention\"", ")", "return", "y" ]
Local 2d, self attention layer.
[ "Local", "2d", "self", "attention", "layer", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L82-L97
22,546
tensorflow/tensor2tensor
tensor2tensor/layers/common_image_attention.py
local_within_block_attention
def local_within_block_attention(x, self_attention_bias, hparams, attention_type="local_within_block_mask_right", q_padding="VALID", kv_padding="VALID"): """Local within block self attention.""" x_new, x_shape, is_4d = maybe_reshape_4d_to_3d(x) with tf.variable_scope("local_within_block"): y = common_attention.multihead_attention( common_layers.layer_preprocess(x_new, hparams), None, self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=attention_type, block_width=hparams.block_width, block_length=hparams.block_length, q_padding=q_padding, kv_padding=kv_padding, q_filter_width=hparams.q_filter_width, kv_filter_width=hparams.kv_filter_width, name="local_within_block") if is_4d: y = tf.reshape(y, x_shape) return y
python
def local_within_block_attention(x, self_attention_bias, hparams, attention_type="local_within_block_mask_right", q_padding="VALID", kv_padding="VALID"): """Local within block self attention.""" x_new, x_shape, is_4d = maybe_reshape_4d_to_3d(x) with tf.variable_scope("local_within_block"): y = common_attention.multihead_attention( common_layers.layer_preprocess(x_new, hparams), None, self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=attention_type, block_width=hparams.block_width, block_length=hparams.block_length, q_padding=q_padding, kv_padding=kv_padding, q_filter_width=hparams.q_filter_width, kv_filter_width=hparams.kv_filter_width, name="local_within_block") if is_4d: y = tf.reshape(y, x_shape) return y
[ "def", "local_within_block_attention", "(", "x", ",", "self_attention_bias", ",", "hparams", ",", "attention_type", "=", "\"local_within_block_mask_right\"", ",", "q_padding", "=", "\"VALID\"", ",", "kv_padding", "=", "\"VALID\"", ")", ":", "x_new", ",", "x_shape", ",", "is_4d", "=", "maybe_reshape_4d_to_3d", "(", "x", ")", "with", "tf", ".", "variable_scope", "(", "\"local_within_block\"", ")", ":", "y", "=", "common_attention", ".", "multihead_attention", "(", "common_layers", ".", "layer_preprocess", "(", "x_new", ",", "hparams", ")", ",", "None", ",", "self_attention_bias", ",", "hparams", ".", "attention_key_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "attention_value_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "num_heads", ",", "hparams", ".", "attention_dropout", ",", "attention_type", "=", "attention_type", ",", "block_width", "=", "hparams", ".", "block_width", ",", "block_length", "=", "hparams", ".", "block_length", ",", "q_padding", "=", "q_padding", ",", "kv_padding", "=", "kv_padding", ",", "q_filter_width", "=", "hparams", ".", "q_filter_width", ",", "kv_filter_width", "=", "hparams", ".", "kv_filter_width", ",", "name", "=", "\"local_within_block\"", ")", "if", "is_4d", ":", "y", "=", "tf", ".", "reshape", "(", "y", ",", "x_shape", ")", "return", "y" ]
Local within block self attention.
[ "Local", "within", "block", "self", "attention", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L100-L128
22,547
tensorflow/tensor2tensor
tensor2tensor/layers/common_image_attention.py
get_dilated_1d_attention_mask
def get_dilated_1d_attention_mask( num_heads, block_size, num_blocks, memory_size, gap_size, name="dilated_mask"): """Dilated attention with a masking strategy.""" mask = np.ones((num_heads, block_size, 2*block_size), np.bool) # now going over every row to do the right assignment of # memory blocks for i in range(block_size): visible = 2*block_size - (block_size-i) # You always attend to yourself, set the mask for that mask[:, i, -(block_size - i)] = 0 # Maybe num_blocks can be automatically calculated? for j in range(num_blocks): for k in range(memory_size): index = ((gap_size + memory_size)*j) + k if index >= visible: break mask[:, i, -(index + block_size - i + 1)] = 0 # Verify # adding a num blocks dimension mask = np.expand_dims(mask, axis=1) return tf.constant(mask, dtype=tf.int32, name=name)
python
def get_dilated_1d_attention_mask( num_heads, block_size, num_blocks, memory_size, gap_size, name="dilated_mask"): """Dilated attention with a masking strategy.""" mask = np.ones((num_heads, block_size, 2*block_size), np.bool) # now going over every row to do the right assignment of # memory blocks for i in range(block_size): visible = 2*block_size - (block_size-i) # You always attend to yourself, set the mask for that mask[:, i, -(block_size - i)] = 0 # Maybe num_blocks can be automatically calculated? for j in range(num_blocks): for k in range(memory_size): index = ((gap_size + memory_size)*j) + k if index >= visible: break mask[:, i, -(index + block_size - i + 1)] = 0 # Verify # adding a num blocks dimension mask = np.expand_dims(mask, axis=1) return tf.constant(mask, dtype=tf.int32, name=name)
[ "def", "get_dilated_1d_attention_mask", "(", "num_heads", ",", "block_size", ",", "num_blocks", ",", "memory_size", ",", "gap_size", ",", "name", "=", "\"dilated_mask\"", ")", ":", "mask", "=", "np", ".", "ones", "(", "(", "num_heads", ",", "block_size", ",", "2", "*", "block_size", ")", ",", "np", ".", "bool", ")", "# now going over every row to do the right assignment of", "# memory blocks", "for", "i", "in", "range", "(", "block_size", ")", ":", "visible", "=", "2", "*", "block_size", "-", "(", "block_size", "-", "i", ")", "# You always attend to yourself, set the mask for that", "mask", "[", ":", ",", "i", ",", "-", "(", "block_size", "-", "i", ")", "]", "=", "0", "# Maybe num_blocks can be automatically calculated?", "for", "j", "in", "range", "(", "num_blocks", ")", ":", "for", "k", "in", "range", "(", "memory_size", ")", ":", "index", "=", "(", "(", "gap_size", "+", "memory_size", ")", "*", "j", ")", "+", "k", "if", "index", ">=", "visible", ":", "break", "mask", "[", ":", ",", "i", ",", "-", "(", "index", "+", "block_size", "-", "i", "+", "1", ")", "]", "=", "0", "# Verify", "# adding a num blocks dimension", "mask", "=", "np", ".", "expand_dims", "(", "mask", ",", "axis", "=", "1", ")", "return", "tf", ".", "constant", "(", "mask", ",", "dtype", "=", "tf", ".", "int32", ",", "name", "=", "name", ")" ]
Dilated attention with a masking strategy.
[ "Dilated", "attention", "with", "a", "masking", "strategy", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L164-L187
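A standalone numpy version of the mask construction above, handy for inspecting the dilation pattern. Note that np.bool was removed in NumPy 1.24+, so this sketch uses the builtin bool dtype instead:

import numpy as np

def dilated_mask_np(num_heads, block_size, num_blocks, memory_size, gap_size):
    mask = np.ones((num_heads, block_size, 2 * block_size), dtype=bool)
    for i in range(block_size):
        visible = block_size + i              # == 2*block_size - (block_size - i)
        mask[:, i, -(block_size - i)] = 0     # always attend to yourself
        for j in range(num_blocks):
            for k in range(memory_size):
                index = (gap_size + memory_size) * j + k
                if index >= visible:
                    break
                mask[:, i, -(index + block_size - i + 1)] = 0
    return np.expand_dims(mask, axis=1)       # add the num-blocks dimension

m = dilated_mask_np(num_heads=1, block_size=4, num_blocks=2, memory_size=1, gap_size=2)
print(m[0, 0].astype(int))  # 0 marks a visible position, 1 a masked one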
22,548
tensorflow/tensor2tensor
tensor2tensor/layers/common_image_attention.py
dilated_attention_1d
def dilated_attention_1d(x, hparams, attention_type="masked_dilated_1d", q_padding="VALID", kv_padding="VALID", gap_size=2): """Dilated 1d self attention.""" # self-attention x, x_shape, is_4d = maybe_reshape_4d_to_3d(x) with tf.variable_scope("masked_dilated_1d"): y = common_attention.multihead_attention( x, None, None, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=attention_type, block_width=hparams.block_width, block_length=hparams.block_length, q_padding=q_padding, kv_padding=kv_padding, q_filter_width=hparams.q_filter_width, kv_filter_width=hparams.kv_filter_width, gap_size=gap_size, num_memory_blocks=hparams.num_memory_blocks, name="self_attention") if is_4d: y = tf.reshape(y, x_shape) y.set_shape([None, None, None, hparams.hidden_size]) return y
python
def dilated_attention_1d(x, hparams, attention_type="masked_dilated_1d", q_padding="VALID", kv_padding="VALID", gap_size=2): """Dilated 1d self attention.""" # self-attention x, x_shape, is_4d = maybe_reshape_4d_to_3d(x) with tf.variable_scope("masked_dilated_1d"): y = common_attention.multihead_attention( x, None, None, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=attention_type, block_width=hparams.block_width, block_length=hparams.block_length, q_padding=q_padding, kv_padding=kv_padding, q_filter_width=hparams.q_filter_width, kv_filter_width=hparams.kv_filter_width, gap_size=gap_size, num_memory_blocks=hparams.num_memory_blocks, name="self_attention") if is_4d: y = tf.reshape(y, x_shape) y.set_shape([None, None, None, hparams.hidden_size]) return y
[ "def", "dilated_attention_1d", "(", "x", ",", "hparams", ",", "attention_type", "=", "\"masked_dilated_1d\"", ",", "q_padding", "=", "\"VALID\"", ",", "kv_padding", "=", "\"VALID\"", ",", "gap_size", "=", "2", ")", ":", "# self-attention", "x", ",", "x_shape", ",", "is_4d", "=", "maybe_reshape_4d_to_3d", "(", "x", ")", "with", "tf", ".", "variable_scope", "(", "\"masked_dilated_1d\"", ")", ":", "y", "=", "common_attention", ".", "multihead_attention", "(", "x", ",", "None", ",", "None", ",", "hparams", ".", "attention_key_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "attention_value_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "num_heads", ",", "hparams", ".", "attention_dropout", ",", "attention_type", "=", "attention_type", ",", "block_width", "=", "hparams", ".", "block_width", ",", "block_length", "=", "hparams", ".", "block_length", ",", "q_padding", "=", "q_padding", ",", "kv_padding", "=", "kv_padding", ",", "q_filter_width", "=", "hparams", ".", "q_filter_width", ",", "kv_filter_width", "=", "hparams", ".", "kv_filter_width", ",", "gap_size", "=", "gap_size", ",", "num_memory_blocks", "=", "hparams", ".", "num_memory_blocks", ",", "name", "=", "\"self_attention\"", ")", "if", "is_4d", ":", "y", "=", "tf", ".", "reshape", "(", "y", ",", "x_shape", ")", "y", ".", "set_shape", "(", "[", "None", ",", "None", ",", "None", ",", "hparams", ".", "hidden_size", "]", ")", "return", "y" ]
Dilated 1d self attention.
[ "Dilated", "1d", "self", "attention", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L190-L222
22,549
tensorflow/tensor2tensor
tensor2tensor/layers/common_image_attention.py
local_global_attention
def local_global_attention(x, self_attention_bias, hparams, q_padding="LEFT", kv_padding="LEFT"): """Local and global 1d self attention.""" with tf.variable_scope("self_local_global_att"): [x_global, x_local] = tf.split(x, 2, axis=-1) split_hidden_size = int(hparams.hidden_size / 2) split_heads = int(hparams.num_heads / 2) if self_attention_bias is not None: self_attention_bias = get_self_attention_bias(x) y_global = common_attention.multihead_attention( x_global, None, self_attention_bias, hparams.attention_key_channels or split_hidden_size, hparams.attention_value_channels or split_hidden_size, split_hidden_size, split_heads, hparams.attention_dropout, q_filter_width=hparams.q_filter_width, kv_filter_width=hparams.kv_filter_width, q_padding=q_padding, kv_padding=kv_padding, name="global_self_att") y_local = common_attention.multihead_attention( x_local, None, None, hparams.attention_key_channels or split_hidden_size, hparams.attention_value_channels or split_hidden_size, split_hidden_size, split_heads, hparams.attention_dropout, attention_type="local_masked", block_length=hparams.block_length, block_width=hparams.block_width, q_filter_width=hparams.q_filter_width, kv_filter_width=hparams.kv_filter_width, q_padding=q_padding, kv_padding=kv_padding, name="local_self_att") y = tf.concat([y_global, y_local], axis=-1) return y
python
def local_global_attention(x, self_attention_bias, hparams, q_padding="LEFT", kv_padding="LEFT"): """Local and global 1d self attention.""" with tf.variable_scope("self_local_global_att"): [x_global, x_local] = tf.split(x, 2, axis=-1) split_hidden_size = int(hparams.hidden_size / 2) split_heads = int(hparams.num_heads / 2) if self_attention_bias is not None: self_attention_bias = get_self_attention_bias(x) y_global = common_attention.multihead_attention( x_global, None, self_attention_bias, hparams.attention_key_channels or split_hidden_size, hparams.attention_value_channels or split_hidden_size, split_hidden_size, split_heads, hparams.attention_dropout, q_filter_width=hparams.q_filter_width, kv_filter_width=hparams.kv_filter_width, q_padding=q_padding, kv_padding=kv_padding, name="global_self_att") y_local = common_attention.multihead_attention( x_local, None, None, hparams.attention_key_channels or split_hidden_size, hparams.attention_value_channels or split_hidden_size, split_hidden_size, split_heads, hparams.attention_dropout, attention_type="local_masked", block_length=hparams.block_length, block_width=hparams.block_width, q_filter_width=hparams.q_filter_width, kv_filter_width=hparams.kv_filter_width, q_padding=q_padding, kv_padding=kv_padding, name="local_self_att") y = tf.concat([y_global, y_local], axis=-1) return y
[ "def", "local_global_attention", "(", "x", ",", "self_attention_bias", ",", "hparams", ",", "q_padding", "=", "\"LEFT\"", ",", "kv_padding", "=", "\"LEFT\"", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"self_local_global_att\"", ")", ":", "[", "x_global", ",", "x_local", "]", "=", "tf", ".", "split", "(", "x", ",", "2", ",", "axis", "=", "-", "1", ")", "split_hidden_size", "=", "int", "(", "hparams", ".", "hidden_size", "/", "2", ")", "split_heads", "=", "int", "(", "hparams", ".", "num_heads", "/", "2", ")", "if", "self_attention_bias", "is", "not", "None", ":", "self_attention_bias", "=", "get_self_attention_bias", "(", "x", ")", "y_global", "=", "common_attention", ".", "multihead_attention", "(", "x_global", ",", "None", ",", "self_attention_bias", ",", "hparams", ".", "attention_key_channels", "or", "split_hidden_size", ",", "hparams", ".", "attention_value_channels", "or", "split_hidden_size", ",", "split_hidden_size", ",", "split_heads", ",", "hparams", ".", "attention_dropout", ",", "q_filter_width", "=", "hparams", ".", "q_filter_width", ",", "kv_filter_width", "=", "hparams", ".", "kv_filter_width", ",", "q_padding", "=", "q_padding", ",", "kv_padding", "=", "kv_padding", ",", "name", "=", "\"global_self_att\"", ")", "y_local", "=", "common_attention", ".", "multihead_attention", "(", "x_local", ",", "None", ",", "None", ",", "hparams", ".", "attention_key_channels", "or", "split_hidden_size", ",", "hparams", ".", "attention_value_channels", "or", "split_hidden_size", ",", "split_hidden_size", ",", "split_heads", ",", "hparams", ".", "attention_dropout", ",", "attention_type", "=", "\"local_masked\"", ",", "block_length", "=", "hparams", ".", "block_length", ",", "block_width", "=", "hparams", ".", "block_width", ",", "q_filter_width", "=", "hparams", ".", "q_filter_width", ",", "kv_filter_width", "=", "hparams", ".", "kv_filter_width", ",", "q_padding", "=", "q_padding", ",", "kv_padding", "=", "kv_padding", ",", "name", "=", "\"local_self_att\"", ")", "y", "=", "tf", ".", "concat", "(", "[", "y_global", ",", "y_local", "]", ",", "axis", "=", "-", "1", ")", "return", "y" ]
Local and global 1d self attention.
[ "Local", "and", "global", "1d", "self", "attention", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L225-L269
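The glocal layer gives each branch half the channels and half the heads; a toy check of the channel split it performs (numpy's split mirrors tf.split here):

import numpy as np

hidden_size, num_heads = 8, 4
x = np.arange(2 * 3 * hidden_size).reshape(2, 3, hidden_size)
x_global, x_local = np.split(x, 2, axis=-1)  # mirrors tf.split(x, 2, axis=-1)
assert x_global.shape == x_local.shape == (2, 3, hidden_size // 2)
assert num_heads // 2 == 2                   # each branch also gets half the heads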
22,550
tensorflow/tensor2tensor
tensor2tensor/layers/common_image_attention.py
full_self_attention
def full_self_attention(x, self_attention_bias, hparams, q_padding="LEFT", kv_padding="LEFT"): """Full self-attention layer.""" x, x_shape, is_4d = maybe_reshape_4d_to_3d(x) if self_attention_bias is not None: self_attention_bias = get_self_attention_bias(x) with tf.variable_scope("self_att"): y = common_attention.multihead_attention( x, None, self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, q_filter_width=hparams.q_filter_width, kv_filter_width=hparams.kv_filter_width, q_padding=q_padding, kv_padding=kv_padding, name="self_att") if is_4d: y = tf.reshape(y, [x_shape[0], x_shape[1], x_shape[2], x_shape[3]]) y.set_shape([None, None, None, hparams.hidden_size]) return y
python
def full_self_attention(x, self_attention_bias, hparams, q_padding="LEFT", kv_padding="LEFT"): """Full self-attention layer.""" x, x_shape, is_4d = maybe_reshape_4d_to_3d(x) if self_attention_bias is not None: self_attention_bias = get_self_attention_bias(x) with tf.variable_scope("self_att"): y = common_attention.multihead_attention( x, None, self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, q_filter_width=hparams.q_filter_width, kv_filter_width=hparams.kv_filter_width, q_padding=q_padding, kv_padding=kv_padding, name="self_att") if is_4d: y = tf.reshape(y, [x_shape[0], x_shape[1], x_shape[2], x_shape[3]]) y.set_shape([None, None, None, hparams.hidden_size]) return y
[ "def", "full_self_attention", "(", "x", ",", "self_attention_bias", ",", "hparams", ",", "q_padding", "=", "\"LEFT\"", ",", "kv_padding", "=", "\"LEFT\"", ")", ":", "x", ",", "x_shape", ",", "is_4d", "=", "maybe_reshape_4d_to_3d", "(", "x", ")", "if", "self_attention_bias", "is", "not", "None", ":", "self_attention_bias", "=", "get_self_attention_bias", "(", "x", ")", "with", "tf", ".", "variable_scope", "(", "\"self_att\"", ")", ":", "y", "=", "common_attention", ".", "multihead_attention", "(", "x", ",", "None", ",", "self_attention_bias", ",", "hparams", ".", "attention_key_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "attention_value_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "num_heads", ",", "hparams", ".", "attention_dropout", ",", "q_filter_width", "=", "hparams", ".", "q_filter_width", ",", "kv_filter_width", "=", "hparams", ".", "kv_filter_width", ",", "q_padding", "=", "q_padding", ",", "kv_padding", "=", "kv_padding", ",", "name", "=", "\"self_att\"", ")", "if", "is_4d", ":", "y", "=", "tf", ".", "reshape", "(", "y", ",", "[", "x_shape", "[", "0", "]", ",", "x_shape", "[", "1", "]", ",", "x_shape", "[", "2", "]", ",", "x_shape", "[", "3", "]", "]", ")", "y", ".", "set_shape", "(", "[", "None", ",", "None", ",", "None", ",", "hparams", ".", "hidden_size", "]", ")", "return", "y" ]
Full self-attention layer.
[ "Full", "self", "-", "attention", "layer", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L272-L299
22,551
tensorflow/tensor2tensor
tensor2tensor/layers/common_image_attention.py
transformer_decoder_layers
def transformer_decoder_layers(inputs, encoder_output, num_layers, hparams, self_attention_bias=None, encoder_decoder_attention_bias=None, attention_type=AttentionType.LOCAL_2D, losses=None, name="transformer"): """Multi layer transformer.""" x = inputs x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout) if attention_type == AttentionType.DILATED: assert len(hparams.gap_sizes) == num_layers for layer in range(num_layers): with tf.variable_scope("%s_layer_%d" % (name, layer)): # self-attention + skip connections if attention_type == AttentionType.LOCAL_2D: y = local_attention_2d(common_layers.layer_preprocess(x, hparams), hparams, attention_type="masked_local_attention_2d") elif attention_type == AttentionType.LOCAL_1D: y = local_attention_1d(common_layers.layer_preprocess(x, hparams), hparams, attention_type="local_mask_right", q_padding="LEFT", kv_padding="LEFT") elif attention_type == AttentionType.RELATIVE_LOCAL_1D: y = local_attention_1d( common_layers.layer_preprocess(x, hparams), hparams, attention_type="local_relative_mask_right", q_padding="LEFT", kv_padding="LEFT") elif attention_type == AttentionType.NON_CAUSAL_1D: y = local_attention_1d(common_layers.layer_preprocess(x, hparams), hparams, attention_type="local_unmasked", q_padding="VALID", kv_padding="VALID") elif attention_type == AttentionType.LOCAL_BLOCK: y = local_within_block_attention( common_layers.layer_preprocess(x, hparams), self_attention_bias, hparams, attention_type="local_within_block_mask_right", q_padding="LEFT", kv_padding="LEFT") elif attention_type == AttentionType.GLOCAL: y = local_global_attention(common_layers.layer_preprocess(x, hparams), self_attention_bias, hparams, q_padding="LEFT", kv_padding="LEFT") elif attention_type == AttentionType.DILATED: y = dilated_attention_1d(common_layers.layer_preprocess(x, hparams), hparams, q_padding="LEFT", kv_padding="LEFT", gap_size=hparams.gap_sizes[layer]) elif attention_type == AttentionType.GLOBAL: y = full_self_attention(common_layers.layer_preprocess(x, hparams), self_attention_bias, hparams, q_padding="LEFT", kv_padding="LEFT") x = common_layers.layer_postprocess(x, y, hparams) # enc-dec attention + skip connections if encoder_output is not None: y = encdec_attention_1d(common_layers.layer_preprocess(x, hparams), encoder_output, encoder_decoder_attention_bias, hparams) x = common_layers.layer_postprocess(x, y, hparams) # feed-fwd layers + skip connections y = ffn_layer(common_layers.layer_preprocess(x, hparams), hparams, losses=losses) x = common_layers.layer_postprocess(x, y, hparams) return common_layers.layer_preprocess(x, hparams)
python
def transformer_decoder_layers(inputs, encoder_output, num_layers, hparams, self_attention_bias=None, encoder_decoder_attention_bias=None, attention_type=AttentionType.LOCAL_2D, losses=None, name="transformer"): """Multi layer transformer.""" x = inputs x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout) if attention_type == AttentionType.DILATED: assert len(hparams.gap_sizes) == num_layers for layer in range(num_layers): with tf.variable_scope("%s_layer_%d" % (name, layer)): # self-attention + skip connections if attention_type == AttentionType.LOCAL_2D: y = local_attention_2d(common_layers.layer_preprocess(x, hparams), hparams, attention_type="masked_local_attention_2d") elif attention_type == AttentionType.LOCAL_1D: y = local_attention_1d(common_layers.layer_preprocess(x, hparams), hparams, attention_type="local_mask_right", q_padding="LEFT", kv_padding="LEFT") elif attention_type == AttentionType.RELATIVE_LOCAL_1D: y = local_attention_1d( common_layers.layer_preprocess(x, hparams), hparams, attention_type="local_relative_mask_right", q_padding="LEFT", kv_padding="LEFT") elif attention_type == AttentionType.NON_CAUSAL_1D: y = local_attention_1d(common_layers.layer_preprocess(x, hparams), hparams, attention_type="local_unmasked", q_padding="VALID", kv_padding="VALID") elif attention_type == AttentionType.LOCAL_BLOCK: y = local_within_block_attention( common_layers.layer_preprocess(x, hparams), self_attention_bias, hparams, attention_type="local_within_block_mask_right", q_padding="LEFT", kv_padding="LEFT") elif attention_type == AttentionType.GLOCAL: y = local_global_attention(common_layers.layer_preprocess(x, hparams), self_attention_bias, hparams, q_padding="LEFT", kv_padding="LEFT") elif attention_type == AttentionType.DILATED: y = dilated_attention_1d(common_layers.layer_preprocess(x, hparams), hparams, q_padding="LEFT", kv_padding="LEFT", gap_size=hparams.gap_sizes[layer]) elif attention_type == AttentionType.GLOBAL: y = full_self_attention(common_layers.layer_preprocess(x, hparams), self_attention_bias, hparams, q_padding="LEFT", kv_padding="LEFT") x = common_layers.layer_postprocess(x, y, hparams) # enc-dec attention + skip connections if encoder_output is not None: y = encdec_attention_1d(common_layers.layer_preprocess(x, hparams), encoder_output, encoder_decoder_attention_bias, hparams) x = common_layers.layer_postprocess(x, y, hparams) # feed-fwd layers + skip connections y = ffn_layer(common_layers.layer_preprocess(x, hparams), hparams, losses=losses) x = common_layers.layer_postprocess(x, y, hparams) return common_layers.layer_preprocess(x, hparams)
[ "def", "transformer_decoder_layers", "(", "inputs", ",", "encoder_output", ",", "num_layers", ",", "hparams", ",", "self_attention_bias", "=", "None", ",", "encoder_decoder_attention_bias", "=", "None", ",", "attention_type", "=", "AttentionType", ".", "LOCAL_2D", ",", "losses", "=", "None", ",", "name", "=", "\"transformer\"", ")", ":", "x", "=", "inputs", "x", "=", "tf", ".", "nn", ".", "dropout", "(", "x", ",", "1.0", "-", "hparams", ".", "layer_prepostprocess_dropout", ")", "if", "attention_type", "==", "AttentionType", ".", "DILATED", ":", "assert", "len", "(", "hparams", ".", "gap_sizes", ")", "==", "num_layers", "for", "layer", "in", "range", "(", "num_layers", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"%s_layer_%d\"", "%", "(", "name", ",", "layer", ")", ")", ":", "# self-attention + skip connections", "if", "attention_type", "==", "AttentionType", ".", "LOCAL_2D", ":", "y", "=", "local_attention_2d", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "hparams", ",", "attention_type", "=", "\"masked_local_attention_2d\"", ")", "elif", "attention_type", "==", "AttentionType", ".", "LOCAL_1D", ":", "y", "=", "local_attention_1d", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "hparams", ",", "attention_type", "=", "\"local_mask_right\"", ",", "q_padding", "=", "\"LEFT\"", ",", "kv_padding", "=", "\"LEFT\"", ")", "elif", "attention_type", "==", "AttentionType", ".", "RELATIVE_LOCAL_1D", ":", "y", "=", "local_attention_1d", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "hparams", ",", "attention_type", "=", "\"local_relative_mask_right\"", ",", "q_padding", "=", "\"LEFT\"", ",", "kv_padding", "=", "\"LEFT\"", ")", "elif", "attention_type", "==", "AttentionType", ".", "NON_CAUSAL_1D", ":", "y", "=", "local_attention_1d", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "hparams", ",", "attention_type", "=", "\"local_unmasked\"", ",", "q_padding", "=", "\"VALID\"", ",", "kv_padding", "=", "\"VALID\"", ")", "elif", "attention_type", "==", "AttentionType", ".", "LOCAL_BLOCK", ":", "y", "=", "local_within_block_attention", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "self_attention_bias", ",", "hparams", ",", "attention_type", "=", "\"local_within_block_mask_right\"", ",", "q_padding", "=", "\"LEFT\"", ",", "kv_padding", "=", "\"LEFT\"", ")", "elif", "attention_type", "==", "AttentionType", ".", "GLOCAL", ":", "y", "=", "local_global_attention", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "self_attention_bias", ",", "hparams", ",", "q_padding", "=", "\"LEFT\"", ",", "kv_padding", "=", "\"LEFT\"", ")", "elif", "attention_type", "==", "AttentionType", ".", "DILATED", ":", "y", "=", "dilated_attention_1d", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "hparams", ",", "q_padding", "=", "\"LEFT\"", ",", "kv_padding", "=", "\"LEFT\"", ",", "gap_size", "=", "hparams", ".", "gap_sizes", "[", "layer", "]", ")", "elif", "attention_type", "==", "AttentionType", ".", "GLOBAL", ":", "y", "=", "full_self_attention", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "self_attention_bias", ",", "hparams", ",", "q_padding", "=", "\"LEFT\"", ",", "kv_padding", "=", "\"LEFT\"", ")", "x", "=", "common_layers", ".", "layer_postprocess", "(", "x", ",", "y", ",", "hparams", ")", "# enc-dec attention + skip 
connections", "if", "encoder_output", "is", "not", "None", ":", "y", "=", "encdec_attention_1d", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "encoder_output", ",", "encoder_decoder_attention_bias", ",", "hparams", ")", "x", "=", "common_layers", ".", "layer_postprocess", "(", "x", ",", "y", ",", "hparams", ")", "# feed-fwd layers + skip connections", "y", "=", "ffn_layer", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "hparams", ",", "losses", "=", "losses", ")", "x", "=", "common_layers", ".", "layer_postprocess", "(", "x", ",", "y", ",", "hparams", ")", "return", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")" ]
Multi layer transformer.
[ "Multi", "layer", "transformer", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L327-L396
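Stripped of the attention-type dispatch, every decoder layer above follows the same pre/postprocess sandwich; a schematic sketch (in t2t, layer_preprocess typically normalizes and layer_postprocess applies dropout plus the residual add, per hparams):

def decoder_layer_sketch(x, self_attn, encdec_attn, ffn, pre, post):
    y = self_attn(pre(x))        # self-attention on the normed input
    x = post(x, y)               # residual add (+ dropout)
    if encdec_attn is not None:
        y = encdec_attn(pre(x))  # attend over the encoder output
        x = post(x, y)
    y = ffn(pre(x))              # position-wise feed-forward
    return post(x, y)            # second residual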
22,552
tensorflow/tensor2tensor
tensor2tensor/layers/common_image_attention.py
transformer_encoder_layers
def transformer_encoder_layers(inputs, num_layers, hparams, attention_type=AttentionType.GLOBAL, self_attention_bias=None, q_padding="VALID", kv_padding="VALID", name="transformer"): """Multi layer transformer encoder.""" x = inputs x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout) for layer in range(num_layers): # attention layers + skip connections with tf.variable_scope("%s_layer_%d" % (name, layer)): if attention_type == AttentionType.LOCAL_2D: y = local_attention_2d(common_layers.layer_preprocess(x, hparams), hparams, attention_type="local_attention_2d") elif attention_type == AttentionType.LOCAL_1D: y = local_attention_1d(common_layers.layer_preprocess(x, hparams), hparams, attention_type="local_unmasked", q_padding=q_padding, kv_padding=kv_padding) elif attention_type == AttentionType.GLOBAL: y = full_self_attention(common_layers.layer_preprocess(x, hparams), self_attention_bias, hparams, q_padding=q_padding, kv_padding=kv_padding) x = common_layers.layer_postprocess(x, y, hparams) # feed-fwd layer + skip connections y = ffn_layer(common_layers.layer_preprocess(x, hparams), hparams) x = common_layers.layer_postprocess(x, y, hparams) return common_layers.layer_preprocess(x, hparams)
python
def transformer_encoder_layers(inputs, num_layers, hparams, attention_type=AttentionType.GLOBAL, self_attention_bias=None, q_padding="VALID", kv_padding="VALID", name="transformer"): """Multi layer transformer encoder.""" x = inputs x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout) for layer in range(num_layers): # attention layers + skip connections with tf.variable_scope("%s_layer_%d" % (name, layer)): if attention_type == AttentionType.LOCAL_2D: y = local_attention_2d(common_layers.layer_preprocess(x, hparams), hparams, attention_type="local_attention_2d") elif attention_type == AttentionType.LOCAL_1D: y = local_attention_1d(common_layers.layer_preprocess(x, hparams), hparams, attention_type="local_unmasked", q_padding=q_padding, kv_padding=kv_padding) elif attention_type == AttentionType.GLOBAL: y = full_self_attention(common_layers.layer_preprocess(x, hparams), self_attention_bias, hparams, q_padding=q_padding, kv_padding=kv_padding) x = common_layers.layer_postprocess(x, y, hparams) # feed-fwd layer + skip connections y = ffn_layer(common_layers.layer_preprocess(x, hparams), hparams) x = common_layers.layer_postprocess(x, y, hparams) return common_layers.layer_preprocess(x, hparams)
[ "def", "transformer_encoder_layers", "(", "inputs", ",", "num_layers", ",", "hparams", ",", "attention_type", "=", "AttentionType", ".", "GLOBAL", ",", "self_attention_bias", "=", "None", ",", "q_padding", "=", "\"VALID\"", ",", "kv_padding", "=", "\"VALID\"", ",", "name", "=", "\"transformer\"", ")", ":", "x", "=", "inputs", "x", "=", "tf", ".", "nn", ".", "dropout", "(", "x", ",", "1.0", "-", "hparams", ".", "layer_prepostprocess_dropout", ")", "for", "layer", "in", "range", "(", "num_layers", ")", ":", "# attention layers + skip connections", "with", "tf", ".", "variable_scope", "(", "\"%s_layer_%d\"", "%", "(", "name", ",", "layer", ")", ")", ":", "if", "attention_type", "==", "AttentionType", ".", "LOCAL_2D", ":", "y", "=", "local_attention_2d", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "hparams", ",", "attention_type", "=", "\"local_attention_2d\"", ")", "elif", "attention_type", "==", "AttentionType", ".", "LOCAL_1D", ":", "y", "=", "local_attention_1d", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "hparams", ",", "attention_type", "=", "\"local_unmasked\"", ",", "q_padding", "=", "q_padding", ",", "kv_padding", "=", "kv_padding", ")", "elif", "attention_type", "==", "AttentionType", ".", "GLOBAL", ":", "y", "=", "full_self_attention", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "self_attention_bias", ",", "hparams", ",", "q_padding", "=", "q_padding", ",", "kv_padding", "=", "kv_padding", ")", "x", "=", "common_layers", ".", "layer_postprocess", "(", "x", ",", "y", ",", "hparams", ")", "# feed-fwd layer + skip connections", "y", "=", "ffn_layer", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "hparams", ")", "x", "=", "common_layers", ".", "layer_postprocess", "(", "x", ",", "y", ",", "hparams", ")", "return", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")" ]
Multi layer transformer encoder.
[ "Multi", "layer", "transformer", "encoder", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L399-L431
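The encoder variant differs from the decoder mainly in using unmasked attention and VALID padding, and it repeats the same if/elif dispatch over AttentionType. One alternative design is a lookup table of attention callables; the sketch below is a hypothetical refactor with stand-in functions, not the module's API:

# Hypothetical dispatch-table refactor of the if/elif chain over
# AttentionType values; the callables are stand-ins for
# local_attention_2d / local_attention_1d / full_self_attention.
def attend_local_2d(x, bias, hparams):
    return x  # stand-in

def attend_local_1d(x, bias, hparams):
    return x  # stand-in

def attend_global(x, bias, hparams):
    return x  # stand-in

ATTENTION_DISPATCH = {
    "local_2d": attend_local_2d,
    "local_1d": attend_local_1d,
    "global": attend_global,
}

def pick_attention(attention_type):
    # One lookup replaces the growing elif chain; unknown types fail loudly.
    try:
        return ATTENTION_DISPATCH[attention_type]
    except KeyError:
        raise ValueError("unknown attention type: %s" % attention_type)

attend = pick_attention("global")
print(attend([1.0, 2.0], None, None))   # [1.0, 2.0]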
22,553
tensorflow/tensor2tensor
tensor2tensor/layers/common_image_attention.py
ffn_layer
def ffn_layer(x, hparams, losses=None): """ffn layer transformer.""" with tf.variable_scope("ffn"): if hparams.ffn_layer == "none": return x if hparams.ffn_layer == "conv_hidden_relu": y = common_layers.dense_relu_dense( x, hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout) elif hparams.ffn_layer == "normed_conv_hidden_relu": y = common_layers.normed_conv_hidden_relu( x, hparams.norm_type, hparams.layer_norm_epsilon, hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout, norm_name="convnorm") elif hparams.ffn_layer == "self_attention_ffn": x_shape = tf.shape(x) x = tf.reshape(x, [x_shape[0], -1, hparams.hidden_size]) y = common_attention.ffn_self_attention_layer( x, hparams.filter_size, hparams.hidden_size, hparams.num_parts, hparams.attention_dropout, hparams.share_kv) y = tf.reshape(y, x_shape) elif hparams.ffn_layer == "local_moe_tpu": overhead = (hparams.moe_overhead_train if hparams.mode == tf.estimator.ModeKeys.TRAIN else hparams.moe_overhead_eval) x, x_shape, is_4d = maybe_reshape_4d_to_3d(x) y, loss = expert_utils.local_moe_tpu( x, hparams.filter_size // 2, hparams.hidden_size, hparams.moe_num_experts, overhead=overhead, loss_coef=hparams.moe_loss_coef) if is_4d: y = tf.reshape(y, x_shape) if losses is None: raise ValueError( "transformer_ffn_layer with type local_moe_tpu must pass in " "a losses list") losses.append(loss) else: assert hparams.ffn_layer == "glu_ffn" y = common_layers.gated_linear_unit_layer(x) return y
python
def ffn_layer(x, hparams, losses=None): """ffn layer transformer.""" with tf.variable_scope("ffn"): if hparams.ffn_layer == "none": return x if hparams.ffn_layer == "conv_hidden_relu": y = common_layers.dense_relu_dense( x, hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout) elif hparams.ffn_layer == "normed_conv_hidden_relu": y = common_layers.normed_conv_hidden_relu( x, hparams.norm_type, hparams.layer_norm_epsilon, hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout, norm_name="convnorm") elif hparams.ffn_layer == "self_attention_ffn": x_shape = tf.shape(x) x = tf.reshape(x, [x_shape[0], -1, hparams.hidden_size]) y = common_attention.ffn_self_attention_layer( x, hparams.filter_size, hparams.hidden_size, hparams.num_parts, hparams.attention_dropout, hparams.share_kv) y = tf.reshape(y, x_shape) elif hparams.ffn_layer == "local_moe_tpu": overhead = (hparams.moe_overhead_train if hparams.mode == tf.estimator.ModeKeys.TRAIN else hparams.moe_overhead_eval) x, x_shape, is_4d = maybe_reshape_4d_to_3d(x) y, loss = expert_utils.local_moe_tpu( x, hparams.filter_size // 2, hparams.hidden_size, hparams.moe_num_experts, overhead=overhead, loss_coef=hparams.moe_loss_coef) if is_4d: y = tf.reshape(y, x_shape) if losses is None: raise ValueError( "transformer_ffn_layer with type local_moe_tpu must pass in " "a losses list") losses.append(loss) else: assert hparams.ffn_layer == "glu_ffn" y = common_layers.gated_linear_unit_layer(x) return y
[ "def", "ffn_layer", "(", "x", ",", "hparams", ",", "losses", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"ffn\"", ")", ":", "if", "hparams", ".", "ffn_layer", "==", "\"none\"", ":", "return", "x", "if", "hparams", ".", "ffn_layer", "==", "\"conv_hidden_relu\"", ":", "y", "=", "common_layers", ".", "dense_relu_dense", "(", "x", ",", "hparams", ".", "filter_size", ",", "hparams", ".", "hidden_size", ",", "dropout", "=", "hparams", ".", "relu_dropout", ")", "elif", "hparams", ".", "ffn_layer", "==", "\"normed_conv_hidden_relu\"", ":", "y", "=", "common_layers", ".", "normed_conv_hidden_relu", "(", "x", ",", "hparams", ".", "norm_type", ",", "hparams", ".", "layer_norm_epsilon", ",", "hparams", ".", "filter_size", ",", "hparams", ".", "hidden_size", ",", "dropout", "=", "hparams", ".", "relu_dropout", ",", "norm_name", "=", "\"convnorm\"", ")", "elif", "hparams", ".", "ffn_layer", "==", "\"self_attention_ffn\"", ":", "x_shape", "=", "tf", ".", "shape", "(", "x", ")", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "x_shape", "[", "0", "]", ",", "-", "1", ",", "hparams", ".", "hidden_size", "]", ")", "y", "=", "common_attention", ".", "ffn_self_attention_layer", "(", "x", ",", "hparams", ".", "filter_size", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "num_parts", ",", "hparams", ".", "attention_dropout", ",", "hparams", ".", "share_kv", ")", "y", "=", "tf", ".", "reshape", "(", "y", ",", "x_shape", ")", "elif", "hparams", ".", "ffn_layer", "==", "\"local_moe_tpu\"", ":", "overhead", "=", "(", "hparams", ".", "moe_overhead_train", "if", "hparams", ".", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", "else", "hparams", ".", "moe_overhead_eval", ")", "x", ",", "x_shape", ",", "is_4d", "=", "maybe_reshape_4d_to_3d", "(", "x", ")", "y", ",", "loss", "=", "expert_utils", ".", "local_moe_tpu", "(", "x", ",", "hparams", ".", "filter_size", "//", "2", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "moe_num_experts", ",", "overhead", "=", "overhead", ",", "loss_coef", "=", "hparams", ".", "moe_loss_coef", ")", "if", "is_4d", ":", "y", "=", "tf", ".", "reshape", "(", "y", ",", "x_shape", ")", "if", "losses", "is", "None", ":", "raise", "ValueError", "(", "\"transformer_ffn_layer with type local_moe_tpu must pass in \"", "\"a losses list\"", ")", "losses", ".", "append", "(", "loss", ")", "else", ":", "assert", "hparams", ".", "ffn_layer", "==", "\"glu_ffn\"", "y", "=", "common_layers", ".", "gated_linear_unit_layer", "(", "x", ")", "return", "y" ]
ffn layer transformer.
[ "ffn", "layer", "transformer", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L434-L481
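The default "conv_hidden_relu" branch is a position-wise feed-forward network: expand the last dimension to filter_size, apply ReLU, project back to hidden_size. A minimal NumPy sketch of that shape contract (random stand-in weights, no dropout):

import numpy as np

def dense_relu_dense(x, filter_size, hidden_size, rng):
    # Position-wise FFN: expand to filter_size, ReLU, contract back.
    w1 = rng.standard_normal((x.shape[-1], filter_size)) * 0.02
    w2 = rng.standard_normal((filter_size, hidden_size)) * 0.02
    h = np.maximum(x @ w1, 0.0)   # ReLU
    return h @ w2

rng = np.random.default_rng(0)
x = rng.standard_normal((2, 8, 64))        # [batch, length, hidden_size]
y = dense_relu_dense(x, filter_size=256, hidden_size=64, rng=rng)
print(y.shape)                              # (2, 8, 64): same positions, same depth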
22,554
tensorflow/tensor2tensor
tensor2tensor/layers/common_image_attention.py
get_self_attention_bias
def get_self_attention_bias(x): """Creates masked self attention bias. Args: x: A tensor of shape [batch, length, depth] Returns: self_attention_bias: A tensor of shape [length, length, 1] """ x_shape = common_layers.shape_list(x) self_attention_bias = common_attention.attention_bias_lower_triangle( x_shape[1]) return self_attention_bias
python
def get_self_attention_bias(x): """Creates masked self attention bias. Args: x: A tensor of shape [batch, length, depth] Returns: self_attention_bias: A tensor of shape [length, length, 1] """ x_shape = common_layers.shape_list(x) self_attention_bias = common_attention.attention_bias_lower_triangle( x_shape[1]) return self_attention_bias
[ "def", "get_self_attention_bias", "(", "x", ")", ":", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "self_attention_bias", "=", "common_attention", ".", "attention_bias_lower_triangle", "(", "x_shape", "[", "1", "]", ")", "return", "self_attention_bias" ]
Creates masked self attention bias. Args: x: A tensor of shape [batch, length, depth] Returns: self_attention_bias: A tensor of shape [length, length, 1]
[ "Creates", "masked", "self", "attention", "bias", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L484-L497
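The bias works by adding a large negative number at every (query, key) pair where the key lies to the right of the query, so softmax drives those attention weights to approximately zero. A small NumPy reconstruction of the strictly-upper-triangle pattern (the real common_attention helper wraps this with leading broadcast dimensions):

import numpy as np

def lower_triangle_bias(length, neg=-1e9):
    # 0 where key position <= query position, large negative elsewhere,
    # so softmax assigns ~0 weight to future positions.
    mask = np.triu(np.ones((length, length)), k=1)   # 1s strictly above diagonal
    return mask * neg

bias = lower_triangle_bias(4)
print(bias)
# Row i has zeros up to column i and -1e9 beyond it:
# position i may attend to positions 0..i only.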
22,555
tensorflow/tensor2tensor
tensor2tensor/layers/common_image_attention.py
postprocess_image
def postprocess_image(x, rows, cols, hparams): """Postprocessing after decoding. Args: x: Tensor of shape [batch, ...], where ... can be any rank such that the number of elements in x is batch * rows * cols * hparams.hidden_size. rows: Integer representing number of rows in a 2-D data point. cols: Integer representing number of columns in a 2-D data point. hparams: HParams set. Returns: Tensor of shape [batch, rows, cols, depth], where depth is hparams.num_mixtures * 10 if hparams.likelihood is DMOL, otherwise 256. In the special case of inference and block raster scan order, it is a Tensor of shape [batch, num_blocks_rows, num_block_cols, block_length, block_width, depth]. """ batch = common_layers.shape_list(x)[0] x = tf.reshape(x, [batch, rows, cols, hparams.hidden_size]) likelihood = getattr(hparams, "likelihood", DistributionType.CAT) if likelihood == DistributionType.DMOL: depth = hparams.num_mixtures * 10 targets = tf.layers.dense(x, depth, use_bias=False, activation=None, name="output_conv") else: depth = 256 targets = tf.layers.dense(x, depth, use_bias=True, activation=None, name="output_conv") if (hparams.mode == tf.estimator.ModeKeys.PREDICT and hparams.block_raster_scan): y = targets yshape = common_layers.shape_list(y) block_length = hparams.query_shape[0] block_width = hparams.query_shape[1] # Break into block row wise. y = tf.reshape(y, [batch, yshape[1] // block_length, block_length, yshape[2], depth]) yshape = common_layers.shape_list(y) # Break into blocks width wise. y_blocks = tf.reshape(y, [batch, yshape[1], yshape[2], yshape[3] // block_width, block_width, depth]) # Reshape targets as [batch, num_blocks_rows, num_block_cols, block_length, # block_width, depth]. targets = tf.transpose(y_blocks, [0, 1, 3, 2, 4, 5]) return targets
python
def postprocess_image(x, rows, cols, hparams): """Postprocessing after decoding. Args: x: Tensor of shape [batch, ...], where ... can be any rank such that the number of elements in x is batch * rows * cols * hparams.hidden_size. rows: Integer representing number of rows in a 2-D data point. cols: Integer representing number of columns in a 2-D data point. hparams: HParams set. Returns: Tensor of shape [batch, rows, cols, depth], where depth is hparams.num_mixtures * 10 if hparams.likelihood is DMOL, otherwise 256. In the special case of inference and block raster scan order, it is a Tensor of shape [batch, num_blocks_rows, num_block_cols, block_length, block_width, depth]. """ batch = common_layers.shape_list(x)[0] x = tf.reshape(x, [batch, rows, cols, hparams.hidden_size]) likelihood = getattr(hparams, "likelihood", DistributionType.CAT) if likelihood == DistributionType.DMOL: depth = hparams.num_mixtures * 10 targets = tf.layers.dense(x, depth, use_bias=False, activation=None, name="output_conv") else: depth = 256 targets = tf.layers.dense(x, depth, use_bias=True, activation=None, name="output_conv") if (hparams.mode == tf.estimator.ModeKeys.PREDICT and hparams.block_raster_scan): y = targets yshape = common_layers.shape_list(y) block_length = hparams.query_shape[0] block_width = hparams.query_shape[1] # Break into block row wise. y = tf.reshape(y, [batch, yshape[1] // block_length, block_length, yshape[2], depth]) yshape = common_layers.shape_list(y) # Break into blocks width wise. y_blocks = tf.reshape(y, [batch, yshape[1], yshape[2], yshape[3] // block_width, block_width, depth]) # Reshape targets as [batch, num_blocks_rows, num_block_cols, block_length, # block_width, depth]. targets = tf.transpose(y_blocks, [0, 1, 3, 2, 4, 5]) return targets
[ "def", "postprocess_image", "(", "x", ",", "rows", ",", "cols", ",", "hparams", ")", ":", "batch", "=", "common_layers", ".", "shape_list", "(", "x", ")", "[", "0", "]", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "batch", ",", "rows", ",", "cols", ",", "hparams", ".", "hidden_size", "]", ")", "likelihood", "=", "getattr", "(", "hparams", ",", "\"likelihood\"", ",", "DistributionType", ".", "CAT", ")", "if", "likelihood", "==", "DistributionType", ".", "DMOL", ":", "depth", "=", "hparams", ".", "num_mixtures", "*", "10", "targets", "=", "tf", ".", "layers", ".", "dense", "(", "x", ",", "depth", ",", "use_bias", "=", "False", ",", "activation", "=", "None", ",", "name", "=", "\"output_conv\"", ")", "else", ":", "depth", "=", "256", "targets", "=", "tf", ".", "layers", ".", "dense", "(", "x", ",", "depth", ",", "use_bias", "=", "True", ",", "activation", "=", "None", ",", "name", "=", "\"output_conv\"", ")", "if", "(", "hparams", ".", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "PREDICT", "and", "hparams", ".", "block_raster_scan", ")", ":", "y", "=", "targets", "yshape", "=", "common_layers", ".", "shape_list", "(", "y", ")", "block_length", "=", "hparams", ".", "query_shape", "[", "0", "]", "block_width", "=", "hparams", ".", "query_shape", "[", "1", "]", "# Break into block row wise.", "y", "=", "tf", ".", "reshape", "(", "y", ",", "[", "batch", ",", "yshape", "[", "1", "]", "//", "block_length", ",", "block_length", ",", "yshape", "[", "2", "]", ",", "depth", "]", ")", "yshape", "=", "common_layers", ".", "shape_list", "(", "y", ")", "# Break into blocks width wise.", "y_blocks", "=", "tf", ".", "reshape", "(", "y", ",", "[", "batch", ",", "yshape", "[", "1", "]", ",", "yshape", "[", "2", "]", ",", "yshape", "[", "3", "]", "//", "block_width", ",", "block_width", ",", "depth", "]", ")", "# Reshape targets as [batch, num_blocks_rows, num_block_cols, block_length,", "# block_width, depth].", "targets", "=", "tf", ".", "transpose", "(", "y_blocks", ",", "[", "0", ",", "1", ",", "3", ",", "2", ",", "4", ",", "5", "]", ")", "return", "targets" ]
Postprocessing after decoding. Args: x: Tensor of shape [batch, ...], where ... can be any rank such that the number of elements in x is batch * rows * cols * hparams.hidden_size. rows: Integer representing number of rows in a 2-D data point. cols: Integer representing number of columns in a 2-D data point. hparams: HParams set. Returns: Tensor of shape [batch, rows, cols, depth], where depth is hparams.num_mixtures * 10 if hparams.likelihood is DMOL, otherwise 256. In the special case of inference and block raster scan order, it is a Tensor of shape [batch, num_blocks_rows, num_block_cols, block_length, block_width, depth].
[ "Postprocessing", "after", "decoding", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L500-L555
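The block-raster-scan branch is pure shape bookkeeping: rows are split into strips of block_length, columns into blocks of block_width, and a transpose brings the two block indices forward. The NumPy sketch below replays that bookkeeping on tiny dimensions (values and sizes are arbitrary):

import numpy as np

batch, rows, cols, depth = 2, 4, 6, 3
block_length, block_width = 2, 3            # stand-in for hparams.query_shape

y = np.arange(batch * rows * cols * depth, dtype=np.float32)
y = y.reshape(batch, rows, cols, depth)

# Break row-wise into strips of block_length rows.
y = y.reshape(batch, rows // block_length, block_length, cols, depth)
# Break each strip column-wise into blocks of block_width.
y = y.reshape(batch, rows // block_length, block_length,
              cols // block_width, block_width, depth)
# Bring the two block indices together, leaving within-block positions last.
y = y.transpose(0, 1, 3, 2, 4, 5)
print(y.shape)   # (2, 2, 2, 2, 3, 3) =
                 # [batch, num_block_rows, num_block_cols,
                 #  block_length, block_width, depth]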
22,556
tensorflow/tensor2tensor
tensor2tensor/layers/common_image_attention.py
prepare_encoder
def prepare_encoder(inputs, hparams, attention_type="local_1d"): """Prepare encoder for images.""" x = prepare_image(inputs, hparams, name="enc_channels") # Add position signals. x = add_pos_signals(x, hparams, "enc_pos") x_shape = common_layers.shape_list(x) if attention_type == "local_1d": x = tf.reshape(x, [x_shape[0], x_shape[1]*x_shape[2], hparams.hidden_size]) x.set_shape([None, None, hparams.hidden_size]) elif attention_type == "local_2d": x.set_shape([None, None, None, hparams.hidden_size]) return x
python
def prepare_encoder(inputs, hparams, attention_type="local_1d"): """Prepare encoder for images.""" x = prepare_image(inputs, hparams, name="enc_channels") # Add position signals. x = add_pos_signals(x, hparams, "enc_pos") x_shape = common_layers.shape_list(x) if attention_type == "local_1d": x = tf.reshape(x, [x_shape[0], x_shape[1]*x_shape[2], hparams.hidden_size]) x.set_shape([None, None, hparams.hidden_size]) elif attention_type == "local_2d": x.set_shape([None, None, None, hparams.hidden_size]) return x
[ "def", "prepare_encoder", "(", "inputs", ",", "hparams", ",", "attention_type", "=", "\"local_1d\"", ")", ":", "x", "=", "prepare_image", "(", "inputs", ",", "hparams", ",", "name", "=", "\"enc_channels\"", ")", "# Add position signals.", "x", "=", "add_pos_signals", "(", "x", ",", "hparams", ",", "\"enc_pos\"", ")", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "if", "attention_type", "==", "\"local_1d\"", ":", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "x_shape", "[", "0", "]", ",", "x_shape", "[", "1", "]", "*", "x_shape", "[", "2", "]", ",", "hparams", ".", "hidden_size", "]", ")", "x", ".", "set_shape", "(", "[", "None", ",", "None", ",", "hparams", ".", "hidden_size", "]", ")", "elif", "attention_type", "==", "\"local_2d\"", ":", "x", ".", "set_shape", "(", "[", "None", ",", "None", ",", "None", ",", "hparams", ".", "hidden_size", "]", ")", "return", "x" ]
Prepare encoder for images.
[ "Prepare", "encoder", "for", "images", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L558-L569
22,557
tensorflow/tensor2tensor
tensor2tensor/layers/common_image_attention.py
prepare_decoder
def prepare_decoder(targets, hparams): """Prepare decoder for images.""" targets_shape = common_layers.shape_list(targets) channels = hparams.num_channels curr_infer_length = None # during training, images are [batch, IMG_LEN, IMG_LEN, 3]. # At inference, they are [batch, curr_infer_length, 1, 1] if hparams.mode == tf.estimator.ModeKeys.PREDICT: curr_infer_length = targets_shape[1] if hparams.block_raster_scan: assert hparams.img_len*channels % hparams.query_shape[1] == 0 assert hparams.img_len % hparams.query_shape[0] == 0 total_block_width = hparams.img_len*channels # Decoding is in block raster scan order. We divide the image into # hparams.query_shape blocks and then decode each block in raster scan. # To make that compatible with our inference pipeline, pad the target so # that rows is a multiple of query_shape and columns is a multiple of # hparams.img_len*channels curr_infer_length = targets_shape[1] block_padding_factor = total_block_width * hparams.query_shape[0] targets = tf.pad(targets, [ [0, 0], [0, -curr_infer_length % block_padding_factor], [0, 0], [0, 0]]) num_blocks = total_block_width // hparams.query_shape[1] # Reshape the image to represent blocks target_blocks = tf.reshape( targets, [targets_shape[0], -1, num_blocks, hparams.query_shape[0], hparams.query_shape[1]]) # Transpose to read the image in 2D fashion. targets = tf.transpose(target_blocks, [0, 1, 3, 2, 4]) else: # add padding to make sure the size of targets is a multiple of img_height # times number of channels. This is needed for positional encodings and # for doing the RGB lookup. padding_factor = channels * hparams.img_len targets = tf.pad(targets, [ [0, 0], [0, -curr_infer_length % padding_factor], [0, 0], [0, 0]]) targets = tf.reshape(targets, [targets_shape[0], -1, hparams.img_len, channels]) # Preprocess image x = prepare_image(targets, hparams, name="dec_channels") x_shape = common_layers.shape_list(x) if (hparams.dec_attention_type == AttentionType.LOCAL_2D or hparams.dec_attention_type == AttentionType.LOCAL_BLOCK): x = common_attention.right_shift_blockwise(x, hparams.query_shape) x = add_pos_signals(x, hparams, "dec_pos") else: # Add position signals x = tf.reshape(x, [targets_shape[0], x_shape[1]*x_shape[2], hparams.hidden_size]) x = common_layers.shift_right_3d(x) x = tf.reshape(x, [targets_shape[0], x_shape[1], x_shape[2], hparams.hidden_size]) x = add_pos_signals(x, hparams, "dec_pos") x = common_layers.cast_like(x, targets) return x, x_shape[1], x_shape[2]
python
def prepare_decoder(targets, hparams): """Prepare decoder for images.""" targets_shape = common_layers.shape_list(targets) channels = hparams.num_channels curr_infer_length = None # during training, images are [batch, IMG_LEN, IMG_LEN, 3]. # At inference, they are [batch, curr_infer_length, 1, 1] if hparams.mode == tf.estimator.ModeKeys.PREDICT: curr_infer_length = targets_shape[1] if hparams.block_raster_scan: assert hparams.img_len*channels % hparams.query_shape[1] == 0 assert hparams.img_len % hparams.query_shape[0] == 0 total_block_width = hparams.img_len*channels # Decoding is in block raster scan order. We divide the image into # hparams.query_shape blocks and then decode each block in raster scan. # To make that compatible with our inference pipeline, pad the target so # that rows is a multiple of query_shape and columns is a multiple of # hparams.img_len*channels curr_infer_length = targets_shape[1] block_padding_factor = total_block_width * hparams.query_shape[0] targets = tf.pad(targets, [ [0, 0], [0, -curr_infer_length % block_padding_factor], [0, 0], [0, 0]]) num_blocks = total_block_width // hparams.query_shape[1] # Reshape the image to represent blocks target_blocks = tf.reshape( targets, [targets_shape[0], -1, num_blocks, hparams.query_shape[0], hparams.query_shape[1]]) # Transpose to read the image in 2D fashion. targets = tf.transpose(target_blocks, [0, 1, 3, 2, 4]) else: # add padding to make sure the size of targets is a multiple of img_height # times number of channels. This is needed for positional encodings and # for doing the RGB lookup. padding_factor = channels * hparams.img_len targets = tf.pad(targets, [ [0, 0], [0, -curr_infer_length % padding_factor], [0, 0], [0, 0]]) targets = tf.reshape(targets, [targets_shape[0], -1, hparams.img_len, channels]) # Preprocess image x = prepare_image(targets, hparams, name="dec_channels") x_shape = common_layers.shape_list(x) if (hparams.dec_attention_type == AttentionType.LOCAL_2D or hparams.dec_attention_type == AttentionType.LOCAL_BLOCK): x = common_attention.right_shift_blockwise(x, hparams.query_shape) x = add_pos_signals(x, hparams, "dec_pos") else: # Add position signals x = tf.reshape(x, [targets_shape[0], x_shape[1]*x_shape[2], hparams.hidden_size]) x = common_layers.shift_right_3d(x) x = tf.reshape(x, [targets_shape[0], x_shape[1], x_shape[2], hparams.hidden_size]) x = add_pos_signals(x, hparams, "dec_pos") x = common_layers.cast_like(x, targets) return x, x_shape[1], x_shape[2]
[ "def", "prepare_decoder", "(", "targets", ",", "hparams", ")", ":", "targets_shape", "=", "common_layers", ".", "shape_list", "(", "targets", ")", "channels", "=", "hparams", ".", "num_channels", "curr_infer_length", "=", "None", "# during training, images are [batch, IMG_LEN, IMG_LEN, 3].", "# At inference, they are [batch, curr_infer_length, 1, 1]", "if", "hparams", ".", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "PREDICT", ":", "curr_infer_length", "=", "targets_shape", "[", "1", "]", "if", "hparams", ".", "block_raster_scan", ":", "assert", "hparams", ".", "img_len", "*", "channels", "%", "hparams", ".", "query_shape", "[", "1", "]", "==", "0", "assert", "hparams", ".", "img_len", "%", "hparams", ".", "query_shape", "[", "0", "]", "==", "0", "total_block_width", "=", "hparams", ".", "img_len", "*", "channels", "# Decoding is in block raster scan order. We divide the image into", "# hparams.query_shape blocks and then decode each block in raster scan.", "# To make that compatible with our inference pipeline, pad the target so", "# that rows is a multiple of query_shape and columns is a multiple of", "# hparams.img_len*channels", "curr_infer_length", "=", "targets_shape", "[", "1", "]", "block_padding_factor", "=", "total_block_width", "*", "hparams", ".", "query_shape", "[", "0", "]", "targets", "=", "tf", ".", "pad", "(", "targets", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "-", "curr_infer_length", "%", "block_padding_factor", "]", ",", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", "]", ")", "num_blocks", "=", "total_block_width", "//", "hparams", ".", "query_shape", "[", "1", "]", "# Reshape the image to represent blocks", "target_blocks", "=", "tf", ".", "reshape", "(", "targets", ",", "[", "targets_shape", "[", "0", "]", ",", "-", "1", ",", "num_blocks", ",", "hparams", ".", "query_shape", "[", "0", "]", ",", "hparams", ".", "query_shape", "[", "1", "]", "]", ")", "# Transpose to read the image in 2D fashion.", "targets", "=", "tf", ".", "transpose", "(", "target_blocks", ",", "[", "0", ",", "1", ",", "3", ",", "2", ",", "4", "]", ")", "else", ":", "# add padding to make sure the size of targets is a multiple of img_height", "# times number of channels. 
This is needed for positional encodings and", "# for doing the RGB lookup.", "padding_factor", "=", "channels", "*", "hparams", ".", "img_len", "targets", "=", "tf", ".", "pad", "(", "targets", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "-", "curr_infer_length", "%", "padding_factor", "]", ",", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", "]", ")", "targets", "=", "tf", ".", "reshape", "(", "targets", ",", "[", "targets_shape", "[", "0", "]", ",", "-", "1", ",", "hparams", ".", "img_len", ",", "channels", "]", ")", "# Preprocess image", "x", "=", "prepare_image", "(", "targets", ",", "hparams", ",", "name", "=", "\"dec_channels\"", ")", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "if", "(", "hparams", ".", "dec_attention_type", "==", "AttentionType", ".", "LOCAL_2D", "or", "hparams", ".", "dec_attention_type", "==", "AttentionType", ".", "LOCAL_BLOCK", ")", ":", "x", "=", "common_attention", ".", "right_shift_blockwise", "(", "x", ",", "hparams", ".", "query_shape", ")", "x", "=", "add_pos_signals", "(", "x", ",", "hparams", ",", "\"dec_pos\"", ")", "else", ":", "# Add position signals", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "targets_shape", "[", "0", "]", ",", "x_shape", "[", "1", "]", "*", "x_shape", "[", "2", "]", ",", "hparams", ".", "hidden_size", "]", ")", "x", "=", "common_layers", ".", "shift_right_3d", "(", "x", ")", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "targets_shape", "[", "0", "]", ",", "x_shape", "[", "1", "]", ",", "x_shape", "[", "2", "]", ",", "hparams", ".", "hidden_size", "]", ")", "x", "=", "add_pos_signals", "(", "x", ",", "hparams", ",", "\"dec_pos\"", ")", "x", "=", "common_layers", ".", "cast_like", "(", "x", ",", "targets", ")", "return", "x", ",", "x_shape", "[", "1", "]", ",", "x_shape", "[", "2", "]" ]
Prepare decoder for images.
[ "Prepare", "decoder", "for", "images", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L572-L629
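Both padding computations rely on Python's negative modulo: -n % m is exactly the amount needed to round n up to the next multiple of m, which is why tf.pad receives [0, -curr_infer_length % padding_factor]. A quick self-contained check:

def pad_to_multiple(n, m):
    # -n % m == amount needed to round n up to the next multiple of m.
    return -n % m

for n in (0, 1, 7, 8, 9):
    k = pad_to_multiple(n, 8)
    print(n, "->", n + k, "(pad", k, ")")
# 0 -> 0 (pad 0), 1 -> 8 (pad 7), 7 -> 8 (pad 1), 8 -> 8 (pad 0), 9 -> 16 (pad 7)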
22,558
tensorflow/tensor2tensor
tensor2tensor/layers/common_image_attention.py
create_output
def create_output(decoder_output, rows, cols, targets, hparams): """Creates output from decoder output and vars. Args: decoder_output: Tensor of shape [batch, ...], where ... can be any rank such that the number of elements is batch * rows * cols * hparams.hidden_size. rows: Integer representing number of rows in a 2-D data point. cols: Integer representing number of columns in a 2-D data point. targets: Tensor of shape [batch, hparams.img_len, hparams.img_len, hparams.num_channels]. hparams: HParams set. Returns: Tensor of shape [batch, hparams.img_len, hparams.img_len, hparams.num_mixtures * 10] if hparams.likelihood is DMOL, otherwise [batch, hparams.img_len, hparams.img_len, hparams.num_channels, 256]. In the special case of predict mode, it is a Tensor of rank 5. """ del targets # unused arg decoded_image = postprocess_image(decoder_output, rows, cols, hparams) batch = common_layers.shape_list(decoded_image)[0] depth = common_layers.shape_list(decoded_image)[-1] likelihood = getattr(hparams, "likelihood", DistributionType.CAT) if hparams.mode == tf.estimator.ModeKeys.PREDICT: y = tf.reshape(decoded_image, [batch, -1, 1, 1, depth]) output = y[:, :rows, :, :, :] elif likelihood == DistributionType.CAT: # Unpack the cols dimension of the Categorical. channels = hparams.num_channels output = tf.reshape(decoded_image, [batch, rows, cols // channels, channels, depth]) else: output = decoded_image return output
python
def create_output(decoder_output, rows, cols, targets, hparams): """Creates output from decoder output and vars. Args: decoder_output: Tensor of shape [batch, ...], where ... can be any rank such that the number of elements is batch * rows * cols * hparams.hidden_size. rows: Integer representing number of rows in a 2-D data point. cols: Integer representing number of columns in a 2-D data point. targets: Tensor of shape [batch, hparams.img_len, hparams.img_len, hparams.num_channels]. hparams: HParams set. Returns: Tensor of shape [batch, hparams.img_len, hparams.img_len, hparams.num_mixtures * 10] if hparams.likelihood is DMOL, otherwise [batch, hparams.img_len, hparams.img_len, hparams.num_channels, 256]. In the special case of predict mode, it is a Tensor of rank 5. """ del targets # unused arg decoded_image = postprocess_image(decoder_output, rows, cols, hparams) batch = common_layers.shape_list(decoded_image)[0] depth = common_layers.shape_list(decoded_image)[-1] likelihood = getattr(hparams, "likelihood", DistributionType.CAT) if hparams.mode == tf.estimator.ModeKeys.PREDICT: y = tf.reshape(decoded_image, [batch, -1, 1, 1, depth]) output = y[:, :rows, :, :, :] elif likelihood == DistributionType.CAT: # Unpack the cols dimension of the Categorical. channels = hparams.num_channels output = tf.reshape(decoded_image, [batch, rows, cols // channels, channels, depth]) else: output = decoded_image return output
[ "def", "create_output", "(", "decoder_output", ",", "rows", ",", "cols", ",", "targets", ",", "hparams", ")", ":", "del", "targets", "# unused arg", "decoded_image", "=", "postprocess_image", "(", "decoder_output", ",", "rows", ",", "cols", ",", "hparams", ")", "batch", "=", "common_layers", ".", "shape_list", "(", "decoded_image", ")", "[", "0", "]", "depth", "=", "common_layers", ".", "shape_list", "(", "decoded_image", ")", "[", "-", "1", "]", "likelihood", "=", "getattr", "(", "hparams", ",", "\"likelihood\"", ",", "DistributionType", ".", "CAT", ")", "if", "hparams", ".", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "PREDICT", ":", "y", "=", "tf", ".", "reshape", "(", "decoded_image", ",", "[", "batch", ",", "-", "1", ",", "1", ",", "1", ",", "depth", "]", ")", "output", "=", "y", "[", ":", ",", ":", "rows", ",", ":", ",", ":", ",", ":", "]", "elif", "likelihood", "==", "DistributionType", ".", "CAT", ":", "# Unpack the cols dimension of the Categorical.", "channels", "=", "hparams", ".", "num_channels", "output", "=", "tf", ".", "reshape", "(", "decoded_image", ",", "[", "batch", ",", "rows", ",", "cols", "//", "channels", ",", "channels", ",", "depth", "]", ")", "else", ":", "output", "=", "decoded_image", "return", "output" ]
Creates output from decoder output and vars. Args: decoder_output: Tensor of shape [batch, ...], where ... can be any rank such that the number of elements is batch * rows * cols * hparams.hidden_size. rows: Integer representing number of rows in a 2-D data point. cols: Integer representing number of columns in a 2-D data point. targets: Tensor of shape [batch, hparams.img_len, hparams.img_len, hparams.num_channels]. hparams: HParams set. Returns: Tensor of shape [batch, hparams.img_len, hparams.img_len, hparams.num_mixtures * 10] if hparams.likelihood is DMOL, otherwise [batch, hparams.img_len, hparams.img_len, hparams.num_channels, 256]. In the special case of predict mode, it is a Tensor of rank 5.
[ "Creates", "output", "from", "decoder", "output", "and", "vars", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L639-L672
22,559
tensorflow/tensor2tensor
tensor2tensor/layers/common_image_attention.py
get_channel_embeddings
def get_channel_embeddings(io_depth, targets, hidden_size, name="channel"): """Get separate embedding for each of the channels.""" targets_split = tf.split(targets, io_depth, axis=3) rgb_embedding_var = tf.get_variable("rgb_target_emb_%s" % name, [256 * io_depth, hidden_size]) rgb_embedding_var = tf.identity(rgb_embedding_var) rgb_embedding_var *= float(hidden_size)**0.5 channel_target_embs = [] for i in range(io_depth): # Adding the channel offsets to get the right embedding since the # embedding tensor has shape 256 * io_depth, hidden_size target_ids = tf.squeeze(targets_split[i], axis=3) + i * 256 target_embs = common_layers.gather(rgb_embedding_var, target_ids) channel_target_embs.append(target_embs) return tf.concat(channel_target_embs, axis=-1)
python
def get_channel_embeddings(io_depth, targets, hidden_size, name="channel"): """Get separate embedding for each of the channels.""" targets_split = tf.split(targets, io_depth, axis=3) rgb_embedding_var = tf.get_variable("rgb_target_emb_%s" % name, [256 * io_depth, hidden_size]) rgb_embedding_var = tf.identity(rgb_embedding_var) rgb_embedding_var *= float(hidden_size)**0.5 channel_target_embs = [] for i in range(io_depth): # Adding the channel offsets to get the right embedding since the # embedding tensor has shape 256 * io_depth, hidden_size target_ids = tf.squeeze(targets_split[i], axis=3) + i * 256 target_embs = common_layers.gather(rgb_embedding_var, target_ids) channel_target_embs.append(target_embs) return tf.concat(channel_target_embs, axis=-1)
[ "def", "get_channel_embeddings", "(", "io_depth", ",", "targets", ",", "hidden_size", ",", "name", "=", "\"channel\"", ")", ":", "targets_split", "=", "tf", ".", "split", "(", "targets", ",", "io_depth", ",", "axis", "=", "3", ")", "rgb_embedding_var", "=", "tf", ".", "get_variable", "(", "\"rgb_target_emb_%s\"", "%", "name", ",", "[", "256", "*", "io_depth", ",", "hidden_size", "]", ")", "rgb_embedding_var", "=", "tf", ".", "identity", "(", "rgb_embedding_var", ")", "rgb_embedding_var", "*=", "float", "(", "hidden_size", ")", "**", "0.5", "channel_target_embs", "=", "[", "]", "for", "i", "in", "range", "(", "io_depth", ")", ":", "# Adding the channel offsets to get the right embedding since the", "# embedding tensor has shape 256 * io_depth, hidden_size", "target_ids", "=", "tf", ".", "squeeze", "(", "targets_split", "[", "i", "]", ",", "axis", "=", "3", ")", "+", "i", "*", "256", "target_embs", "=", "common_layers", ".", "gather", "(", "rgb_embedding_var", ",", "target_ids", ")", "channel_target_embs", ".", "append", "(", "target_embs", ")", "return", "tf", ".", "concat", "(", "channel_target_embs", ",", "axis", "=", "-", "1", ")" ]
Get separate embedding for each of the channels.
[ "Get", "separate", "embedding", "for", "each", "of", "the", "channels", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L675-L690
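The offset i * 256 is what lets a single [256 * io_depth, hidden_size] variable act as io_depth separate 256-entry embedding tables: channel i's byte value v looks up row v + 256 * i. A NumPy reconstruction of the gather-and-concat (random stand-in table, no rescaling):

import numpy as np

io_depth, hidden_size = 3, 4
rng = np.random.default_rng(0)
table = rng.standard_normal((256 * io_depth, hidden_size))  # shared RGB table

targets = rng.integers(0, 256, size=(2, 5, 5, io_depth))    # [batch, h, w, channels]
embs = []
for i in range(io_depth):
    ids = targets[..., i] + i * 256      # offset selects channel i's sub-table
    embs.append(table[ids])              # gather: [batch, h, w, hidden_size]
out = np.concatenate(embs, axis=-1)
print(out.shape)                         # (2, 5, 5, 12) = hidden_size * io_depth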
22,560
tensorflow/tensor2tensor
tensor2tensor/data_generators/wiki_revision_utils.py
include_revision
def include_revision(revision_num, skip_factor=1.1): """Decide whether to include a revision. If the number of revisions is large, we exclude some revisions to avoid a quadratic blowup in runtime, since the article is likely also large. We make the ratio between consecutive included revision numbers approximately equal to "factor". Args: revision_num: an integer skip_factor: a floating point number >= 1.0 Returns: a boolean """ if skip_factor <= 1.0: return True return (int(math.log1p(revision_num) / math.log(skip_factor)) != int( math.log(revision_num + 2.0) / math.log(skip_factor)))
python
def include_revision(revision_num, skip_factor=1.1): """Decide whether to include a revision. If the number of revisions is large, we exclude some revisions to avoid a quadratic blowup in runtime, since the article is likely also large. We make the ratio between consecutive included revision numbers approximately equal to "factor". Args: revision_num: an integer skip_factor: a floating point number >= 1.0 Returns: a boolean """ if skip_factor <= 1.0: return True return (int(math.log1p(revision_num) / math.log(skip_factor)) != int( math.log(revision_num + 2.0) / math.log(skip_factor)))
[ "def", "include_revision", "(", "revision_num", ",", "skip_factor", "=", "1.1", ")", ":", "if", "skip_factor", "<=", "1.0", ":", "return", "True", "return", "(", "int", "(", "math", ".", "log1p", "(", "revision_num", ")", "/", "math", ".", "log", "(", "skip_factor", ")", ")", "!=", "int", "(", "math", ".", "log", "(", "revision_num", "+", "2.0", ")", "/", "math", ".", "log", "(", "skip_factor", ")", ")", ")" ]
Decide whether to include a revision. If the number of revisions is large, we exclude some revisions to avoid a quadratic blowup in runtime, since the article is likely also large. We make the ratio between consecutive included revision numbers approximately equal to "factor". Args: revision_num: an integer skip_factor: a floating point number >= 1.0 Returns: a boolean
[ "Decide", "whether", "to", "include", "a", "revision", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki_revision_utils.py#L36-L55
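The log trick keeps revision r exactly when int(log(r+1)/log(skip_factor)) differs from int(log(r+2)/log(skip_factor)), so the kept revision numbers form a roughly geometric sequence with ratio skip_factor. A self-contained run showing which early revisions survive:

import math

def include_revision(revision_num, skip_factor=1.1):
    if skip_factor <= 1.0:
        return True
    return (int(math.log1p(revision_num) / math.log(skip_factor)) !=
            int(math.log(revision_num + 2.0) / math.log(skip_factor)))

for factor in (1.5, 2.0):
    kept = [r for r in range(40) if include_revision(r, factor)]
    print(factor, kept)
# The kept indices thin out roughly geometrically as the factor grows.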
22,561
tensorflow/tensor2tensor
tensor2tensor/data_generators/wiki_revision_utils.py
file_page_generator
def file_page_generator(my_file, max_page_size=2**28): """Read wikipedia pages from a history dump. Since some pages can be terabytes in size (with all the revisions), we limit page size to max_page_size bytes. Args: my_file: an open file object. max_page_size: an integer Yields: strings """ page_start = " <page>\n" page_end = " </page>\n" chunk_size = max_page_size page_start = " <page>\n" page_end = " </page>\n" leftovers = "" while True: chunk = my_file.read(chunk_size) if not chunk: break chunk = leftovers + chunk current_pos = 0 while True: start_pos = chunk.find(page_start, current_pos) if start_pos == -1: break end_pos = chunk.find(page_end, start_pos) if end_pos == -1: if len(chunk) - start_pos > max_page_size: leftovers = "" else: leftovers = chunk[start_pos:] break raw_page = chunk[start_pos + len(page_start):end_pos] if len(raw_page) < max_page_size: ret = parse_page(raw_page) if ret: yield ret current_pos = end_pos + len(page_end)
python
def file_page_generator(my_file, max_page_size=2**28): """Read wikipedia pages from a history dump. Since some pages can be terabytes in size (with all the revisions), we limit page size to max_page_size bytes. Args: my_file: an open file object. max_page_size: an integer Yields: strings """ page_start = " <page>\n" page_end = " </page>\n" chunk_size = max_page_size page_start = " <page>\n" page_end = " </page>\n" leftovers = "" while True: chunk = my_file.read(chunk_size) if not chunk: break chunk = leftovers + chunk current_pos = 0 while True: start_pos = chunk.find(page_start, current_pos) if start_pos == -1: break end_pos = chunk.find(page_end, start_pos) if end_pos == -1: if len(chunk) - start_pos > max_page_size: leftovers = "" else: leftovers = chunk[start_pos:] break raw_page = chunk[start_pos + len(page_start):end_pos] if len(raw_page) < max_page_size: ret = parse_page(raw_page) if ret: yield ret current_pos = end_pos + len(page_end)
[ "def", "file_page_generator", "(", "my_file", ",", "max_page_size", "=", "2", "**", "28", ")", ":", "page_start", "=", "\" <page>\\n\"", "page_end", "=", "\" </page>\\n\"", "chunk_size", "=", "max_page_size", "page_start", "=", "\" <page>\\n\"", "page_end", "=", "\" </page>\\n\"", "leftovers", "=", "\"\"", "while", "True", ":", "chunk", "=", "my_file", ".", "read", "(", "chunk_size", ")", "if", "not", "chunk", ":", "break", "chunk", "=", "leftovers", "+", "chunk", "current_pos", "=", "0", "while", "True", ":", "start_pos", "=", "chunk", ".", "find", "(", "page_start", ",", "current_pos", ")", "if", "start_pos", "==", "-", "1", ":", "break", "end_pos", "=", "chunk", ".", "find", "(", "page_end", ",", "start_pos", ")", "if", "end_pos", "==", "-", "1", ":", "if", "len", "(", "chunk", ")", "-", "start_pos", ">", "max_page_size", ":", "leftovers", "=", "\"\"", "else", ":", "leftovers", "=", "chunk", "[", "start_pos", ":", "]", "break", "raw_page", "=", "chunk", "[", "start_pos", "+", "len", "(", "page_start", ")", ":", "end_pos", "]", "if", "len", "(", "raw_page", ")", "<", "max_page_size", ":", "ret", "=", "parse_page", "(", "raw_page", ")", "if", "ret", ":", "yield", "ret", "current_pos", "=", "end_pos", "+", "len", "(", "page_end", ")" ]
Read wikipedia pages from a history dump. Since some pages can be terabytes in size (with all the revisions), we limit page size to max_page_size bytes. Args: my_file: an open file object. max_page_size: an integer Yields: strings
[ "Read", "wikipedia", "pages", "from", "a", "history", "dump", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki_revision_utils.py#L58-L99
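The chunk-plus-leftovers loop is a general pattern for pulling delimited records out of a stream too large to hold in memory: read a chunk, prepend the unresolved tail of the previous chunk, emit every complete record, and carry the trailing partial record forward. A self-contained miniature with toy delimiters and deliberately tiny chunks (simplified: a start tag straddling a chunk boundary is dropped here, and no size cap is enforced):

import io

def record_generator(stream, start="<r>", end="</r>", chunk_size=8):
    leftovers = ""
    while True:
        chunk = stream.read(chunk_size)
        if not chunk:
            break
        chunk = leftovers + chunk
        pos = 0
        while True:
            s = chunk.find(start, pos)
            if s == -1:
                leftovers = ""          # nothing unresolved in this chunk
                break
            e = chunk.find(end, s)
            if e == -1:
                leftovers = chunk[s:]   # carry the partial record forward
                break
            yield chunk[s + len(start):e]
            pos = e + len(end)

stream = io.StringIO("junk<r>alpha</r>mid<r>beta</r>tail")
print(list(record_generator(stream)))   # ['alpha', 'beta']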
22,562
tensorflow/tensor2tensor
tensor2tensor/data_generators/wiki_revision_utils.py
get_id
def get_id(page): """Extract the id from a page. Args: page: a string Returns: an integer """ start_pos = page.find("<id>") end_pos = page.find("</id>") assert start_pos != -1 assert end_pos != -1 start_pos += len("<id>") return int(page[start_pos:end_pos])
python
def get_id(page): """Extract the id from a page. Args: page: a string Returns: an integer """ start_pos = page.find("<id>") end_pos = page.find("</id>") assert start_pos != -1 assert end_pos != -1 start_pos += len("<id>") return int(page[start_pos:end_pos])
[ "def", "get_id", "(", "page", ")", ":", "start_pos", "=", "page", ".", "find", "(", "\"<id>\"", ")", "end_pos", "=", "page", ".", "find", "(", "\"</id>\"", ")", "assert", "start_pos", "!=", "-", "1", "assert", "end_pos", "!=", "-", "1", "start_pos", "+=", "len", "(", "\"<id>\"", ")", "return", "int", "(", "page", "[", "start_pos", ":", "end_pos", "]", ")" ]
Extract the id from a page. Args: page: a string Returns: an integer
[ "Extract", "the", "id", "from", "a", "page", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki_revision_utils.py#L118-L131
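Because str.find returns the first match, the function picks up the first <id> in the page, which in a MediaWiki dump is the page id rather than any of the later per-revision ids. A toy check of that behavior:

page = (
    "  <title>Example</title>\n"
    "  <id>123</id>\n"
    "  <revision>\n    <id>456</id>\n  </revision>\n"
)

start = page.find("<id>") + len("<id>")
end = page.find("</id>")
print(int(page[start:end]))   # 123, the page id, not the revision id 456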
22,563
tensorflow/tensor2tensor
tensor2tensor/data_generators/wiki_revision_utils.py
get_revisions
def get_revisions(page): """Extract the revisions of a page. Args: page: a string Returns: a list of strings """ start_string = " <revision>\n" end_string = " </revision>\n" ret = [] current_pos = 0 while True: start_pos = page.find(start_string, current_pos) if start_pos == -1: break end_pos = page.find(end_string, start_pos) assert end_pos != -1 ret.append(page[start_pos + len(start_string):end_pos]) current_pos = end_pos + len(end_string) return ret
python
def get_revisions(page): """Extract the revisions of a page. Args: page: a string Returns: a list of strings """ start_string = " <revision>\n" end_string = " </revision>\n" ret = [] current_pos = 0 while True: start_pos = page.find(start_string, current_pos) if start_pos == -1: break end_pos = page.find(end_string, start_pos) assert end_pos != -1 ret.append(page[start_pos + len(start_string):end_pos]) current_pos = end_pos + len(end_string) return ret
[ "def", "get_revisions", "(", "page", ")", ":", "start_string", "=", "\" <revision>\\n\"", "end_string", "=", "\" </revision>\\n\"", "ret", "=", "[", "]", "current_pos", "=", "0", "while", "True", ":", "start_pos", "=", "page", ".", "find", "(", "start_string", ",", "current_pos", ")", "if", "start_pos", "==", "-", "1", ":", "break", "end_pos", "=", "page", ".", "find", "(", "end_string", ",", "start_pos", ")", "assert", "end_pos", "!=", "-", "1", "ret", ".", "append", "(", "page", "[", "start_pos", "+", "len", "(", "start_string", ")", ":", "end_pos", "]", ")", "current_pos", "=", "end_pos", "+", "len", "(", "end_string", ")", "return", "ret" ]
Extract the revisions of a page. Args: page: a string Returns: a list of strings
[ "Extract", "the", "revisions", "of", "a", "page", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki_revision_utils.py#L134-L154
22,564
tensorflow/tensor2tensor
tensor2tensor/data_generators/wiki_revision_utils.py
parse_page
def parse_page(raw_page): """Create a dictionary with title, id, and list of revisions. The dictionary contains: "title": a string "id": an integer "revisions": a list of strings Args: raw_page: a string Returns: a dictionary, or None in the case of an error. """ ret = {"title": get_title(raw_page), "id": get_id(raw_page)} if ":" in ret["title"]: return None ret["revisions"] = get_revisions(raw_page) return ret
python
def parse_page(raw_page): """Create a dictionary with title, id, and list of revisions. The dictionary contains: "title": a string "id": an integer "revisions": a list of strings Args: raw_page: a string Returns: a dictionary, or None in the case of an error. """ ret = {"title": get_title(raw_page), "id": get_id(raw_page)} if ":" in ret["title"]: return None ret["revisions"] = get_revisions(raw_page) return ret
[ "def", "parse_page", "(", "raw_page", ")", ":", "ret", "=", "{", "\"title\"", ":", "get_title", "(", "raw_page", ")", ",", "\"id\"", ":", "get_id", "(", "raw_page", ")", "}", "if", "\":\"", "in", "ret", "[", "\"title\"", "]", ":", "return", "None", "ret", "[", "\"revisions\"", "]", "=", "get_revisions", "(", "raw_page", ")", "return", "ret" ]
Create a dictionary with title, id, and list of revisions. The dictionary contains: "title": a string "id": an integer "revisions": a list of strings Args: raw_page: a string Returns: a dictionary, or None in the case of an error.
[ "Create", "a", "dictionary", "with", "title", "id", "and", "list", "of", "revisions", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki_revision_utils.py#L157-L175
22,565
tensorflow/tensor2tensor
tensor2tensor/data_generators/wiki_revision_utils.py
maybe_copy_file_to_directory
def maybe_copy_file_to_directory(source_filepath, target_directory): """Copy a file to a directory if it is not already there. Returns the target filepath. Args: source_filepath: a string target_directory: a string Returns: a string """ if not tf.gfile.Exists(target_directory): tf.logging.info("Creating directory %s" % target_directory) os.mkdir(target_directory) target_filepath = os.path.join(target_directory, os.path.basename(source_filepath)) if not tf.gfile.Exists(target_filepath): tf.logging.info("Copying %s to %s" % (source_filepath, target_filepath)) tf.gfile.Copy(source_filepath, target_filepath) statinfo = os.stat(target_filepath) tf.logging.info("Successfully copied %s, %s bytes." % (target_filepath, statinfo.st_size)) else: tf.logging.info("Not copying, file already found: %s" % target_filepath) return target_filepath
python
def maybe_copy_file_to_directory(source_filepath, target_directory): """Copy a file to a directory if it is not already there. Returns the target filepath. Args: source_filepath: a string target_directory: a string Returns: a string """ if not tf.gfile.Exists(target_directory): tf.logging.info("Creating directory %s" % target_directory) os.mkdir(target_directory) target_filepath = os.path.join(target_directory, os.path.basename(source_filepath)) if not tf.gfile.Exists(target_filepath): tf.logging.info("Copying %s to %s" % (source_filepath, target_filepath)) tf.gfile.Copy(source_filepath, target_filepath) statinfo = os.stat(target_filepath) tf.logging.info("Successfully copied %s, %s bytes." % (target_filepath, statinfo.st_size)) else: tf.logging.info("Not copying, file already found: %s" % target_filepath) return target_filepath
[ "def", "maybe_copy_file_to_directory", "(", "source_filepath", ",", "target_directory", ")", ":", "if", "not", "tf", ".", "gfile", ".", "Exists", "(", "target_directory", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Creating directory %s\"", "%", "target_directory", ")", "os", ".", "mkdir", "(", "target_directory", ")", "target_filepath", "=", "os", ".", "path", ".", "join", "(", "target_directory", ",", "os", ".", "path", ".", "basename", "(", "source_filepath", ")", ")", "if", "not", "tf", ".", "gfile", ".", "Exists", "(", "target_filepath", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Copying %s to %s\"", "%", "(", "source_filepath", ",", "target_filepath", ")", ")", "tf", ".", "gfile", ".", "Copy", "(", "source_filepath", ",", "target_filepath", ")", "statinfo", "=", "os", ".", "stat", "(", "target_filepath", ")", "tf", ".", "logging", ".", "info", "(", "\"Successfully copied %s, %s bytes.\"", "%", "(", "target_filepath", ",", "statinfo", ".", "st_size", ")", ")", "else", ":", "tf", ".", "logging", ".", "info", "(", "\"Not copying, file already found: %s\"", "%", "target_filepath", ")", "return", "target_filepath" ]
Copy a file to a directory if it is not already there. Returns the target filepath. Args: source_filepath: a string target_directory: a string Returns: a string
[ "Copy", "a", "file", "to", "a", "directory", "if", "it", "is", "not", "already", "there", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki_revision_utils.py#L178-L203
22,566
tensorflow/tensor2tensor
tensor2tensor/data_generators/wiki_revision_utils.py
corpus_page_generator
def corpus_page_generator(corpus_files, tmp_dir, max_page_size_exp): """Generate pages from a list of .7z encoded history dumps. Args: corpus_files: a list of strings tmp_dir: a string max_page_size_exp: an integer Yields: strings """ for remote_filepath in corpus_files: filepath = maybe_copy_file_to_directory(remote_filepath, tmp_dir) tf.logging.info("Reading from " + filepath) command = ["7z", "x", "-so", filepath] tf.logging.info("Running command: %s", command) p = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=-1) for page in file_page_generator(p.stdout, 2**max_page_size_exp): yield page
python
def corpus_page_generator(corpus_files, tmp_dir, max_page_size_exp): """Generate pages from a list of .7z encoded history dumps. Args: corpus_files: a list of strings tmp_dir: a string max_page_size_exp: an integer Yields: strings """ for remote_filepath in corpus_files: filepath = maybe_copy_file_to_directory(remote_filepath, tmp_dir) tf.logging.info("Reading from " + filepath) command = ["7z", "x", "-so", filepath] tf.logging.info("Running command: %s", command) p = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=-1) for page in file_page_generator(p.stdout, 2**max_page_size_exp): yield page
[ "def", "corpus_page_generator", "(", "corpus_files", ",", "tmp_dir", ",", "max_page_size_exp", ")", ":", "for", "remote_filepath", "in", "corpus_files", ":", "filepath", "=", "maybe_copy_file_to_directory", "(", "remote_filepath", ",", "tmp_dir", ")", "tf", ".", "logging", ".", "info", "(", "\"Reading from \"", "+", "filepath", ")", "command", "=", "[", "\"7z\"", ",", "\"x\"", ",", "\"-so\"", ",", "filepath", "]", "tf", ".", "logging", ".", "info", "(", "\"Running command: %s\"", ",", "command", ")", "p", "=", "subprocess", ".", "Popen", "(", "command", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "bufsize", "=", "-", "1", ")", "for", "page", "in", "file_page_generator", "(", "p", ".", "stdout", ",", "2", "**", "max_page_size_exp", ")", ":", "yield", "page" ]
Generate pages from a list of .7z encoded history dumps. Args: corpus_files: a list of strings tmp_dir: a string max_page_size_exp: an integer Yields: strings
[ "Generate", "pages", "from", "a", "list", "of", ".", "7z", "encoded", "history", "dumps", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki_revision_utils.py#L206-L228
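Piping "7z x -so" through Popen means the decompressed dump is consumed as a stream and never written to disk. A generic, self-contained sketch of the same pattern, using a Python child process in place of 7z so it runs anywhere:

import subprocess
import sys

# Spawn a child that writes to stdout; read it incrementally, the same
# way corpus_page_generator consumes "7z x -so <file>".
child = [sys.executable, "-c",
         "import sys\n"
         "for i in range(3): sys.stdout.write('line %d\\n' % i)"]
p = subprocess.Popen(child, stdout=subprocess.PIPE, bufsize=-1)
for raw in p.stdout:                 # stream; never materialize everything
    print("got:", raw.decode().strip())
p.wait()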
22,567
tensorflow/tensor2tensor
tensor2tensor/data_generators/wiki_revision_utils.py
get_text
def get_text(revision, strip=True): """Extract the text from a revision. Args: revision: a string strip: a boolean Returns: a string """ # text start tag looks like "<text ..otherstuff>" start_pos = revision.find("<text") assert start_pos != -1 end_tag_pos = revision.find(">", start_pos) assert end_tag_pos != -1 end_tag_pos += len(">") end_pos = revision.find("</text>") if end_pos == -1: ret = "" else: ret = revision[end_tag_pos:end_pos] if strip: ret = strip_text(ret) ret = text_encoder.to_unicode_utf8(ret) return ret
python
def get_text(revision, strip=True): """Extract the text from a revision. Args: revision: a string strip: a boolean Returns: a string """ # text start tag looks like "<text ..otherstuff>" start_pos = revision.find("<text") assert start_pos != -1 end_tag_pos = revision.find(">", start_pos) assert end_tag_pos != -1 end_tag_pos += len(">") end_pos = revision.find("</text>") if end_pos == -1: ret = "" else: ret = revision[end_tag_pos:end_pos] if strip: ret = strip_text(ret) ret = text_encoder.to_unicode_utf8(ret) return ret
[ "def", "get_text", "(", "revision", ",", "strip", "=", "True", ")", ":", "# text start tag looks like \"<text ..otherstuff>\"", "start_pos", "=", "revision", ".", "find", "(", "\"<text\"", ")", "assert", "start_pos", "!=", "-", "1", "end_tag_pos", "=", "revision", ".", "find", "(", "\">\"", ",", "start_pos", ")", "assert", "end_tag_pos", "!=", "-", "1", "end_tag_pos", "+=", "len", "(", "\">\"", ")", "end_pos", "=", "revision", ".", "find", "(", "\"</text>\"", ")", "if", "end_pos", "==", "-", "1", ":", "ret", "=", "\"\"", "else", ":", "ret", "=", "revision", "[", "end_tag_pos", ":", "end_pos", "]", "if", "strip", ":", "ret", "=", "strip_text", "(", "ret", ")", "ret", "=", "text_encoder", ".", "to_unicode_utf8", "(", "ret", ")", "return", "ret" ]
Extract the text from a revision. Args: revision: a string strip: a boolean Returns: a string
[ "Extract", "the", "text", "from", "a", "revision", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki_revision_utils.py#L231-L255
22,568
tensorflow/tensor2tensor
tensor2tensor/data_generators/wiki_revision_utils.py
_remove_curly_braces
def _remove_curly_braces(text): """Remove everything in curly braces. Curly braces may be nested, so we keep track of depth. Args: text: a string Returns: a string """ current_pos = 0 depth = 0 ret = "" for match in re.finditer("[{}]", text): if depth == 0: ret += text[current_pos:match.start()] depth += 1 if text[match.start()] == "{" else -1 current_pos = match.end() if depth != 0: # Many articles have mismatched braces, but it still seems better to remove # them than not. pass else: ret += text[current_pos:] return ret
python
def _remove_curly_braces(text): """Remove everything in curly braces. Curly braces may be nested, so we keep track of depth. Args: text: a string Returns: a string """ current_pos = 0 depth = 0 ret = "" for match in re.finditer("[{}]", text): if depth == 0: ret += text[current_pos:match.start()] depth += 1 if text[match.start()] == "{" else -1 current_pos = match.end() if depth != 0: # Many articles have mismatched braces, but it still seems better to remove # them than not. pass else: ret += text[current_pos:] return ret
[ "def", "_remove_curly_braces", "(", "text", ")", ":", "current_pos", "=", "0", "depth", "=", "0", "ret", "=", "\"\"", "for", "match", "in", "re", ".", "finditer", "(", "\"[{}]\"", ",", "text", ")", ":", "if", "depth", "==", "0", ":", "ret", "+=", "text", "[", "current_pos", ":", "match", ".", "start", "(", ")", "]", "depth", "+=", "1", "if", "text", "[", "match", ".", "start", "(", ")", "]", "==", "\"{\"", "else", "-", "1", "current_pos", "=", "match", ".", "end", "(", ")", "if", "depth", "!=", "0", ":", "# Many articles have mismatched braces, but it still seems better to remove", "# them than not.", "pass", "else", ":", "ret", "+=", "text", "[", "current_pos", ":", "]", "return", "ret" ]
Remove everything in curly braces. Curly braces may be nested, so we keep track of depth. Args: text: a string Returns: a string
[ "Remove", "everything", "in", "curly", "braces", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki_revision_utils.py#L316-L340
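A worked example of the depth tracking, assuming the function is importable: three opening braces are cancelled by three closing ones, so everything inside the outermost pair is dropped and only the surrounding text survives:

print(_remove_curly_braces("keep {{Infobox |junk {nested}}} this"))
# -> "keep  this" (the entire nested {{...}} span is removed; the two
#    spaces are what remains on either side of it)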
22,569
tensorflow/tensor2tensor
tensor2tensor/data_generators/wiki_revision_utils.py
_remove_double_brackets
def _remove_double_brackets(text): """Remove double brackets, but leave the viewable text. Args: text: a string Returns: a string """ def replacement_fn(s): if ":" in s: # this is probably a category or something like that. return "" # keep the part after the bar. bar_pos = s.find("|") if bar_pos == -1: return s return s[bar_pos + 1:] return _find_and_replace(text, "[[", "]]", replacement_fn)
python
def _remove_double_brackets(text): """Remove double brackets, but leave the viewable text. Args: text: a string Returns: a string """ def replacement_fn(s): if ":" in s: # this is probably a category or something like that. return "" # keep the part after the bar. bar_pos = s.find("|") if bar_pos == -1: return s return s[bar_pos + 1:] return _find_and_replace(text, "[[", "]]", replacement_fn)
[ "def", "_remove_double_brackets", "(", "text", ")", ":", "def", "replacement_fn", "(", "s", ")", ":", "if", "\":\"", "in", "s", ":", "# this is probably a category or something like that.", "return", "\"\"", "# keep the part after the bar.", "bar_pos", "=", "s", ".", "find", "(", "\"|\"", ")", "if", "bar_pos", "==", "-", "1", ":", "return", "s", "return", "s", "[", "bar_pos", "+", "1", ":", "]", "return", "_find_and_replace", "(", "text", ",", "\"[[\"", ",", "\"]]\"", ",", "replacement_fn", ")" ]
Remove double brackets, but leave the viewable text. Args: text: a string Returns: a string
[ "Remove", "double", "brackets", "but", "leave", "the", "viewable", "text", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki_revision_utils.py#L343-L362
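_find_and_replace is defined elsewhere in this module, but the inner rule can be checked in isolation; this is a standalone copy for illustration only:

def replacement_fn(s):
    if ":" in s:            # category/namespace links are dropped entirely
        return ""
    bar_pos = s.find("|")   # piped links keep only the display text
    return s if bar_pos == -1 else s[bar_pos + 1:]

assert replacement_fn("Category:Births") == ""
assert replacement_fn("Paris|the French capital") == "the French capital"
assert replacement_fn("Paris") == "Paris"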
22,570
tensorflow/tensor2tensor
tensor2tensor/data_generators/wiki_revision_utils.py
_remove_boring_lines
def _remove_boring_lines(text): """Remove lines that do not start with a letter or a quote. From inspecting the data, this seems to leave in most prose and remove most weird stuff. Args: text: a string Returns: a string """ lines = text.split("\n") filtered = [line for line in lines if re.match("[a-zA-Z\"\']", line)] return "\n".join(filtered)
python
def _remove_boring_lines(text): """Remove lines that do not start with a letter or a quote. From inspecting the data, this seems to leave in most prose and remove most weird stuff. Args: text: a string Returns: a string """ lines = text.split("\n") filtered = [line for line in lines if re.match("[a-zA-Z\"\']", line)] return "\n".join(filtered)
[ "def", "_remove_boring_lines", "(", "text", ")", ":", "lines", "=", "text", ".", "split", "(", "\"\\n\"", ")", "filtered", "=", "[", "line", "for", "line", "in", "lines", "if", "re", ".", "match", "(", "\"[a-zA-z\\\"\\']\"", ",", "line", ")", "]", "return", "\"\\n\"", ".", "join", "(", "filtered", ")" ]
Remove lines that do not start with a letter or a quote. From inspecting the data, this seems to leave in most prose and remove most weird stuff. Args: text: a string Returns: a string
[ "Remove", "lines", "that", "do", "not", "start", "with", "a", "letter", "or", "a", "quote", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki_revision_utils.py#L365-L378
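A small demonstration, assuming the function is importable — lines opening with wiki markup or punctuation are dropped, while prose and quoted dialogue survive:

text = 'A normal sentence.\n| class="wikitable"\n"Quoted" dialogue.\n== Heading =='
print(_remove_boring_lines(text))
# -> A normal sentence.
#    "Quoted" dialogue.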
22,571
tensorflow/tensor2tensor
tensor2tensor/data_generators/wiki_revision_utils.py
get_or_generate_vocabulary
def get_or_generate_vocabulary(data_dir, tmp_dir, data_prefix, max_page_size_exp, approx_vocab_size=32768, strip=True): """Get or generate the vocabulary. Args: data_dir: a string tmp_dir: a string data_prefix: a string max_page_size_exp: an integer approx_vocab_size: an integer strip: a boolean Returns: a TextEncoder """ num_pages_for_vocab_generation = approx_vocab_size // 3 vocab_file = vocab_filename(approx_vocab_size, strip) def my_generator(data_prefix): """Line generator for vocab.""" count = 0 for page in corpus_page_generator( all_corpus_files(data_prefix)[::-1], tmp_dir, max_page_size_exp): revisions = page["revisions"] if revisions: text = get_text(revisions[-1], strip=strip) yield text count += 1 if count % 100 == 0: tf.logging.info("reading pages for vocab %d" % count) if count > num_pages_for_vocab_generation: break return generator_utils.get_or_generate_vocab_inner(data_dir, vocab_file, approx_vocab_size, my_generator(data_prefix))
python
def get_or_generate_vocabulary(data_dir, tmp_dir, data_prefix, max_page_size_exp, approx_vocab_size=32768, strip=True): """Get or generate the vocabulary. Args: data_dir: a string tmp_dir: a string data_prefix: a string max_page_size_exp: an integer approx_vocab_size: an integer strip: a boolean Returns: a TextEncoder """ num_pages_for_vocab_generation = approx_vocab_size // 3 vocab_file = vocab_filename(approx_vocab_size, strip) def my_generator(data_prefix): """Line generator for vocab.""" count = 0 for page in corpus_page_generator( all_corpus_files(data_prefix)[::-1], tmp_dir, max_page_size_exp): revisions = page["revisions"] if revisions: text = get_text(revisions[-1], strip=strip) yield text count += 1 if count % 100 == 0: tf.logging.info("reading pages for vocab %d" % count) if count > num_pages_for_vocab_generation: break return generator_utils.get_or_generate_vocab_inner(data_dir, vocab_file, approx_vocab_size, my_generator(data_prefix))
[ "def", "get_or_generate_vocabulary", "(", "data_dir", ",", "tmp_dir", ",", "data_prefix", ",", "max_page_size_exp", ",", "approx_vocab_size", "=", "32768", ",", "strip", "=", "True", ")", ":", "num_pages_for_vocab_generation", "=", "approx_vocab_size", "//", "3", "vocab_file", "=", "vocab_filename", "(", "approx_vocab_size", ",", "strip", ")", "def", "my_generator", "(", "data_prefix", ")", ":", "\"\"\"Line generator for vocab.\"\"\"", "count", "=", "0", "for", "page", "in", "corpus_page_generator", "(", "all_corpus_files", "(", "data_prefix", ")", "[", ":", ":", "-", "1", "]", ",", "tmp_dir", ",", "max_page_size_exp", ")", ":", "revisions", "=", "page", "[", "\"revisions\"", "]", "if", "revisions", ":", "text", "=", "get_text", "(", "revisions", "[", "-", "1", "]", ",", "strip", "=", "strip", ")", "yield", "text", "count", "+=", "1", "if", "count", "%", "100", "==", "0", ":", "tf", ".", "logging", ".", "info", "(", "\"reading pages for vocab %d\"", "%", "count", ")", "if", "count", ">", "num_pages_for_vocab_generation", ":", "break", "return", "generator_utils", ".", "get_or_generate_vocab_inner", "(", "data_dir", ",", "vocab_file", ",", "approx_vocab_size", ",", "my_generator", "(", "data_prefix", ")", ")" ]
Get or generate the vocabulary. Args: data_dir: a string tmp_dir: a string data_prefix: a string max_page_size_exp: an integer approx_vocab_size: an integer strip: a boolean Returns: a TextEncoder
[ "Get", "or", "generate", "the", "vocabulary", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki_revision_utils.py#L401-L440
22,572
tensorflow/tensor2tensor
tensor2tensor/data_generators/wiki_revision_utils.py
get_encoder_from_vocab
def get_encoder_from_vocab(vocab_filepath): """Get encoder from vocab file. If vocab is not found in output dir, it will be copied there by copy_vocab_to_output_dir to clarify the vocab used to generate the data. Args: vocab_filepath: path to vocab, either local or cns Returns: A SubwordTextEncoder vocabulary object. """ if not tf.gfile.Exists(vocab_filepath): raise ValueError("Vocab file does not exist: {}.".format(vocab_filepath)) tf.logging.info("Found vocab file: %s", vocab_filepath) encoder = text_encoder.SubwordTextEncoder(vocab_filepath) return encoder
python
def get_encoder_from_vocab(vocab_filepath): """Get encoder from vocab file. If vocab is not found in output dir, it will be copied there by copy_vocab_to_output_dir to clarify the vocab used to generate the data. Args: vocab_filepath: path to vocab, either local or cns Returns: A SubwordTextEncoder vocabulary object. """ if not tf.gfile.Exists(vocab_filepath): raise ValueError("Vocab file does not exist: {}.".format(vocab_filepath)) tf.logging.info("Found vocab file: %s", vocab_filepath) encoder = text_encoder.SubwordTextEncoder(vocab_filepath) return encoder
[ "def", "get_encoder_from_vocab", "(", "vocab_filepath", ")", ":", "if", "not", "tf", ".", "gfile", ".", "Exists", "(", "vocab_filepath", ")", ":", "raise", "ValueError", "(", "\"Vocab file does not exist: {}.\"", ".", "format", "(", "vocab_filepath", ")", ")", "tf", ".", "logging", ".", "info", "(", "\"Found vocab file: %s\"", ",", "vocab_filepath", ")", "encoder", "=", "text_encoder", ".", "SubwordTextEncoder", "(", "vocab_filepath", ")", "return", "encoder" ]
Get encoder from vocab file. If vocab is not found in output dir, it will be copied there by copy_vocab_to_output_dir to clarify the vocab used to generate the data. Args: vocab_filepath: path to vocab, either local or cns Returns: A SubwordTextEncoder vocabulary object.
[ "Get", "encoder", "from", "vocab", "file", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki_revision_utils.py#L443-L461
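Typical round-trip usage; the vocab path here is hypothetical, so this runs only once such a file exists:

encoder = get_encoder_from_vocab("/tmp/vocab.wiki_revision.32768")  # hypothetical path
ids = encoder.encode("Some revision text")
assert encoder.decode(ids) == "Some revision text"  # subword encoding is invertible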
22,573
tensorflow/tensor2tensor
tensor2tensor/data_generators/wiki_revision_utils.py
edit_distance_filter
def edit_distance_filter(source_target_input, max_equal_to_diff_ratio=0): """Filter out examples that exceed max_equal_to_diff_ratio between source and target. Args: source_target_input: a list of [source, target] pairs max_equal_to_diff_ratio: cutoff for the ratio of diff chars / equal chars between source and target Returns: source_target_output: filtered subset of [source, target] input pairs thrown_out_count: number of examples filtered out """ thrown_out_count = 0 source_target_output = [] if not max_equal_to_diff_ratio: return source_target_input, thrown_out_count for src_tgt in source_target_input: opcodes = fast_match_sequences(*src_tgt) diff_char_count = 0 equal_char_count = 0 for tag, i1, i2, j1, j2 in opcodes: if tag == "diff": # max() prevents double-counting substitutions. diff_char_count += max(i2 - i1, j2 - j1) else: equal_char_count += i2 - i1 if diff_char_count <= max_equal_to_diff_ratio * equal_char_count: source_target_output.append(src_tgt) else: thrown_out_count += 1 return source_target_output, thrown_out_count
python
def edit_distance_filter(source_target_input, max_equal_to_diff_ratio=0): """Filter out examples that exceed max_equal_to_diff_ratio between source and target. Args: source_target_input: a list of [source, target] pairs max_equal_to_diff_ratio: cutoff for the ratio of diff chars / equal chars between source and target Returns: source_target_output: filtered subset of [source, target] input pairs thrown_out_count: number of examples filtered out """ thrown_out_count = 0 source_target_output = [] if not max_equal_to_diff_ratio: return source_target_input, thrown_out_count for src_tgt in source_target_input: opcodes = fast_match_sequences(*src_tgt) diff_char_count = 0 equal_char_count = 0 for tag, i1, i2, j1, j2 in opcodes: if tag == "diff": # max() prevents double-counting substitutions. diff_char_count += max(i2 - i1, j2 - j1) else: equal_char_count += i2 - i1 if diff_char_count <= max_equal_to_diff_ratio * equal_char_count: source_target_output.append(src_tgt) else: thrown_out_count += 1 return source_target_output, thrown_out_count
[ "def", "edit_distance_filter", "(", "source_target_input", ",", "max_equal_to_diff_ratio", "=", "0", ")", ":", "thrown_out_count", "=", "0", "source_target_output", "=", "[", "]", "if", "not", "max_equal_to_diff_ratio", ":", "return", "source_target_input", ",", "thrown_out_count", "for", "src_tgt", "in", "source_target_input", ":", "opcodes", "=", "fast_match_sequences", "(", "*", "src_tgt", ")", "diff_char_count", "=", "0", "equal_char_count", "=", "0", "for", "tag", ",", "i1", ",", "i2", ",", "j1", ",", "j2", "in", "opcodes", ":", "if", "tag", "==", "\"diff\"", ":", "# max() prevents double-counting substitutions.", "diff_char_count", "+=", "max", "(", "i2", "-", "i1", ",", "j2", "-", "j1", ")", "else", ":", "equal_char_count", "+=", "i2", "-", "i1", "if", "diff_char_count", "<=", "max_equal_to_diff_ratio", "*", "equal_char_count", ":", "source_target_output", ".", "append", "(", "src_tgt", ")", "else", ":", "thrown_out_count", "+=", "1", "return", "source_target_output", ",", "thrown_out_count" ]
Filter out examples that exceed max_equal_to_diff_ratio between source and target. Args: source_target_input: a list of [source, target] pairs max_equal_to_diff_ratio: cutoff for the ratio of diff chars / equal chars between source and target Returns: source_target_output: filtered subset of [source, target] input pairs thrown_out_count: number of examples filtered out
[ "Filter", "out", "examples", "that", "exceed", "max_edit_ratio", "between", "source", "and", "target", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki_revision_utils.py#L476-L508
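A worked instance of the cutoff arithmetic, assuming the matcher aligns the pair on its shared prefix and suffix: "the cat sat" vs "the dog sat" yields 8 equal characters ("the " plus " sat") and 3 differing ones, and 3 <= 0.5 * 8, so the pair survives a ratio of 0.5:

kept, dropped = edit_distance_filter(
    [("the cat sat", "the dog sat")], max_equal_to_diff_ratio=0.5)
assert len(kept) == 1 and dropped == 0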
22,574
tensorflow/tensor2tensor
tensor2tensor/data_generators/wiki_revision_utils.py
introduce_errors
def introduce_errors(s, corruption_rate=3e-3, infill_marker="|?|", max_infill_len=8): """Artificially add spelling errors and infill markers. This function should be applied to the inputs of a correction model. The artificial errors are particularly useful to train a network to correct spelling when the training data does not contain many natural errors. Also replaces some substrings with an "infill" marker. e.g. "the fat cat sat on the mat" -> "the fat ca??? the mat" This causes the trained model to learn infilling (predicting what text to insert at the current cursor position). Args: s: a string (the uncorrupted text) corruption_rate: a floating point value. Probability of introducing an error/infill at each character. infill_marker: a string max_infill_len: an optional integer - maximum number of characters to remove and replace by an infill marker. None means no infilling. Returns: a string (the corrupted text) and an integer (the number of errors introduced) """ num_errors = 0 ret = [] operations = [ "delete", # delete a character "insert", # insert a random character from the input string "replace", # replace a character with a random character from # the input string "transpose", # transpose two adjacent characters ] if max_infill_len: operations.append("infill") pos = 0 while pos < len(s): if random.random() >= corruption_rate: ret.append(s[pos]) pos += 1 continue num_errors += 1 operation = operations[random.randint(0, len(operations) - 1)] if operation == "delete": pos += 1 elif operation == "insert": ret.append(s[random.randint(0, len(s) - 1)]) elif operation == "replace": ret.append(s[random.randint(0, len(s) - 1)]) pos += 1 elif operation == "transpose": ret.append(s[pos + 1] if pos + 1 < len(s) else "") ret.append(s[pos]) pos += 2 else: assert operation == "infill" ret.append(infill_marker) pos += random.randint(0, max_infill_len) return "".join(ret), num_errors
python
def introduce_errors(s, corruption_rate=3e-3, infill_marker="|?|", max_infill_len=8): """Artificially add spelling errors and infill markers. This function should be applied to the inputs of a correction model. The artificial errors are particularly useful to train a network to correct spelling when the training data does not contain many natural errors. Also replaces some substrings with an "infill" marker. e.g. "the fat cat sat on the mat" -> "the fat ca??? the mat" This causes the trained model to learn infilling (predicting what text to insert at the current cursor position). Args: s: a string (the uncorrupted text) corruption_rate: a floating point value. Probability of introducing an error/infill at each character. infill_marker: a string max_infill_len: an optional integer - maximum number of characters to remove and replace by an infill marker. None means no infilling. Returns: a string (the corrupted text) and an integer (the number of errors introduced) """ num_errors = 0 ret = [] operations = [ "delete", # delete a character "insert", # insert a random character from the input string "replace", # replace a character with a random character from # the input string "transpose", # transpose two adjacent characters ] if max_infill_len: operations.append("infill") pos = 0 while pos < len(s): if random.random() >= corruption_rate: ret.append(s[pos]) pos += 1 continue num_errors += 1 operation = operations[random.randint(0, len(operations) - 1)] if operation == "delete": pos += 1 elif operation == "insert": ret.append(s[random.randint(0, len(s) - 1)]) elif operation == "replace": ret.append(s[random.randint(0, len(s) - 1)]) pos += 1 elif operation == "transpose": ret.append(s[pos + 1] if pos + 1 < len(s) else "") ret.append(s[pos]) pos += 2 else: assert operation == "infill" ret.append(infill_marker) pos += random.randint(0, max_infill_len) return "".join(ret), num_errors
[ "def", "introduce_errors", "(", "s", ",", "corruption_rate", "=", "3e-3", ",", "infill_marker", "=", "\"|?|\"", ",", "max_infill_len", "=", "8", ")", ":", "num_errors", "=", "0", "ret", "=", "[", "]", "operations", "=", "[", "\"delete\"", ",", "# delete a character", "\"insert\"", ",", "# insert a random character from the input string", "\"replace\"", ",", "# replace a character with a random character from", "# the input string", "\"transpose\"", ",", "# transpose two adjacent characters", "]", "if", "max_infill_len", ":", "operations", ".", "append", "(", "\"infill\"", ")", "pos", "=", "0", "while", "pos", "<", "len", "(", "s", ")", ":", "if", "random", ".", "random", "(", ")", ">=", "corruption_rate", ":", "ret", ".", "append", "(", "s", "[", "pos", "]", ")", "pos", "+=", "1", "continue", "num_errors", "+=", "1", "operation", "=", "operations", "[", "random", ".", "randint", "(", "0", ",", "len", "(", "operations", ")", "-", "1", ")", "]", "if", "operation", "==", "\"delete\"", ":", "pos", "+=", "1", "elif", "operation", "==", "\"insert\"", ":", "ret", ".", "append", "(", "s", "[", "random", ".", "randint", "(", "0", ",", "len", "(", "s", ")", "-", "1", ")", "]", ")", "elif", "operation", "==", "\"replace\"", ":", "ret", ".", "append", "(", "s", "[", "random", ".", "randint", "(", "0", ",", "len", "(", "s", ")", "-", "1", ")", "]", ")", "pos", "+=", "1", "elif", "operation", "==", "\"transpose\"", ":", "ret", ".", "append", "(", "s", "[", "pos", "+", "1", "]", "if", "pos", "+", "1", "<", "len", "(", "s", ")", "else", "\"\"", ")", "ret", ".", "append", "(", "s", "[", "pos", "]", ")", "pos", "+=", "2", "else", ":", "assert", "operation", "==", "\"infill\"", "ret", ".", "append", "(", "infill_marker", ")", "pos", "+=", "random", ".", "randint", "(", "0", ",", "max_infill_len", ")", "return", "\"\"", ".", "join", "(", "ret", ")", ",", "num_errors" ]
Artificially add spelling errors and infill markers. This function should be applied to the inputs of a correction model. The artificial errors are particularly useful to train a network to correct spelling when the training data does not contain many natural errors. Also replaces some substrings with an "infill" marker. e.g. "the fat cat sat on the mat" -> "the fat ca??? the mat" This causes the trained model to learn infilling (predicting what text to insert at the current cursor position). Args: s: a string (the uncorrupted text) corruption_rate: a floating point value. Probability of introducing an error/infill at each character. infill_marker: a string max_infill_len: an optional integer - maximum number of characters to remove and replace by an infill marker. None means no infilling. Returns: a string (the corrupted text) and an integer (the number of errors introduced)
[ "Artificially", "add", "spelling", "errors", "and", "infill", "markers", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki_revision_utils.py#L511-L574
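Typical usage on clean text; the exact corruption depends on the RNG state, so only the shape of the result is shown:

import random

random.seed(0)
corrupted, num_errors = introduce_errors(
    "the fat cat sat on the mat", corruption_rate=0.1)
print(corrupted, num_errors)
# Roughly one character in ten is deleted, inserted, replaced,
# transposed, or swapped for the "|?|" infill marker.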
22,575
tensorflow/tensor2tensor
tensor2tensor/data_generators/wiki_revision_utils.py
fast_match_sequences
def fast_match_sequences(a, b, a_start=0, a_end=None, b_start=0, b_end=None, min_match_length=3, max_recursion_depth=128): """Compute diffs between two sequences. This function is similar in functionality and spirit to difflib.SequenceMatcher.get_opcodes, but it seems to run faster. if a_start, a_end, b_start, b_end are specified, then we compute diffs of the segments a[a_start:a_end] and b[b_start:b_end]. Returned indices are relative to the full sequence. We try to match the longest matching segments first, but due to heuristics in finding the matches, this is not guaranteed. Matching segments shorter than min_match_length are counted as part of the surrounding differing segments, unless they are at the beginning or end of both sequences. This helps eliminate junk matches. Args: a: a sequence b: a sequence a_start: an optional integer a_end: an optional integer b_start: an optional integer b_end: an optional integer min_match_length: an integer max_recursion_depth: an integer - avoids crashes in weird corner cases involving pairs of long repetitive sequences. Returns: a list of 5-tuples (tag, i1, i2, j1, j2). Each tuple represents the alignment of segment a[i1:i2] with b[j1:j2]. tag is either "equal" or "diff". Note that the tags differ from those returned by difflib.SequenceMatcher.get_opcodes. """ if a_end is None: a_end = len(a) if b_end is None: b_end = len(b) if a_start == a_end and b_start == b_end: return [] if a_start == a_end or b_start == b_end: return [("diff", a_start, a_end, b_start, b_end)] # Compute an index from value to first occurrence in the b segment. # Technically, we should index and explore all occurrences of a value, # but that might be much slower. b_index = {} for j in range(b_end - 1, b_start - 1, -1): b_index[b[j]] = j # we will look for the longest match we can find. max_match_length = 0 a_pos = a_start while a_pos < a_end: val = a[a_pos] b_pos = b_index.get(val) if b_pos is None: a_pos += 1 continue else: a_match_start = a_pos a_match_end = a_pos + 1 b_match_start = b_pos b_match_end = b_pos + 1 while (a_match_start > a_start and b_match_start > b_start and a[a_match_start - 1] == b[b_match_start - 1]): a_match_start -= 1 b_match_start -= 1 while (a_match_end < a_end and b_match_end < b_end and a[a_match_end] == b[b_match_end]): a_match_end += 1 b_match_end += 1 # Compute the length of the matching segment. We prefer the longest. match_length = a_match_end - a_match_start # Extra credit for matching at the beginning or end of the sequence. if a_match_start == 0 and b_match_start == 0: match_length += min_match_length if a_match_end == len(a) and b_match_end == len(b): match_length += min_match_length if match_length > max_match_length: max_match_length = match_length best_match = (a_match_start, a_match_end, b_match_start, b_match_end) # advance a_pos to the end of this match to avoid wasting time # rediscovering this match. a_pos = a_match_end if max_match_length < min_match_length or max_recursion_depth == 0: return [("diff", a_start, a_end, b_start, b_end)] a_match_start, a_match_end, b_match_start, b_match_end = best_match return (fast_match_sequences( a, b, a_start, a_match_start, b_start, b_match_start, min_match_length, max_recursion_depth - 1) + [ ("equal", a_match_start, a_match_end, b_match_start, b_match_end) ] + fast_match_sequences(a, b, a_match_end, a_end, b_match_end, b_end, min_match_length, max_recursion_depth - 1))
python
def fast_match_sequences(a, b, a_start=0, a_end=None, b_start=0, b_end=None, min_match_length=3, max_recursion_depth=128): """Compute diffs between two sequences. This function is similar in functionality and spirit to difflib.SequenceMatcher.get_opcodes, but it seems to run faster. if a_start, a_end, b_start, b_end are specified, then we compute diffs of the segments a[a_start:a_end] and b[b_start:b_end]. Returned indices are relative to the full sequence. We try to match the longest matching segments first, but due to heuristics in finding the matches, this is not guaranteed. Matching segments shorter than min_match_length are counted as part of the surrounding differing segments, unless they are at the beginning or end of both sequences. This helps eliminate junk matches. Args: a: a sequence b: a sequence a_start: an optional integer a_end: an optional integer b_start: an optional integer b_end: an optional integer min_match_length: an integer max_recursion_depth: an integer - avoids crashes in weird corner cases involving pairs of long repetitive sequences. Returns: a list of 5-tuples (tag, i1, i2, j1, j2). Each tuple represents the alignment of segment a[i1:i2] with b[j1:j2]. tag is either "equal" or "diff". Note that the tags differ from those returned by difflib.SequenceMatcher.get_opcodes. """ if a_end is None: a_end = len(a) if b_end is None: b_end = len(b) if a_start == a_end and b_start == b_end: return [] if a_start == a_end or b_start == b_end: return [("diff", a_start, a_end, b_start, b_end)] # Compute an index from value to first occurrence in the b segment. # Technically, we should index and explore all occurrences of a value, # but that might be much slower. b_index = {} for j in range(b_end - 1, b_start - 1, -1): b_index[b[j]] = j # we will look for the longest match we can find. max_match_length = 0 a_pos = a_start while a_pos < a_end: val = a[a_pos] b_pos = b_index.get(val) if b_pos is None: a_pos += 1 continue else: a_match_start = a_pos a_match_end = a_pos + 1 b_match_start = b_pos b_match_end = b_pos + 1 while (a_match_start > a_start and b_match_start > b_start and a[a_match_start - 1] == b[b_match_start - 1]): a_match_start -= 1 b_match_start -= 1 while (a_match_end < a_end and b_match_end < b_end and a[a_match_end] == b[b_match_end]): a_match_end += 1 b_match_end += 1 # Compute the length of the matching segment. We prefer the longest. match_length = a_match_end - a_match_start # Extra credit for matching at the beginning or end of the sequence. if a_match_start == 0 and b_match_start == 0: match_length += min_match_length if a_match_end == len(a) and b_match_end == len(b): match_length += min_match_length if match_length > max_match_length: max_match_length = match_length best_match = (a_match_start, a_match_end, b_match_start, b_match_end) # advance a_pos to the end of this match to avoid wasting time # rediscovering this match. a_pos = a_match_end if max_match_length < min_match_length or max_recursion_depth == 0: return [("diff", a_start, a_end, b_start, b_end)] a_match_start, a_match_end, b_match_start, b_match_end = best_match return (fast_match_sequences( a, b, a_start, a_match_start, b_start, b_match_start, min_match_length, max_recursion_depth - 1) + [ ("equal", a_match_start, a_match_end, b_match_start, b_match_end) ] + fast_match_sequences(a, b, a_match_end, a_end, b_match_end, b_end, min_match_length, max_recursion_depth - 1))
[ "def", "fast_match_sequences", "(", "a", ",", "b", ",", "a_start", "=", "0", ",", "a_end", "=", "None", ",", "b_start", "=", "0", ",", "b_end", "=", "None", ",", "min_match_length", "=", "3", ",", "max_recursion_depth", "=", "128", ")", ":", "if", "a_end", "is", "None", ":", "a_end", "=", "len", "(", "a", ")", "if", "b_end", "is", "None", ":", "b_end", "=", "len", "(", "b", ")", "if", "a_start", "==", "a_end", "and", "b_start", "==", "b_end", ":", "return", "[", "]", "if", "a_start", "==", "a_end", "or", "b_start", "==", "b_end", ":", "return", "[", "(", "\"diff\"", ",", "a_start", ",", "a_end", ",", "b_start", ",", "b_end", ")", "]", "# Compute an index from value to first occurrence in the b segment.", "# Technically, we should index and explore all occurrences of a value,", "# but that might be much slower.", "b_index", "=", "{", "}", "for", "j", "in", "range", "(", "b_end", "-", "1", ",", "b_start", "-", "1", ",", "-", "1", ")", ":", "b_index", "[", "b", "[", "j", "]", "]", "=", "j", "# we will look for the longest match we can find.", "max_match_length", "=", "0", "a_pos", "=", "a_start", "while", "a_pos", "<", "a_end", ":", "val", "=", "a", "[", "a_pos", "]", "b_pos", "=", "b_index", ".", "get", "(", "val", ")", "if", "b_pos", "is", "None", ":", "a_pos", "+=", "1", "continue", "else", ":", "a_match_start", "=", "a_pos", "a_match_end", "=", "a_pos", "+", "1", "b_match_start", "=", "b_pos", "b_match_end", "=", "b_pos", "+", "1", "while", "(", "a_match_start", ">", "a_start", "and", "b_match_start", ">", "b_start", "and", "a", "[", "a_match_start", "-", "1", "]", "==", "b", "[", "b_match_start", "-", "1", "]", ")", ":", "a_match_start", "-=", "1", "b_match_start", "-=", "1", "while", "(", "a_match_end", "<", "a_end", "and", "b_match_end", "<", "b_end", "and", "a", "[", "a_match_end", "]", "==", "b", "[", "b_match_end", "]", ")", ":", "a_match_end", "+=", "1", "b_match_end", "+=", "1", "# Compute the length of the matching segment. We prefer the longest.", "match_length", "=", "a_match_end", "-", "a_match_start", "# Extra credit for matching at the beginning or end of the sequence.", "if", "a_match_start", "==", "0", "and", "b_match_start", "==", "0", ":", "match_length", "+=", "min_match_length", "if", "a_match_end", "==", "len", "(", "a", ")", "and", "b_match_end", "==", "len", "(", "b", ")", ":", "match_length", "+=", "min_match_length", "if", "match_length", ">", "max_match_length", ":", "max_match_length", "=", "match_length", "best_match", "=", "(", "a_match_start", ",", "a_match_end", ",", "b_match_start", ",", "b_match_end", ")", "# advance a_pos to the end of this match to avoid wasting time", "# rediscovering this match.", "a_pos", "=", "a_match_end", "if", "max_match_length", "<", "min_match_length", "or", "max_recursion_depth", "==", "0", ":", "return", "[", "(", "\"diff\"", ",", "a_start", ",", "a_end", ",", "b_start", ",", "b_end", ")", "]", "a_match_start", ",", "a_match_end", ",", "b_match_start", ",", "b_match_end", "=", "best_match", "return", "(", "fast_match_sequences", "(", "a", ",", "b", ",", "a_start", ",", "a_match_start", ",", "b_start", ",", "b_match_start", ",", "min_match_length", ",", "max_recursion_depth", "-", "1", ")", "+", "[", "(", "\"equal\"", ",", "a_match_start", ",", "a_match_end", ",", "b_match_start", ",", "b_match_end", ")", "]", "+", "fast_match_sequences", "(", "a", ",", "b", ",", "a_match_end", ",", "a_end", ",", "b_match_end", ",", "b_end", ",", "min_match_length", ",", "max_recursion_depth", "-", "1", ")", ")" ]
Compute diffs between two sequences. This function is similar in functionality and spirit to difflib.SequenceMatcher.get_opcodes, but it seems to run faster. if a_start, a_end, b_start, b_end are specified, then we compute diffs of the segments a[a_start:a_end] and b[b_start:b_end]. Returned indices are relative to the full sequence. We try to match the longest matching segments first, but due to heuristics in finding the matches, this is not guaranteed. Matching segments shorter than min_match_length are counted as part of the surrounding differing segments, unless they are at the beginning or end of both sequences. This helps eliminate junk matches. Args: a: a sequence b: a sequence a_start: an optional integer a_end: an optional integer b_start: an optional integer b_end: an optional integer min_match_length: an integer max_recursion_depth: an integer - avoids crashes in weird corner cases involving pairs of long repetitive sequences. Returns: a list of 5-tuples (tag, i1, i2, j1, j2). Each tuple represents the alignment of segment a[i1:i2] with b[j1:j2]. tag is either "equal" or "diff". Note that the tags differ from those returned by difflib.SequenceMatcher.get_opcodes.
[ "Compute", "diffs", "between", "two", "sequences", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki_revision_utils.py#L577-L675
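A small demonstration of the opcode format (the exact cut points follow the heuristics described in the docstring, but for this pair they fall on the shared prefix and suffix):

a, b = "the cat sat", "the dog sat"
for tag, i1, i2, j1, j2 in fast_match_sequences(a, b):
    print(tag, repr(a[i1:i2]), repr(b[j1:j2]))
# equal 'the ' 'the '
# diff 'cat' 'dog'
# equal ' sat' ' sat'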
22,576
tensorflow/tensor2tensor
tensor2tensor/utils/restore_hook.py
RestoreHook.begin
def begin(self): """Load variables from checkpoint. New model variables have the following name format: new_model_scope/old_model_scope/xxx/xxx:0 To find the map of name to variable, we need to strip the new_model_scope and then match the old_model_scope and remove the suffix :0. """ variables_to_restore = tf.contrib.framework.get_variables_to_restore( include=self._include, exclude=self._exclude) # remove new_model_scope from variable name prefix assignment_map = {variable.name[len(self._new_model_scope):]: variable for variable in variables_to_restore if variable.name.startswith(self._new_model_scope)} # remove :0 from variable name suffix assignment_map = {name.split(":")[0]: variable for name, variable in six.iteritems(assignment_map) if name.startswith(self._old_model_scope)} self._assignment_map = assignment_map tf.logging.info("restoring %d variables from checkpoint %s"%( len(assignment_map), self._checkpoint_path)) tf.train.init_from_checkpoint(self._checkpoint_path, self._assignment_map)
python
def begin(self): """Load variables from checkpoint. New model variables have the following name format: new_model_scope/old_model_scope/xxx/xxx:0 To find the map of name to variable, we need to strip the new_model_scope and then match the old_model_scope and remove the suffix :0. """ variables_to_restore = tf.contrib.framework.get_variables_to_restore( include=self._include, exclude=self._exclude) # remove new_model_scope from variable name prefix assignment_map = {variable.name[len(self._new_model_scope):]: variable for variable in variables_to_restore if variable.name.startswith(self._new_model_scope)} # remove :0 from variable name suffix assignment_map = {name.split(":")[0]: variable for name, variable in six.iteritems(assignment_map) if name.startswith(self._old_model_scope)} self._assignment_map = assignment_map tf.logging.info("restoring %d variables from checkpoint %s"%( len(assignment_map), self._checkpoint_path)) tf.train.init_from_checkpoint(self._checkpoint_path, self._assignment_map)
[ "def", "begin", "(", "self", ")", ":", "variables_to_restore", "=", "tf", ".", "contrib", ".", "framework", ".", "get_variables_to_restore", "(", "include", "=", "self", ".", "_include", ",", "exclude", "=", "self", ".", "_exclude", ")", "# remove new_model_scope from variable name prefix", "assignment_map", "=", "{", "variable", ".", "name", "[", "len", "(", "self", ".", "_new_model_scope", ")", ":", "]", ":", "variable", "for", "variable", "in", "variables_to_restore", "if", "variable", ".", "name", ".", "startswith", "(", "self", ".", "_new_model_scope", ")", "}", "# remove :0 from variable name suffix", "assignment_map", "=", "{", "name", ".", "split", "(", "\":\"", ")", "[", "0", "]", ":", "variable", "for", "name", ",", "variable", "in", "six", ".", "iteritems", "(", "assignment_map", ")", "if", "name", ".", "startswith", "(", "self", ".", "_old_model_scope", ")", "}", "self", ".", "_assignment_map", "=", "assignment_map", "tf", ".", "logging", ".", "info", "(", "\"restoring %d variables from checkpoint %s\"", "%", "(", "len", "(", "assignment_map", ")", ",", "self", ".", "_checkpoint_path", ")", ")", "tf", ".", "train", ".", "init_from_checkpoint", "(", "self", ".", "_checkpoint_path", ",", "self", ".", "_assignment_map", ")" ]
Load variables from checkpoint. New model variables have the following name format: new_model_scope/old_model_scope/xxx/xxx:0 To find the map of name to variable, we need to strip the new_model_scope and then match the old_model_scope and remove the suffix :0.
[ "Load", "variables", "from", "checkpoint", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/restore_hook.py#L38-L61
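The two-step renaming can be traced on a single variable name; the scopes "new/" and "old/" below are hypothetical stand-ins for new_model_scope and old_model_scope:

name = "new/old/body/dense/kernel:0"      # hypothetical variable name
stripped = name[len("new/"):]             # "old/body/dense/kernel:0"
assert stripped.startswith("old/")        # only old-scope names are kept
checkpoint_key = stripped.split(":")[0]   # "old/body/dense/kernel"
# checkpoint_key -> variable becomes one entry of the assignment_map.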
22,577
tensorflow/tensor2tensor
tensor2tensor/envs/time_step.py
TimeStep.create_time_step
def create_time_step(cls, observation=None, done=False, raw_reward=None, processed_reward=None, action=None): """Creates a TimeStep with both rewards and actions as optional.""" return cls(observation, done, raw_reward, processed_reward, action)
python
def create_time_step(cls, observation=None, done=False, raw_reward=None, processed_reward=None, action=None): """Creates a TimeStep with both rewards and actions as optional.""" return cls(observation, done, raw_reward, processed_reward, action)
[ "def", "create_time_step", "(", "cls", ",", "observation", "=", "None", ",", "done", "=", "False", ",", "raw_reward", "=", "None", ",", "processed_reward", "=", "None", ",", "action", "=", "None", ")", ":", "return", "cls", "(", "observation", ",", "done", ",", "raw_reward", ",", "processed_reward", ",", "action", ")" ]
Creates a TimeStep with both rewards and actions as optional.
[ "Creates", "a", "TimeStep", "with", "both", "rewards", "and", "actions", "as", "optional", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/time_step.py#L59-L67
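TimeStep behaves like a namedtuple with these five fields; a minimal standalone sketch (the real class lives in tensor2tensor/envs/time_step.py):

import collections

TimeStep = collections.namedtuple(
    "TimeStep",
    ["observation", "done", "raw_reward", "processed_reward", "action"])

first = TimeStep(observation=[0.0, 1.0], done=False,
                 raw_reward=None, processed_reward=None, action=None)
# Rewards and the action can be filled in later via _replace:
later = first._replace(action=2, raw_reward=1.0)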
22,578
tensorflow/tensor2tensor
tensor2tensor/models/slicenet.py
attention
def attention(targets_shifted, inputs_encoded, norm_fn, hparams, bias=None): """Complete attention layer with preprocessing.""" separabilities = [hparams.separability, hparams.separability] if hparams.separability < 0: separabilities = [hparams.separability - 1, hparams.separability] targets_timed = common_layers.subseparable_conv_block( common_layers.add_timing_signal(targets_shifted), hparams.hidden_size, [((1, 1), (5, 1)), ((4, 1), (5, 1))], normalizer_fn=norm_fn, padding="LEFT", separabilities=separabilities, name="targets_time") if hparams.attention_type == "transformer": targets_timed = tf.squeeze(targets_timed, 2) target_shape = tf.shape(targets_timed) targets_segment = tf.zeros([target_shape[0], target_shape[1]]) target_attention_bias = common_attention.attention_bias( targets_segment, targets_segment, lower_triangular=True) inputs_attention_bias = tf.zeros([ tf.shape(inputs_encoded)[0], hparams.num_heads, tf.shape(targets_segment)[1], tf.shape(inputs_encoded)[1] ]) qv = common_attention.multihead_attention( targets_timed, None, target_attention_bias, hparams.hidden_size, hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, name="self_attention") qv = common_attention.multihead_attention( qv, inputs_encoded, inputs_attention_bias, hparams.hidden_size, hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, name="encdec_attention") return tf.expand_dims(qv, 2) elif hparams.attention_type == "simple": targets_with_attention = common_layers.simple_attention( targets_timed, inputs_encoded, bias=bias) return norm_fn(targets_shifted + targets_with_attention, name="attn_norm")
python
def attention(targets_shifted, inputs_encoded, norm_fn, hparams, bias=None): """Complete attention layer with preprocessing.""" separabilities = [hparams.separability, hparams.separability] if hparams.separability < 0: separabilities = [hparams.separability - 1, hparams.separability] targets_timed = common_layers.subseparable_conv_block( common_layers.add_timing_signal(targets_shifted), hparams.hidden_size, [((1, 1), (5, 1)), ((4, 1), (5, 1))], normalizer_fn=norm_fn, padding="LEFT", separabilities=separabilities, name="targets_time") if hparams.attention_type == "transformer": targets_timed = tf.squeeze(targets_timed, 2) target_shape = tf.shape(targets_timed) targets_segment = tf.zeros([target_shape[0], target_shape[1]]) target_attention_bias = common_attention.attention_bias( targets_segment, targets_segment, lower_triangular=True) inputs_attention_bias = tf.zeros([ tf.shape(inputs_encoded)[0], hparams.num_heads, tf.shape(targets_segment)[1], tf.shape(inputs_encoded)[1] ]) qv = common_attention.multihead_attention( targets_timed, None, target_attention_bias, hparams.hidden_size, hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, name="self_attention") qv = common_attention.multihead_attention( qv, inputs_encoded, inputs_attention_bias, hparams.hidden_size, hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, name="encdec_attention") return tf.expand_dims(qv, 2) elif hparams.attention_type == "simple": targets_with_attention = common_layers.simple_attention( targets_timed, inputs_encoded, bias=bias) return norm_fn(targets_shifted + targets_with_attention, name="attn_norm")
[ "def", "attention", "(", "targets_shifted", ",", "inputs_encoded", ",", "norm_fn", ",", "hparams", ",", "bias", "=", "None", ")", ":", "separabilities", "=", "[", "hparams", ".", "separability", ",", "hparams", ".", "separability", "]", "if", "hparams", ".", "separability", "<", "0", ":", "separabilities", "=", "[", "hparams", ".", "separability", "-", "1", ",", "hparams", ".", "separability", "]", "targets_timed", "=", "common_layers", ".", "subseparable_conv_block", "(", "common_layers", ".", "add_timing_signal", "(", "targets_shifted", ")", ",", "hparams", ".", "hidden_size", ",", "[", "(", "(", "1", ",", "1", ")", ",", "(", "5", ",", "1", ")", ")", ",", "(", "(", "4", ",", "1", ")", ",", "(", "5", ",", "1", ")", ")", "]", ",", "normalizer_fn", "=", "norm_fn", ",", "padding", "=", "\"LEFT\"", ",", "separabilities", "=", "separabilities", ",", "name", "=", "\"targets_time\"", ")", "if", "hparams", ".", "attention_type", "==", "\"transformer\"", ":", "targets_timed", "=", "tf", ".", "squeeze", "(", "targets_timed", ",", "2", ")", "target_shape", "=", "tf", ".", "shape", "(", "targets_timed", ")", "targets_segment", "=", "tf", ".", "zeros", "(", "[", "target_shape", "[", "0", "]", ",", "target_shape", "[", "1", "]", "]", ")", "target_attention_bias", "=", "common_attention", ".", "attention_bias", "(", "targets_segment", ",", "targets_segment", ",", "lower_triangular", "=", "True", ")", "inputs_attention_bias", "=", "tf", ".", "zeros", "(", "[", "tf", ".", "shape", "(", "inputs_encoded", ")", "[", "0", "]", ",", "hparams", ".", "num_heads", ",", "tf", ".", "shape", "(", "targets_segment", ")", "[", "1", "]", ",", "tf", ".", "shape", "(", "inputs_encoded", ")", "[", "1", "]", "]", ")", "qv", "=", "common_attention", ".", "multihead_attention", "(", "targets_timed", ",", "None", ",", "target_attention_bias", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "num_heads", ",", "hparams", ".", "attention_dropout", ",", "name", "=", "\"self_attention\"", ")", "qv", "=", "common_attention", ".", "multihead_attention", "(", "qv", ",", "inputs_encoded", ",", "inputs_attention_bias", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "num_heads", ",", "hparams", ".", "attention_dropout", ",", "name", "=", "\"encdec_attention\"", ")", "return", "tf", ".", "expand_dims", "(", "qv", ",", "2", ")", "elif", "hparams", ".", "attention_type", "==", "\"simple\"", ":", "targets_with_attention", "=", "common_layers", ".", "simple_attention", "(", "targets_timed", ",", "inputs_encoded", ",", "bias", "=", "bias", ")", "return", "norm_fn", "(", "targets_shifted", "+", "targets_with_attention", ",", "name", "=", "\"attn_norm\"", ")" ]
Complete attention layer with preprocessing.
[ "Complete", "attention", "layer", "with", "preprocessing", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/slicenet.py#L33-L81
22,579
tensorflow/tensor2tensor
tensor2tensor/models/slicenet.py
multi_conv_res
def multi_conv_res(x, padding, name, layers, hparams, mask=None, source=None): """A stack of separable convolution blocks with residual connections.""" with tf.variable_scope(name): padding_bias = None if mask is not None: padding_bias = (1.0 - mask) * -1e9 # Bias to not attend to padding. if padding == "LEFT": # Do not mask anything when left-padding. mask = None if (hparams.kernel_scheme in _KERNEL_SCHEMES and hparams.dilation_scheme in _DILATION_SCHEMES): kernels = _KERNEL_SCHEMES[hparams.kernel_scheme] dilations = _DILATION_SCHEMES[hparams.dilation_scheme] dilations_and_kernels = list(zip(dilations, kernels)) dilations_and_kernels1 = dilations_and_kernels[:2] dilations_and_kernels2 = dilations_and_kernels[2:] else: k = (hparams.kernel_height, hparams.kernel_width) k2 = (hparams.large_kernel_size, 1) dilations_and_kernels1 = [((1, 1), k), ((1, 1), k)] dilations_and_kernels2 = [((1, 1), k2), ((4, 4), k2)] separabilities1 = [hparams.separability, hparams.separability] separabilities2 = [hparams.separability] * len(dilations_and_kernels2) if hparams.separability < 0: separabilities1 = [hparams.separability - 1, hparams.separability] separabilities2 = [ hparams.separability - i for i in reversed(range(len(dilations_and_kernels2))) ] def norm_fn(x, name): with tf.variable_scope(name, default_name="norm"): return common_layers.apply_norm( x, hparams.norm_type, hparams.hidden_size, hparams.norm_epsilon) for layer in range(layers): with tf.variable_scope("layer_%d" % layer): y = common_layers.subseparable_conv_block( x, hparams.hidden_size, dilations_and_kernels1, normalizer_fn=norm_fn, padding=padding, mask=mask, separabilities=separabilities1, name="residual1") x += common_layers.subseparable_conv_block( x + y, hparams.hidden_size, dilations_and_kernels2, normalizer_fn=norm_fn, padding=padding, mask=mask, separabilities=separabilities2, name="residual2") + y if source is not None and hparams.attention_type != "none": x += attention(x, source, norm_fn, hparams, bias=padding_bias) if mask is not None: x *= mask return tf.nn.dropout(x, 1.0 - hparams.dropout)
python
def multi_conv_res(x, padding, name, layers, hparams, mask=None, source=None): """A stack of separable convolution blocks with residual connections.""" with tf.variable_scope(name): padding_bias = None if mask is not None: padding_bias = (1.0 - mask) * -1e9 # Bias to not attend to padding. if padding == "LEFT": # Do not mask anything when left-padding. mask = None if (hparams.kernel_scheme in _KERNEL_SCHEMES and hparams.dilation_scheme in _DILATION_SCHEMES): kernels = _KERNEL_SCHEMES[hparams.kernel_scheme] dilations = _DILATION_SCHEMES[hparams.dilation_scheme] dilations_and_kernels = list(zip(dilations, kernels)) dilations_and_kernels1 = dilations_and_kernels[:2] dilations_and_kernels2 = dilations_and_kernels[2:] else: k = (hparams.kernel_height, hparams.kernel_width) k2 = (hparams.large_kernel_size, 1) dilations_and_kernels1 = [((1, 1), k), ((1, 1), k)] dilations_and_kernels2 = [((1, 1), k2), ((4, 4), k2)] separabilities1 = [hparams.separability, hparams.separability] separabilities2 = [hparams.separability] * len(dilations_and_kernels2) if hparams.separability < 0: separabilities1 = [hparams.separability - 1, hparams.separability] separabilities2 = [ hparams.separability - i for i in reversed(range(len(dilations_and_kernels2))) ] def norm_fn(x, name): with tf.variable_scope(name, default_name="norm"): return common_layers.apply_norm( x, hparams.norm_type, hparams.hidden_size, hparams.norm_epsilon) for layer in range(layers): with tf.variable_scope("layer_%d" % layer): y = common_layers.subseparable_conv_block( x, hparams.hidden_size, dilations_and_kernels1, normalizer_fn=norm_fn, padding=padding, mask=mask, separabilities=separabilities1, name="residual1") x += common_layers.subseparable_conv_block( x + y, hparams.hidden_size, dilations_and_kernels2, normalizer_fn=norm_fn, padding=padding, mask=mask, separabilities=separabilities2, name="residual2") + y if source is not None and hparams.attention_type != "none": x += attention(x, source, norm_fn, hparams, bias=padding_bias) if mask is not None: x *= mask return tf.nn.dropout(x, 1.0 - hparams.dropout)
[ "def", "multi_conv_res", "(", "x", ",", "padding", ",", "name", ",", "layers", ",", "hparams", ",", "mask", "=", "None", ",", "source", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "padding_bias", "=", "None", "if", "mask", "is", "not", "None", ":", "padding_bias", "=", "(", "1.0", "-", "mask", ")", "*", "-", "1e9", "# Bias to not attend to padding.", "if", "padding", "==", "\"LEFT\"", ":", "# Do not mask anything when left-padding.", "mask", "=", "None", "if", "(", "hparams", ".", "kernel_scheme", "in", "_KERNEL_SCHEMES", "and", "hparams", ".", "dilation_scheme", "in", "_DILATION_SCHEMES", ")", ":", "kernels", "=", "_KERNEL_SCHEMES", "[", "hparams", ".", "kernel_scheme", "]", "dilations", "=", "_DILATION_SCHEMES", "[", "hparams", ".", "dilation_scheme", "]", "dilations_and_kernels", "=", "list", "(", "zip", "(", "dilations", ",", "kernels", ")", ")", "dilations_and_kernels1", "=", "dilations_and_kernels", "[", ":", "2", "]", "dilations_and_kernels2", "=", "dilations_and_kernels", "[", "2", ":", "]", "else", ":", "k", "=", "(", "hparams", ".", "kernel_height", ",", "hparams", ".", "kernel_width", ")", "k2", "=", "(", "hparams", ".", "large_kernel_size", ",", "1", ")", "dilations_and_kernels1", "=", "[", "(", "(", "1", ",", "1", ")", ",", "k", ")", ",", "(", "(", "1", ",", "1", ")", ",", "k", ")", "]", "dilations_and_kernels2", "=", "[", "(", "(", "1", ",", "1", ")", ",", "k2", ")", ",", "(", "(", "4", ",", "4", ")", ",", "k2", ")", "]", "separabilities1", "=", "[", "hparams", ".", "separability", ",", "hparams", ".", "separability", "]", "separabilities2", "=", "[", "hparams", ".", "separability", "]", "*", "len", "(", "dilations_and_kernels2", ")", "if", "hparams", ".", "separability", "<", "0", ":", "separabilities1", "=", "[", "hparams", ".", "separability", "-", "1", ",", "hparams", ".", "separability", "]", "separabilities2", "=", "[", "hparams", ".", "separability", "-", "i", "for", "i", "in", "reversed", "(", "range", "(", "len", "(", "dilations_and_kernels2", ")", ")", ")", "]", "def", "norm_fn", "(", "x", ",", "name", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"norm\"", ")", ":", "return", "common_layers", ".", "apply_norm", "(", "x", ",", "hparams", ".", "norm_type", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "norm_epsilon", ")", "for", "layer", "in", "range", "(", "layers", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"layer_%d\"", "%", "layer", ")", ":", "y", "=", "common_layers", ".", "subseparable_conv_block", "(", "x", ",", "hparams", ".", "hidden_size", ",", "dilations_and_kernels1", ",", "normalizer_fn", "=", "norm_fn", ",", "padding", "=", "padding", ",", "mask", "=", "mask", ",", "separabilities", "=", "separabilities1", ",", "name", "=", "\"residual1\"", ")", "x", "+=", "common_layers", ".", "subseparable_conv_block", "(", "x", "+", "y", ",", "hparams", ".", "hidden_size", ",", "dilations_and_kernels2", ",", "normalizer_fn", "=", "norm_fn", ",", "padding", "=", "padding", ",", "mask", "=", "mask", ",", "separabilities", "=", "separabilities2", ",", "name", "=", "\"residual2\"", ")", "+", "y", "if", "source", "is", "not", "None", "and", "hparams", ".", "attention_type", "!=", "\"none\"", ":", "x", "+=", "attention", "(", "x", ",", "source", ",", "norm_fn", ",", "hparams", ",", "bias", "=", "padding_bias", ")", "if", "mask", "is", "not", "None", ":", "x", "*=", "mask", "return", "tf", ".", "nn", ".", "dropout", "(", "x", ",", "1.0", "-", "hparams", ".", "dropout", 
")" ]
A stack of separable convolution blocks with residual connections.
[ "A", "stack", "of", "separable", "convolution", "blocks", "with", "residual", "connections", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/slicenet.py#L84-L142
22,580
tensorflow/tensor2tensor
tensor2tensor/models/slicenet.py
rank_loss
def rank_loss(sentence_emb, image_emb, margin=0.2): """Experimental rank loss, thanks to kkurach@ for the code.""" with tf.name_scope("rank_loss"): # Normalize first as this is assumed in cosine similarity later. sentence_emb = tf.nn.l2_normalize(sentence_emb, 1) image_emb = tf.nn.l2_normalize(image_emb, 1) # Both sentence_emb and image_emb have size [batch, depth]. scores = tf.matmul(image_emb, tf.transpose(sentence_emb)) # [batch, batch] diagonal = tf.diag_part(scores) # [batch] cost_s = tf.maximum(0.0, margin - diagonal + scores) # [batch, batch] cost_im = tf.maximum( 0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores) # [batch, batch] # Clear diagonals. batch_size = tf.shape(sentence_emb)[0] empty_diagonal_mat = tf.ones_like(cost_s) - tf.eye(batch_size) cost_s *= empty_diagonal_mat cost_im *= empty_diagonal_mat return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im)
python
def rank_loss(sentence_emb, image_emb, margin=0.2): """Experimental rank loss, thanks to kkurach@ for the code.""" with tf.name_scope("rank_loss"): # Normalize first as this is assumed in cosine similarity later. sentence_emb = tf.nn.l2_normalize(sentence_emb, 1) image_emb = tf.nn.l2_normalize(image_emb, 1) # Both sentence_emb and image_emb have size [batch, depth]. scores = tf.matmul(image_emb, tf.transpose(sentence_emb)) # [batch, batch] diagonal = tf.diag_part(scores) # [batch] cost_s = tf.maximum(0.0, margin - diagonal + scores) # [batch, batch] cost_im = tf.maximum( 0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores) # [batch, batch] # Clear diagonals. batch_size = tf.shape(sentence_emb)[0] empty_diagonal_mat = tf.ones_like(cost_s) - tf.eye(batch_size) cost_s *= empty_diagonal_mat cost_im *= empty_diagonal_mat return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im)
[ "def", "rank_loss", "(", "sentence_emb", ",", "image_emb", ",", "margin", "=", "0.2", ")", ":", "with", "tf", ".", "name_scope", "(", "\"rank_loss\"", ")", ":", "# Normalize first as this is assumed in cosine similarity later.", "sentence_emb", "=", "tf", ".", "nn", ".", "l2_normalize", "(", "sentence_emb", ",", "1", ")", "image_emb", "=", "tf", ".", "nn", ".", "l2_normalize", "(", "image_emb", ",", "1", ")", "# Both sentence_emb and image_emb have size [batch, depth].", "scores", "=", "tf", ".", "matmul", "(", "image_emb", ",", "tf", ".", "transpose", "(", "sentence_emb", ")", ")", "# [batch, batch]", "diagonal", "=", "tf", ".", "diag_part", "(", "scores", ")", "# [batch]", "cost_s", "=", "tf", ".", "maximum", "(", "0.0", ",", "margin", "-", "diagonal", "+", "scores", ")", "# [batch, batch]", "cost_im", "=", "tf", ".", "maximum", "(", "0.0", ",", "margin", "-", "tf", ".", "reshape", "(", "diagonal", ",", "[", "-", "1", ",", "1", "]", ")", "+", "scores", ")", "# [batch, batch]", "# Clear diagonals.", "batch_size", "=", "tf", ".", "shape", "(", "sentence_emb", ")", "[", "0", "]", "empty_diagonal_mat", "=", "tf", ".", "ones_like", "(", "cost_s", ")", "-", "tf", ".", "eye", "(", "batch_size", ")", "cost_s", "*=", "empty_diagonal_mat", "cost_im", "*=", "empty_diagonal_mat", "return", "tf", ".", "reduce_mean", "(", "cost_s", ")", "+", "tf", ".", "reduce_mean", "(", "cost_im", ")" ]
Experimental rank loss, thanks to kkurach@ for the code.
[ "Experimental", "rank", "loss", "thanks", "to", "kkurach" ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/slicenet.py#L145-L162
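The margin structure of this loss is easy to check outside TensorFlow. A minimal NumPy sketch of the same computation (batch size, margin, and random embeddings below are illustrative, not from the library):

import numpy as np

def rank_loss_np(sentence_emb, image_emb, margin=0.2):
  # L2-normalize rows so the dot products below are cosine similarities.
  sentence_emb = sentence_emb / np.linalg.norm(sentence_emb, axis=1, keepdims=True)
  image_emb = image_emb / np.linalg.norm(image_emb, axis=1, keepdims=True)
  scores = image_emb @ sentence_emb.T          # [batch, batch]
  diagonal = np.diag(scores)                   # similarities of matching pairs
  cost_s = np.maximum(0.0, margin - diagonal + scores)         # broadcast over rows
  cost_im = np.maximum(0.0, margin - diagonal[:, None] + scores)  # over columns
  mask = 1.0 - np.eye(len(scores))             # clear diagonals
  return (cost_s * mask).mean() + (cost_im * mask).mean()

rng = np.random.RandomState(0)
print(rank_loss_np(rng.randn(4, 8), rng.randn(4, 8)))  # small positive scalar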
22,581
tensorflow/tensor2tensor
tensor2tensor/models/slicenet.py
similarity_cost
def similarity_cost(inputs_encoded, targets_encoded):
  """Loss telling to be more similar to your own targets than to others."""
  # This is a first very simple version: handle variable-length by padding
  # to same length and putting everything into batch. In need of a better way.
  x, y = common_layers.pad_to_same_length(inputs_encoded, targets_encoded)
  depth = tf.shape(inputs_encoded)[3]
  x, y = tf.reshape(x, [-1, depth]), tf.reshape(y, [-1, depth])
  return rank_loss(x, y)
python
def similarity_cost(inputs_encoded, targets_encoded):
  """Loss telling to be more similar to your own targets than to others."""
  # This is a first very simple version: handle variable-length by padding
  # to same length and putting everything into batch. In need of a better way.
  x, y = common_layers.pad_to_same_length(inputs_encoded, targets_encoded)
  depth = tf.shape(inputs_encoded)[3]
  x, y = tf.reshape(x, [-1, depth]), tf.reshape(y, [-1, depth])
  return rank_loss(x, y)
[ "def", "similarity_cost", "(", "inputs_encoded", ",", "targets_encoded", ")", ":", "# This is a first very simple version: handle variable-length by padding", "# to same length and putting everything into batch. In need of a better way.", "x", ",", "y", "=", "common_layers", ".", "pad_to_same_length", "(", "inputs_encoded", ",", "targets_encoded", ")", "depth", "=", "tf", ".", "shape", "(", "inputs_encoded", ")", "[", "3", "]", "x", ",", "y", "=", "tf", ".", "reshape", "(", "x", ",", "[", "-", "1", ",", "depth", "]", ")", ",", "tf", ".", "reshape", "(", "y", ",", "[", "-", "1", ",", "depth", "]", ")", "return", "rank_loss", "(", "x", ",", "y", ")" ]
Loss telling to be more similar to your own targets than to others.
[ "Loss", "telling", "to", "be", "more", "similar", "to", "your", "own", "targets", "than", "to", "others", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/slicenet.py#L165-L172
22,582
tensorflow/tensor2tensor
tensor2tensor/models/slicenet.py
slicenet_middle
def slicenet_middle(inputs_encoded, targets, target_space_emb, mask, hparams):
  """Middle part of slicenet, connecting encoder and decoder."""

  def norm_fn(x, name):
    with tf.variable_scope(name, default_name="norm"):
      return common_layers.apply_norm(x, hparams.norm_type, hparams.hidden_size,
                                      hparams.norm_epsilon)

  # Flatten targets and embed target_space_id.
  targets_flat = tf.expand_dims(common_layers.flatten4d3d(targets), axis=2)
  target_space_emb = tf.tile(target_space_emb,
                             [tf.shape(targets_flat)[0], 1, 1, 1])

  # Use attention from each target to look at input and retrieve.
  targets_shifted = common_layers.shift_right(
      targets_flat, pad_value=target_space_emb)
  if hparams.attention_type == "none":
    targets_with_attention = tf.zeros_like(targets_shifted)
  else:
    inputs_padding_bias = (1.0 - mask) * -1e9  # Bias to not attend to padding.
    targets_with_attention = attention(
        targets_shifted,
        inputs_encoded,
        norm_fn,
        hparams,
        bias=inputs_padding_bias)

  # Positional targets: merge attention and raw.
  kernel = (hparams.kernel_height, hparams.kernel_width)
  targets_merged = common_layers.subseparable_conv_block(
      tf.concat([targets_with_attention, targets_shifted], axis=3),
      hparams.hidden_size, [((1, 1), kernel)],
      normalizer_fn=norm_fn,
      padding="LEFT",
      separability=4,
      name="targets_merge")

  return targets_merged, 0.0
python
def slicenet_middle(inputs_encoded, targets, target_space_emb, mask, hparams):
  """Middle part of slicenet, connecting encoder and decoder."""

  def norm_fn(x, name):
    with tf.variable_scope(name, default_name="norm"):
      return common_layers.apply_norm(x, hparams.norm_type, hparams.hidden_size,
                                      hparams.norm_epsilon)

  # Flatten targets and embed target_space_id.
  targets_flat = tf.expand_dims(common_layers.flatten4d3d(targets), axis=2)
  target_space_emb = tf.tile(target_space_emb,
                             [tf.shape(targets_flat)[0], 1, 1, 1])

  # Use attention from each target to look at input and retrieve.
  targets_shifted = common_layers.shift_right(
      targets_flat, pad_value=target_space_emb)
  if hparams.attention_type == "none":
    targets_with_attention = tf.zeros_like(targets_shifted)
  else:
    inputs_padding_bias = (1.0 - mask) * -1e9  # Bias to not attend to padding.
    targets_with_attention = attention(
        targets_shifted,
        inputs_encoded,
        norm_fn,
        hparams,
        bias=inputs_padding_bias)

  # Positional targets: merge attention and raw.
  kernel = (hparams.kernel_height, hparams.kernel_width)
  targets_merged = common_layers.subseparable_conv_block(
      tf.concat([targets_with_attention, targets_shifted], axis=3),
      hparams.hidden_size, [((1, 1), kernel)],
      normalizer_fn=norm_fn,
      padding="LEFT",
      separability=4,
      name="targets_merge")

  return targets_merged, 0.0
[ "def", "slicenet_middle", "(", "inputs_encoded", ",", "targets", ",", "target_space_emb", ",", "mask", ",", "hparams", ")", ":", "def", "norm_fn", "(", "x", ",", "name", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"norm\"", ")", ":", "return", "common_layers", ".", "apply_norm", "(", "x", ",", "hparams", ".", "norm_type", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "norm_epsilon", ")", "# Flatten targets and embed target_space_id.", "targets_flat", "=", "tf", ".", "expand_dims", "(", "common_layers", ".", "flatten4d3d", "(", "targets", ")", ",", "axis", "=", "2", ")", "target_space_emb", "=", "tf", ".", "tile", "(", "target_space_emb", ",", "[", "tf", ".", "shape", "(", "targets_flat", ")", "[", "0", "]", ",", "1", ",", "1", ",", "1", "]", ")", "# Use attention from each target to look at input and retrieve.", "targets_shifted", "=", "common_layers", ".", "shift_right", "(", "targets_flat", ",", "pad_value", "=", "target_space_emb", ")", "if", "hparams", ".", "attention_type", "==", "\"none\"", ":", "targets_with_attention", "=", "tf", ".", "zeros_like", "(", "targets_shifted", ")", "else", ":", "inputs_padding_bias", "=", "(", "1.0", "-", "mask", ")", "*", "-", "1e9", "# Bias to not attend to padding.", "targets_with_attention", "=", "attention", "(", "targets_shifted", ",", "inputs_encoded", ",", "norm_fn", ",", "hparams", ",", "bias", "=", "inputs_padding_bias", ")", "# Positional targets: merge attention and raw.", "kernel", "=", "(", "hparams", ".", "kernel_height", ",", "hparams", ".", "kernel_width", ")", "targets_merged", "=", "common_layers", ".", "subseparable_conv_block", "(", "tf", ".", "concat", "(", "[", "targets_with_attention", ",", "targets_shifted", "]", ",", "axis", "=", "3", ")", ",", "hparams", ".", "hidden_size", ",", "[", "(", "(", "1", ",", "1", ")", ",", "kernel", ")", "]", ",", "normalizer_fn", "=", "norm_fn", ",", "padding", "=", "\"LEFT\"", ",", "separability", "=", "4", ",", "name", "=", "\"targets_merge\"", ")", "return", "targets_merged", ",", "0.0" ]
Middle part of slicenet, connecting encoder and decoder.
[ "Middle", "part", "of", "slicenet", "connecting", "encoder", "and", "decoder", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/slicenet.py#L175-L212
22,583
tensorflow/tensor2tensor
tensor2tensor/models/slicenet.py
embedding_to_padding
def embedding_to_padding(emb):
  """Input embeddings -> is_padding."""
  emb_sum = tf.reduce_sum(tf.abs(emb), axis=-1, keep_dims=True)
  return tf.to_float(tf.equal(emb_sum, 0.0))
python
def embedding_to_padding(emb):
  """Input embeddings -> is_padding."""
  emb_sum = tf.reduce_sum(tf.abs(emb), axis=-1, keep_dims=True)
  return tf.to_float(tf.equal(emb_sum, 0.0))
[ "def", "embedding_to_padding", "(", "emb", ")", ":", "emb_sum", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "abs", "(", "emb", ")", ",", "axis", "=", "-", "1", ",", "keep_dims", "=", "True", ")", "return", "tf", ".", "to_float", "(", "tf", ".", "equal", "(", "emb_sum", ",", "0.0", ")", ")" ]
Input embeddings -> is_padding.
[ "Input", "embeddings", "-", ">", "is_padding", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/slicenet.py#L221-L224
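Since padded positions are all-zero embedding vectors, the check above reduces to "row sums of absolute values equal zero". A tiny illustrative check in NumPy (the shapes and values are invented for the example):

import numpy as np

emb = np.array([[[0.3, -0.1], [0.0, 0.0], [1.2, 0.7]]])  # [1, 3, 2]
is_padding = (np.abs(emb).sum(axis=-1, keepdims=True) == 0.0).astype(np.float32)
print(is_padding[..., 0])  # [[0. 1. 0.]] -- only the all-zero position is padding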
22,584
tensorflow/tensor2tensor
tensor2tensor/models/slicenet.py
slicenet_internal
def slicenet_internal(inputs, targets, target_space, hparams, run_decoder=True):
  """The slicenet model, main step used for training."""
  with tf.variable_scope("slicenet"):
    # Project to hidden size if necessary
    if inputs.get_shape().as_list()[-1] != hparams.hidden_size:
      inputs = common_layers.conv_block(
          inputs,
          hparams.hidden_size, [((1, 1), (3, 3))],
          first_relu=False,
          padding="SAME",
          force2d=True)

    # Flatten inputs and encode.
    inputs = tf.expand_dims(common_layers.flatten4d3d(inputs), axis=2)
    inputs_mask = 1.0 - embedding_to_padding(inputs)
    inputs = common_layers.add_timing_signal(inputs)  # Add position info.
    target_space_emb = embed_target_space(target_space, hparams.hidden_size)
    extra_layers = int(hparams.num_hidden_layers * 1.5)
    inputs_encoded = multi_conv_res(
        inputs, "SAME", "encoder", extra_layers, hparams, mask=inputs_mask)
    if not run_decoder:
      return inputs_encoded
    # Do the middle part.
    decoder_start, similarity_loss = slicenet_middle(
        inputs_encoded, targets, target_space_emb, inputs_mask, hparams)
    # Decode.
    decoder_final = multi_conv_res(
        decoder_start,
        "LEFT",
        "decoder",
        hparams.num_hidden_layers,
        hparams,
        mask=inputs_mask,
        source=inputs_encoded)
    return decoder_final, tf.reduce_mean(similarity_loss)
python
def slicenet_internal(inputs, targets, target_space, hparams, run_decoder=True):
  """The slicenet model, main step used for training."""
  with tf.variable_scope("slicenet"):
    # Project to hidden size if necessary
    if inputs.get_shape().as_list()[-1] != hparams.hidden_size:
      inputs = common_layers.conv_block(
          inputs,
          hparams.hidden_size, [((1, 1), (3, 3))],
          first_relu=False,
          padding="SAME",
          force2d=True)

    # Flatten inputs and encode.
    inputs = tf.expand_dims(common_layers.flatten4d3d(inputs), axis=2)
    inputs_mask = 1.0 - embedding_to_padding(inputs)
    inputs = common_layers.add_timing_signal(inputs)  # Add position info.
    target_space_emb = embed_target_space(target_space, hparams.hidden_size)
    extra_layers = int(hparams.num_hidden_layers * 1.5)
    inputs_encoded = multi_conv_res(
        inputs, "SAME", "encoder", extra_layers, hparams, mask=inputs_mask)
    if not run_decoder:
      return inputs_encoded
    # Do the middle part.
    decoder_start, similarity_loss = slicenet_middle(
        inputs_encoded, targets, target_space_emb, inputs_mask, hparams)
    # Decode.
    decoder_final = multi_conv_res(
        decoder_start,
        "LEFT",
        "decoder",
        hparams.num_hidden_layers,
        hparams,
        mask=inputs_mask,
        source=inputs_encoded)
    return decoder_final, tf.reduce_mean(similarity_loss)
[ "def", "slicenet_internal", "(", "inputs", ",", "targets", ",", "target_space", ",", "hparams", ",", "run_decoder", "=", "True", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"slicenet\"", ")", ":", "# Project to hidden size if necessary", "if", "inputs", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "-", "1", "]", "!=", "hparams", ".", "hidden_size", ":", "inputs", "=", "common_layers", ".", "conv_block", "(", "inputs", ",", "hparams", ".", "hidden_size", ",", "[", "(", "(", "1", ",", "1", ")", ",", "(", "3", ",", "3", ")", ")", "]", ",", "first_relu", "=", "False", ",", "padding", "=", "\"SAME\"", ",", "force2d", "=", "True", ")", "# Flatten inputs and encode.", "inputs", "=", "tf", ".", "expand_dims", "(", "common_layers", ".", "flatten4d3d", "(", "inputs", ")", ",", "axis", "=", "2", ")", "inputs_mask", "=", "1.0", "-", "embedding_to_padding", "(", "inputs", ")", "inputs", "=", "common_layers", ".", "add_timing_signal", "(", "inputs", ")", "# Add position info.", "target_space_emb", "=", "embed_target_space", "(", "target_space", ",", "hparams", ".", "hidden_size", ")", "extra_layers", "=", "int", "(", "hparams", ".", "num_hidden_layers", "*", "1.5", ")", "inputs_encoded", "=", "multi_conv_res", "(", "inputs", ",", "\"SAME\"", ",", "\"encoder\"", ",", "extra_layers", ",", "hparams", ",", "mask", "=", "inputs_mask", ")", "if", "not", "run_decoder", ":", "return", "inputs_encoded", "# Do the middle part.", "decoder_start", ",", "similarity_loss", "=", "slicenet_middle", "(", "inputs_encoded", ",", "targets", ",", "target_space_emb", ",", "inputs_mask", ",", "hparams", ")", "# Decode.", "decoder_final", "=", "multi_conv_res", "(", "decoder_start", ",", "\"LEFT\"", ",", "\"decoder\"", ",", "hparams", ".", "num_hidden_layers", ",", "hparams", ",", "mask", "=", "inputs_mask", ",", "source", "=", "inputs_encoded", ")", "return", "decoder_final", ",", "tf", ".", "reduce_mean", "(", "similarity_loss", ")" ]
The slicenet model, main step used for training.
[ "The", "slicenet", "model", "main", "step", "used", "for", "training", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/slicenet.py#L227-L261
22,585
tensorflow/tensor2tensor
tensor2tensor/models/slicenet.py
slicenet_params1_noam
def slicenet_params1_noam():
  """Version with Noam's decay scheme."""
  hparams = slicenet_params1()
  hparams.learning_rate_decay_scheme = "noam"
  hparams.learning_rate = 1.0
  hparams.learning_rate_warmup_steps = 4000
  hparams.initializer = "uniform_unit_scaling"
  hparams.optimizer_adam_epsilon = 1e-9
  hparams.optimizer_adam_beta1 = 0.9
  hparams.optimizer_adam_beta2 = 0.98
  return hparams
python
def slicenet_params1_noam():
  """Version with Noam's decay scheme."""
  hparams = slicenet_params1()
  hparams.learning_rate_decay_scheme = "noam"
  hparams.learning_rate = 1.0
  hparams.learning_rate_warmup_steps = 4000
  hparams.initializer = "uniform_unit_scaling"
  hparams.optimizer_adam_epsilon = 1e-9
  hparams.optimizer_adam_beta1 = 0.9
  hparams.optimizer_adam_beta2 = 0.98
  return hparams
[ "def", "slicenet_params1_noam", "(", ")", ":", "hparams", "=", "slicenet_params1", "(", ")", "hparams", ".", "learning_rate_decay_scheme", "=", "\"noam\"", "hparams", ".", "learning_rate", "=", "1.0", "hparams", ".", "learning_rate_warmup_steps", "=", "4000", "hparams", ".", "initializer", "=", "\"uniform_unit_scaling\"", "hparams", ".", "optimizer_adam_epsilon", "=", "1e-9", "hparams", ".", "optimizer_adam_beta1", "=", "0.9", "hparams", ".", "optimizer_adam_beta2", "=", "0.98", "return", "hparams" ]
Version with Noam's decay scheme.
[ "Version", "with", "Noam", "s", "decay", "scheme", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/slicenet.py#L338-L348
22,586
tensorflow/tensor2tensor
tensor2tensor/models/slicenet.py
slicenet_params1_tiny
def slicenet_params1_tiny():
  """Version for fast local runs."""
  hparams = slicenet_params1()
  hparams.attention_type = "simple"
  hparams.separability = 0
  hparams.hidden_size = 128
  hparams.num_hidden_layers = 2
  hparams.batch_size = 512
  hparams.learning_rate_warmup_steps = 200
  return hparams
python
def slicenet_params1_tiny():
  """Version for fast local runs."""
  hparams = slicenet_params1()
  hparams.attention_type = "simple"
  hparams.separability = 0
  hparams.hidden_size = 128
  hparams.num_hidden_layers = 2
  hparams.batch_size = 512
  hparams.learning_rate_warmup_steps = 200
  return hparams
[ "def", "slicenet_params1_tiny", "(", ")", ":", "hparams", "=", "slicenet_params1", "(", ")", "hparams", ".", "attention_type", "=", "\"simple\"", "hparams", ".", "separability", "=", "0", "hparams", ".", "hidden_size", "=", "128", "hparams", ".", "num_hidden_layers", "=", "2", "hparams", ".", "batch_size", "=", "512", "hparams", ".", "learning_rate_warmup_steps", "=", "200", "return", "hparams" ]
Version for fast local runs.
[ "Version", "for", "fast", "local", "runs", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/slicenet.py#L352-L361
22,587
tensorflow/tensor2tensor
tensor2tensor/data_generators/pointer_generator_word.py
TokenTextEncoderOov.decode_list_oov
def decode_list_oov(self, ids, source_oov_id_to_token):
  """decode ids back to tokens, considering OOVs temporary IDs.

  Args:
    ids: vocab ids. Could possibly include source temporary
      OOV ID starting from vocab_size.
    source_oov_id_to_token: a list of source OOV tokens, with the order the
      same as they appear in the source.

  Returns:
    decoded tokens, possibly including source OOV tokens.
  """
  seq = reversed(ids) if self._reverse else ids
  tokens = []
  for cur_id in seq:
    if cur_id in self._id_to_token:
      tokens.append(self._id_to_token[cur_id])
    else:
      tokens.append(source_oov_id_to_token[cur_id - self.vocab_size])
  return tokens
python
def decode_list_oov(self, ids, source_oov_id_to_token):
  """decode ids back to tokens, considering OOVs temporary IDs.

  Args:
    ids: vocab ids. Could possibly include source temporary
      OOV ID starting from vocab_size.
    source_oov_id_to_token: a list of source OOV tokens, with the order the
      same as they appear in the source.

  Returns:
    decoded tokens, possibly including source OOV tokens.
  """
  seq = reversed(ids) if self._reverse else ids
  tokens = []
  for cur_id in seq:
    if cur_id in self._id_to_token:
      tokens.append(self._id_to_token[cur_id])
    else:
      tokens.append(source_oov_id_to_token[cur_id - self.vocab_size])
  return tokens
[ "def", "decode_list_oov", "(", "self", ",", "ids", ",", "source_oov_id_to_token", ")", ":", "seq", "=", "reversed", "(", "ids", ")", "if", "self", ".", "_reverse", "else", "ids", "tokens", "=", "[", "]", "for", "cur_id", "in", "seq", ":", "if", "cur_id", "in", "self", ".", "_id_to_token", ":", "tokens", ".", "append", "(", "self", ".", "_id_to_token", "[", "cur_id", "]", ")", "else", ":", "tokens", ".", "append", "(", "source_oov_id_to_token", "[", "cur_id", "-", "self", ".", "vocab_size", "]", ")", "return", "tokens" ]
decode ids back to tokens, considering OOVs temporary IDs.

Args:
  ids: vocab ids. Could possibly include source temporary
    OOV ID starting from vocab_size.
  source_oov_id_to_token: a list of source OOV tokens, with the order the
    same as they appear in the source.

Returns:
  decoded tokens, possibly including source OOV tokens.
[ "decode", "ids", "back", "to", "tokens", "considering", "OOVs", "temporary", "IDs", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/pointer_generator_word.py#L178-L198
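The pointer-generator convention here is that IDs at or above vocab_size are temporary per-example slots into the source's OOV list. A toy sketch of that mapping (the vocabulary and IDs below are made up for illustration):

vocab_size = 5
id_to_token = {0: "<pad>", 1: "<EOS>", 2: "the", 3: "cat", 4: "sat"}
source_oov_id_to_token = ["tensor2tensor", "kkurach"]  # OOVs in source order

ids = [2, 3, 4, 5, 6]  # 5 and 6 point past the vocab into the OOV list
tokens = [id_to_token[i] if i in id_to_token
          else source_oov_id_to_token[i - vocab_size] for i in ids]
print(tokens)  # ['the', 'cat', 'sat', 'tensor2tensor', 'kkurach']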
22,588
tensorflow/tensor2tensor
tensor2tensor/data_generators/vqa_utils.py
_distort_color
def _distort_color(image, color_ordering=0, scope=None):
  """Distort the color of a Tensor image.

  Each color distortion is non-commutative and thus ordering of the color ops
  matters. Ideally we would randomly permute the ordering of the color ops.
  Rather than adding that level of complication, we select a distinct ordering
  of color ops for each preprocessing thread.

  Args:
    image: 3-D Tensor containing single image in [0, 1].
    color_ordering: Python int, a type of distortion (valid values: 0-3).
    scope: Optional scope for name_scope.

  Returns:
    3-D Tensor color-distorted image on range [0, 1]

  Raises:
    ValueError: if color_ordering not in [0, 3]
  """
  with tf.name_scope(scope, "distort_color", [image]):
    if color_ordering == 0:
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
      image = tf.image.random_hue(image, max_delta=0.2)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
    elif color_ordering == 1:
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
      image = tf.image.random_hue(image, max_delta=0.2)
    elif color_ordering == 2:
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
      image = tf.image.random_hue(image, max_delta=0.2)
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
    elif color_ordering == 3:
      image = tf.image.random_hue(image, max_delta=0.2)
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
    else:
      raise ValueError("color_ordering must be in [0, 3]")

    # The random_* ops do not necessarily clamp.
    return tf.clip_by_value(image, 0.0, 1.0)
python
def _distort_color(image, color_ordering=0, scope=None):
  """Distort the color of a Tensor image.

  Each color distortion is non-commutative and thus ordering of the color ops
  matters. Ideally we would randomly permute the ordering of the color ops.
  Rather than adding that level of complication, we select a distinct ordering
  of color ops for each preprocessing thread.

  Args:
    image: 3-D Tensor containing single image in [0, 1].
    color_ordering: Python int, a type of distortion (valid values: 0-3).
    scope: Optional scope for name_scope.

  Returns:
    3-D Tensor color-distorted image on range [0, 1]

  Raises:
    ValueError: if color_ordering not in [0, 3]
  """
  with tf.name_scope(scope, "distort_color", [image]):
    if color_ordering == 0:
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
      image = tf.image.random_hue(image, max_delta=0.2)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
    elif color_ordering == 1:
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
      image = tf.image.random_hue(image, max_delta=0.2)
    elif color_ordering == 2:
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
      image = tf.image.random_hue(image, max_delta=0.2)
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
    elif color_ordering == 3:
      image = tf.image.random_hue(image, max_delta=0.2)
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
    else:
      raise ValueError("color_ordering must be in [0, 3]")

    # The random_* ops do not necessarily clamp.
    return tf.clip_by_value(image, 0.0, 1.0)
[ "def", "_distort_color", "(", "image", ",", "color_ordering", "=", "0", ",", "scope", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "scope", ",", "\"distort_color\"", ",", "[", "image", "]", ")", ":", "if", "color_ordering", "==", "0", ":", "image", "=", "tf", ".", "image", ".", "random_brightness", "(", "image", ",", "max_delta", "=", "32.", "/", "255.", ")", "image", "=", "tf", ".", "image", ".", "random_saturation", "(", "image", ",", "lower", "=", "0.5", ",", "upper", "=", "1.5", ")", "image", "=", "tf", ".", "image", ".", "random_hue", "(", "image", ",", "max_delta", "=", "0.2", ")", "image", "=", "tf", ".", "image", ".", "random_contrast", "(", "image", ",", "lower", "=", "0.5", ",", "upper", "=", "1.5", ")", "elif", "color_ordering", "==", "1", ":", "image", "=", "tf", ".", "image", ".", "random_saturation", "(", "image", ",", "lower", "=", "0.5", ",", "upper", "=", "1.5", ")", "image", "=", "tf", ".", "image", ".", "random_brightness", "(", "image", ",", "max_delta", "=", "32.", "/", "255.", ")", "image", "=", "tf", ".", "image", ".", "random_contrast", "(", "image", ",", "lower", "=", "0.5", ",", "upper", "=", "1.5", ")", "image", "=", "tf", ".", "image", ".", "random_hue", "(", "image", ",", "max_delta", "=", "0.2", ")", "elif", "color_ordering", "==", "2", ":", "image", "=", "tf", ".", "image", ".", "random_contrast", "(", "image", ",", "lower", "=", "0.5", ",", "upper", "=", "1.5", ")", "image", "=", "tf", ".", "image", ".", "random_hue", "(", "image", ",", "max_delta", "=", "0.2", ")", "image", "=", "tf", ".", "image", ".", "random_brightness", "(", "image", ",", "max_delta", "=", "32.", "/", "255.", ")", "image", "=", "tf", ".", "image", ".", "random_saturation", "(", "image", ",", "lower", "=", "0.5", ",", "upper", "=", "1.5", ")", "elif", "color_ordering", "==", "3", ":", "image", "=", "tf", ".", "image", ".", "random_hue", "(", "image", ",", "max_delta", "=", "0.2", ")", "image", "=", "tf", ".", "image", ".", "random_saturation", "(", "image", ",", "lower", "=", "0.5", ",", "upper", "=", "1.5", ")", "image", "=", "tf", ".", "image", ".", "random_contrast", "(", "image", ",", "lower", "=", "0.5", ",", "upper", "=", "1.5", ")", "image", "=", "tf", ".", "image", ".", "random_brightness", "(", "image", ",", "max_delta", "=", "32.", "/", "255.", ")", "else", ":", "raise", "ValueError", "(", "\"color_ordering must be in [0, 3]\"", ")", "# The random_* ops do not necessarily clamp.", "return", "tf", ".", "clip_by_value", "(", "image", ",", "0.0", ",", "1.0", ")" ]
Distort the color of a Tensor image.

Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.

Args:
  image: 3-D Tensor containing single image in [0, 1].
  color_ordering: Python int, a type of distortion (valid values: 0-3).
  scope: Optional scope for name_scope.

Returns:
  3-D Tensor color-distorted image on range [0, 1]

Raises:
  ValueError: if color_ordering not in [0, 3]
[ "Distort", "the", "color", "of", "a", "Tensor", "image", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/vqa_utils.py#L98-L140
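A minimal way to exercise one ordering, assuming the TF 1.x-style APIs used throughout this file and that _distort_color above is in scope (the image shape is arbitrary):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

image = tf.random_uniform([224, 224, 3])        # fake image in [0, 1)
distorted = _distort_color(image, color_ordering=2)
with tf.Session() as sess:
  out = sess.run(distorted)
  print(out.shape, out.min() >= 0.0, out.max() <= 1.0)  # clipped to [0, 1]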
22,589
tensorflow/tensor2tensor
tensor2tensor/data_generators/vqa_utils.py
vqa_v2_preprocess_image
def vqa_v2_preprocess_image(
    image,
    height,
    width,
    mode,
    resize_side=512,
    distort=True,
    image_model_fn="resnet_v1_152",
):
  """vqa v2 preprocess image."""

  image = tf.image.convert_image_dtype(image, dtype=tf.float32)
  assert resize_side > 0
  if resize_side:
    image = _aspect_preserving_resize(image, resize_side)
  if mode == tf.estimator.ModeKeys.TRAIN:
    image = tf.random_crop(image, [height, width, 3])
  else:
    # Central crop, assuming resize_height > height, resize_width > width.
    image = tf.image.resize_image_with_crop_or_pad(image, height, width)

  image = tf.clip_by_value(image, 0.0, 1.0)

  if mode == tf.estimator.ModeKeys.TRAIN and distort:
    image = _flip(image)
    num_distort_cases = 4
    # pylint: disable=unnecessary-lambda
    image = _apply_with_random_selector(
        image, lambda x, ordering: _distort_color(x, ordering),
        num_cases=num_distort_cases)

  if image_model_fn.startswith("resnet_v1"):
    # resnet_v1 uses vgg preprocessing
    image = image * 255.
    image = _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
  elif image_model_fn.startswith("resnet_v2"):
    # resnet v2 uses inception preprocessing
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)

  return image
python
def vqa_v2_preprocess_image(
    image,
    height,
    width,
    mode,
    resize_side=512,
    distort=True,
    image_model_fn="resnet_v1_152",
):
  """vqa v2 preprocess image."""

  image = tf.image.convert_image_dtype(image, dtype=tf.float32)
  assert resize_side > 0
  if resize_side:
    image = _aspect_preserving_resize(image, resize_side)
  if mode == tf.estimator.ModeKeys.TRAIN:
    image = tf.random_crop(image, [height, width, 3])
  else:
    # Central crop, assuming resize_height > height, resize_width > width.
    image = tf.image.resize_image_with_crop_or_pad(image, height, width)

  image = tf.clip_by_value(image, 0.0, 1.0)

  if mode == tf.estimator.ModeKeys.TRAIN and distort:
    image = _flip(image)
    num_distort_cases = 4
    # pylint: disable=unnecessary-lambda
    image = _apply_with_random_selector(
        image, lambda x, ordering: _distort_color(x, ordering),
        num_cases=num_distort_cases)

  if image_model_fn.startswith("resnet_v1"):
    # resnet_v1 uses vgg preprocessing
    image = image * 255.
    image = _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
  elif image_model_fn.startswith("resnet_v2"):
    # resnet v2 uses inception preprocessing
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)

  return image
[ "def", "vqa_v2_preprocess_image", "(", "image", ",", "height", ",", "width", ",", "mode", ",", "resize_side", "=", "512", ",", "distort", "=", "True", ",", "image_model_fn", "=", "\"resnet_v1_152\"", ",", ")", ":", "image", "=", "tf", ".", "image", ".", "convert_image_dtype", "(", "image", ",", "dtype", "=", "tf", ".", "float32", ")", "assert", "resize_side", ">", "0", "if", "resize_side", ":", "image", "=", "_aspect_preserving_resize", "(", "image", ",", "resize_side", ")", "if", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ":", "image", "=", "tf", ".", "random_crop", "(", "image", ",", "[", "height", ",", "width", ",", "3", "]", ")", "else", ":", "# Central crop, assuming resize_height > height, resize_width > width.", "image", "=", "tf", ".", "image", ".", "resize_image_with_crop_or_pad", "(", "image", ",", "height", ",", "width", ")", "image", "=", "tf", ".", "clip_by_value", "(", "image", ",", "0.0", ",", "1.0", ")", "if", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", "and", "distort", ":", "image", "=", "_flip", "(", "image", ")", "num_distort_cases", "=", "4", "# pylint: disable=unnecessary-lambda", "image", "=", "_apply_with_random_selector", "(", "image", ",", "lambda", "x", ",", "ordering", ":", "_distort_color", "(", "x", ",", "ordering", ")", ",", "num_cases", "=", "num_distort_cases", ")", "if", "image_model_fn", ".", "startswith", "(", "\"resnet_v1\"", ")", ":", "# resnet_v1 uses vgg preprocessing", "image", "=", "image", "*", "255.", "image", "=", "_mean_image_subtraction", "(", "image", ",", "[", "_R_MEAN", ",", "_G_MEAN", ",", "_B_MEAN", "]", ")", "elif", "image_model_fn", ".", "startswith", "(", "\"resnet_v2\"", ")", ":", "# resnet v2 uses inception preprocessing", "image", "=", "tf", ".", "subtract", "(", "image", ",", "0.5", ")", "image", "=", "tf", ".", "multiply", "(", "image", ",", "2.0", ")", "return", "image" ]
vqa v2 preprocess image.
[ "vqa", "v2", "preprocess", "image", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/vqa_utils.py#L196-L236
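The two final branches apply different input normalizations, and the arithmetic is simple to state directly in NumPy. The mean constants below are the standard slim-style VGG channel means (123.68 / 116.78 / 103.94), assumed here to match this module's _R_MEAN, _G_MEAN, _B_MEAN:

import numpy as np

x = np.random.rand(224, 224, 3).astype(np.float32)  # image in [0, 1]

# resnet_v1 branch: vgg-style -- scale to [0, 255], subtract per-channel means.
vgg = x * 255.0 - np.array([123.68, 116.78, 103.94], dtype=np.float32)

# resnet_v2 branch: inception-style -- map [0, 1] to [-1, 1].
inception = (x - 0.5) * 2.0
print(vgg.mean(axis=(0, 1)), inception.min(), inception.max())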
22,590
tensorflow/tensor2tensor
tensor2tensor/layers/transformer_layers.py
transformer_prepare_encoder
def transformer_prepare_encoder(inputs, target_space, hparams, features=None):
  """Prepare one shard of the model for the encoder.

  Args:
    inputs: a Tensor.
    target_space: a Tensor.
    hparams: run hyperparameters
    features: optionally pass the entire features dictionary as well. This is
      needed now for "packed" datasets.

  Returns:
    encoder_input: a Tensor, bottom of encoder stack
    encoder_self_attention_bias: a bias tensor for use in encoder self-attention
    encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder
      attention
  """
  ishape_static = inputs.shape.as_list()
  encoder_input = inputs
  if features and "inputs_segmentation" in features:
    # Packed dataset. Keep the examples from seeing each other.
    inputs_segmentation = features["inputs_segmentation"]
    inputs_position = features["inputs_position"]
    targets_segmentation = features["targets_segmentation"]
    if (hasattr(hparams, "unidirectional_encoder") and
        hparams.unidirectional_encoder):
      tf.logging.info("Using unidirectional encoder")
      encoder_self_attention_bias = (
          common_attention.attention_bias_lower_triangle(
              common_layers.shape_list(inputs)[1]))
    else:
      encoder_self_attention_bias = (
          common_attention.attention_bias_same_segment(
              inputs_segmentation, inputs_segmentation))
    encoder_decoder_attention_bias = (
        common_attention.attention_bias_same_segment(targets_segmentation,
                                                     inputs_segmentation))
  else:
    encoder_padding = common_attention.embedding_to_padding(encoder_input)
    ignore_padding = common_attention.attention_bias_ignore_padding(
        encoder_padding)
    if (hasattr(hparams, "unidirectional_encoder") and
        hparams.unidirectional_encoder):
      tf.logging.info("Using unidirectional encoder")
      encoder_self_attention_bias = (
          common_attention.attention_bias_lower_triangle(
              common_layers.shape_list(inputs)[1]))
    else:
      # Usual case - not a packed dataset.
      encoder_self_attention_bias = ignore_padding
    encoder_decoder_attention_bias = ignore_padding
    inputs_position = None
  if hparams.proximity_bias:
    encoder_self_attention_bias += common_attention.attention_bias_proximal(
        common_layers.shape_list(inputs)[1])
  if target_space is not None and hparams.get("use_target_space_embedding",
                                              True):
    # Append target_space_id embedding to inputs.
    emb_target_space = common_layers.embedding(
        target_space,
        32,
        ishape_static[-1],
        name="target_space_embedding",
        dtype=hparams.get("activation_dtype", "float32"))
    emb_target_space = tf.reshape(emb_target_space, [1, 1, -1])
    encoder_input += emb_target_space
  if hparams.pos == "timing":
    if inputs_position is not None:
      encoder_input = common_attention.add_timing_signal_1d_given_position(
          encoder_input, inputs_position)
    else:
      encoder_input = common_attention.add_timing_signal_1d(encoder_input)
  elif hparams.pos == "emb":
    encoder_input = common_attention.add_positional_embedding(
        encoder_input, hparams.max_length, "inputs_positional_embedding",
        inputs_position)
  encoder_self_attention_bias = common_layers.cast_like(
      encoder_self_attention_bias, encoder_input)
  encoder_decoder_attention_bias = common_layers.cast_like(
      encoder_decoder_attention_bias, encoder_input)
  return (encoder_input, encoder_self_attention_bias,
          encoder_decoder_attention_bias)
python
def transformer_prepare_encoder(inputs, target_space, hparams, features=None):
  """Prepare one shard of the model for the encoder.

  Args:
    inputs: a Tensor.
    target_space: a Tensor.
    hparams: run hyperparameters
    features: optionally pass the entire features dictionary as well. This is
      needed now for "packed" datasets.

  Returns:
    encoder_input: a Tensor, bottom of encoder stack
    encoder_self_attention_bias: a bias tensor for use in encoder self-attention
    encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder
      attention
  """
  ishape_static = inputs.shape.as_list()
  encoder_input = inputs
  if features and "inputs_segmentation" in features:
    # Packed dataset. Keep the examples from seeing each other.
    inputs_segmentation = features["inputs_segmentation"]
    inputs_position = features["inputs_position"]
    targets_segmentation = features["targets_segmentation"]
    if (hasattr(hparams, "unidirectional_encoder") and
        hparams.unidirectional_encoder):
      tf.logging.info("Using unidirectional encoder")
      encoder_self_attention_bias = (
          common_attention.attention_bias_lower_triangle(
              common_layers.shape_list(inputs)[1]))
    else:
      encoder_self_attention_bias = (
          common_attention.attention_bias_same_segment(
              inputs_segmentation, inputs_segmentation))
    encoder_decoder_attention_bias = (
        common_attention.attention_bias_same_segment(targets_segmentation,
                                                     inputs_segmentation))
  else:
    encoder_padding = common_attention.embedding_to_padding(encoder_input)
    ignore_padding = common_attention.attention_bias_ignore_padding(
        encoder_padding)
    if (hasattr(hparams, "unidirectional_encoder") and
        hparams.unidirectional_encoder):
      tf.logging.info("Using unidirectional encoder")
      encoder_self_attention_bias = (
          common_attention.attention_bias_lower_triangle(
              common_layers.shape_list(inputs)[1]))
    else:
      # Usual case - not a packed dataset.
      encoder_self_attention_bias = ignore_padding
    encoder_decoder_attention_bias = ignore_padding
    inputs_position = None
  if hparams.proximity_bias:
    encoder_self_attention_bias += common_attention.attention_bias_proximal(
        common_layers.shape_list(inputs)[1])
  if target_space is not None and hparams.get("use_target_space_embedding",
                                              True):
    # Append target_space_id embedding to inputs.
    emb_target_space = common_layers.embedding(
        target_space,
        32,
        ishape_static[-1],
        name="target_space_embedding",
        dtype=hparams.get("activation_dtype", "float32"))
    emb_target_space = tf.reshape(emb_target_space, [1, 1, -1])
    encoder_input += emb_target_space
  if hparams.pos == "timing":
    if inputs_position is not None:
      encoder_input = common_attention.add_timing_signal_1d_given_position(
          encoder_input, inputs_position)
    else:
      encoder_input = common_attention.add_timing_signal_1d(encoder_input)
  elif hparams.pos == "emb":
    encoder_input = common_attention.add_positional_embedding(
        encoder_input, hparams.max_length, "inputs_positional_embedding",
        inputs_position)
  encoder_self_attention_bias = common_layers.cast_like(
      encoder_self_attention_bias, encoder_input)
  encoder_decoder_attention_bias = common_layers.cast_like(
      encoder_decoder_attention_bias, encoder_input)
  return (encoder_input, encoder_self_attention_bias,
          encoder_decoder_attention_bias)
[ "def", "transformer_prepare_encoder", "(", "inputs", ",", "target_space", ",", "hparams", ",", "features", "=", "None", ")", ":", "ishape_static", "=", "inputs", ".", "shape", ".", "as_list", "(", ")", "encoder_input", "=", "inputs", "if", "features", "and", "\"inputs_segmentation\"", "in", "features", ":", "# Packed dataset. Keep the examples from seeing each other.", "inputs_segmentation", "=", "features", "[", "\"inputs_segmentation\"", "]", "inputs_position", "=", "features", "[", "\"inputs_position\"", "]", "targets_segmentation", "=", "features", "[", "\"targets_segmentation\"", "]", "if", "(", "hasattr", "(", "hparams", ",", "\"unidirectional_encoder\"", ")", "and", "hparams", ".", "unidirectional_encoder", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Using unidirectional encoder\"", ")", "encoder_self_attention_bias", "=", "(", "common_attention", ".", "attention_bias_lower_triangle", "(", "common_layers", ".", "shape_list", "(", "inputs", ")", "[", "1", "]", ")", ")", "else", ":", "encoder_self_attention_bias", "=", "(", "common_attention", ".", "attention_bias_same_segment", "(", "inputs_segmentation", ",", "inputs_segmentation", ")", ")", "encoder_decoder_attention_bias", "=", "(", "common_attention", ".", "attention_bias_same_segment", "(", "targets_segmentation", ",", "inputs_segmentation", ")", ")", "else", ":", "encoder_padding", "=", "common_attention", ".", "embedding_to_padding", "(", "encoder_input", ")", "ignore_padding", "=", "common_attention", ".", "attention_bias_ignore_padding", "(", "encoder_padding", ")", "if", "(", "hasattr", "(", "hparams", ",", "\"unidirectional_encoder\"", ")", "and", "hparams", ".", "unidirectional_encoder", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Using unidirectional encoder\"", ")", "encoder_self_attention_bias", "=", "(", "common_attention", ".", "attention_bias_lower_triangle", "(", "common_layers", ".", "shape_list", "(", "inputs", ")", "[", "1", "]", ")", ")", "else", ":", "# Usual case - not a packed dataset.", "encoder_self_attention_bias", "=", "ignore_padding", "encoder_decoder_attention_bias", "=", "ignore_padding", "inputs_position", "=", "None", "if", "hparams", ".", "proximity_bias", ":", "encoder_self_attention_bias", "+=", "common_attention", ".", "attention_bias_proximal", "(", "common_layers", ".", "shape_list", "(", "inputs", ")", "[", "1", "]", ")", "if", "target_space", "is", "not", "None", "and", "hparams", ".", "get", "(", "\"use_target_space_embedding\"", ",", "True", ")", ":", "# Append target_space_id embedding to inputs.", "emb_target_space", "=", "common_layers", ".", "embedding", "(", "target_space", ",", "32", ",", "ishape_static", "[", "-", "1", "]", ",", "name", "=", "\"target_space_embedding\"", ",", "dtype", "=", "hparams", ".", "get", "(", "\"activation_dtype\"", ",", "\"float32\"", ")", ")", "emb_target_space", "=", "tf", ".", "reshape", "(", "emb_target_space", ",", "[", "1", ",", "1", ",", "-", "1", "]", ")", "encoder_input", "+=", "emb_target_space", "if", "hparams", ".", "pos", "==", "\"timing\"", ":", "if", "inputs_position", "is", "not", "None", ":", "encoder_input", "=", "common_attention", ".", "add_timing_signal_1d_given_position", "(", "encoder_input", ",", "inputs_position", ")", "else", ":", "encoder_input", "=", "common_attention", ".", "add_timing_signal_1d", "(", "encoder_input", ")", "elif", "hparams", ".", "pos", "==", "\"emb\"", ":", "encoder_input", "=", "common_attention", ".", "add_positional_embedding", "(", "encoder_input", ",", "hparams", ".", "max_length", 
",", "\"inputs_positional_embedding\"", ",", "inputs_position", ")", "encoder_self_attention_bias", "=", "common_layers", ".", "cast_like", "(", "encoder_self_attention_bias", ",", "encoder_input", ")", "encoder_decoder_attention_bias", "=", "common_layers", ".", "cast_like", "(", "encoder_decoder_attention_bias", ",", "encoder_input", ")", "return", "(", "encoder_input", ",", "encoder_self_attention_bias", ",", "encoder_decoder_attention_bias", ")" ]
Prepare one shard of the model for the encoder.

Args:
  inputs: a Tensor.
  target_space: a Tensor.
  hparams: run hyperparameters
  features: optionally pass the entire features dictionary as well. This is
    needed now for "packed" datasets.

Returns:
  encoder_input: a Tensor, bottom of encoder stack
  encoder_self_attention_bias: a bias tensor for use in encoder self-attention
  encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder
    attention
[ "Prepare", "one", "shard", "of", "the", "model", "for", "the", "encoder", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/transformer_layers.py#L35-L116
22,591
tensorflow/tensor2tensor
tensor2tensor/models/research/lm_experiments.py
lmx_base
def lmx_base():
  """Transformer on languagemodel_lm1b32k_packed. 50M Params."""
  hparams = transformer.transformer_tpu()
  # sharing is counterproductive when underparameterized
  hparams.shared_embedding_and_softmax_weights = False
  # we judge by log-ppl, so label smoothing hurts.
  hparams.label_smoothing = 0.0
  # This makes the batch size on GPU the same as on TPU for a packed problem
  # with sequence length 256.
  # TODO(noam): fix the mess that is the data reading pipeline.
  hparams.max_length = 256
  # larger batch since we only have a decoder
  hparams.batch_size = 4096
  # save some memory so we can have a larger model
  hparams.activation_dtype = "bfloat16"
  return hparams
python
def lmx_base():
  """Transformer on languagemodel_lm1b32k_packed. 50M Params."""
  hparams = transformer.transformer_tpu()
  # sharing is counterproductive when underparameterized
  hparams.shared_embedding_and_softmax_weights = False
  # we judge by log-ppl, so label smoothing hurts.
  hparams.label_smoothing = 0.0
  # This makes the batch size on GPU the same as on TPU for a packed problem
  # with sequence length 256.
  # TODO(noam): fix the mess that is the data reading pipeline.
  hparams.max_length = 256
  # larger batch since we only have a decoder
  hparams.batch_size = 4096
  # save some memory so we can have a larger model
  hparams.activation_dtype = "bfloat16"
  return hparams
[ "def", "lmx_base", "(", ")", ":", "hparams", "=", "transformer", ".", "transformer_tpu", "(", ")", "# sharing is counterproductive when underparameterized", "hparams", ".", "shared_embedding_and_softmax_weights", "=", "False", "# we judge by log-ppl, so label smoothing hurts.", "hparams", ".", "label_smoothing", "=", "0.0", "# This makes the batch size on GPU the same as on TPU for a packed problem", "# with sequence length 256.", "# TODO(noam): fix the mess that is the data reading pipeline.", "hparams", ".", "max_length", "=", "256", "# larger batch since we only have a decoder", "hparams", ".", "batch_size", "=", "4096", "# save some memory so we can have a larger model", "hparams", ".", "activation_dtype", "=", "\"bfloat16\"", "return", "hparams" ]
Transformer on languagemodel_lm1b32k_packed. 50M Params.
[ "Transformer", "on", "languagemodel_lm1b32k_packed", ".", "50M", "Params", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/lm_experiments.py#L45-L60
22,592
tensorflow/tensor2tensor
tensor2tensor/models/research/lm_experiments.py
lmx_h4k_f16k
def lmx_h4k_f16k():
  """HParams for training languagemodel_lm1b32k_packed. 1470M Params."""
  hparams = lmx_base()
  hparams.hidden_size = 4096
  hparams.filter_size = 16384
  hparams.batch_size = 1024
  hparams.weight_dtype = "bfloat16"
  return hparams
python
def lmx_h4k_f16k():
  """HParams for training languagemodel_lm1b32k_packed. 1470M Params."""
  hparams = lmx_base()
  hparams.hidden_size = 4096
  hparams.filter_size = 16384
  hparams.batch_size = 1024
  hparams.weight_dtype = "bfloat16"
  return hparams
[ "def", "lmx_h4k_f16k", "(", ")", ":", "hparams", "=", "lmx_base", "(", ")", "hparams", ".", "hidden_size", "=", "4096", "hparams", ".", "filter_size", "=", "16384", "hparams", ".", "batch_size", "=", "1024", "hparams", ".", "weight_dtype", "=", "\"bfloat16\"", "return", "hparams" ]
HParams for training languagemodel_lm1b32k_packed. 1470M Params.
[ "HParams", "for", "training", "languagemodel_lm1b32k_packed", ".", "1470M", "Params", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/lm_experiments.py#L93-L100
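The "1470M Params" figure in the docstring is consistent with back-of-the-envelope Transformer decoder accounting, assuming 6 decoder layers and a ~32k vocab with unshared input/softmax embeddings (as set in lmx_base):

h, f, vocab, layers = 4096, 16384, 32000, 6
attention = 4 * h * h            # q, k, v and output projections per layer
ffn = 2 * h * f                  # two feed-forward matrices per layer
embeddings = 2 * vocab * h       # unshared input embedding + softmax
total = layers * (attention + ffn) + embeddings
print(total / 1e6)               # ~1470M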
22,593
tensorflow/tensor2tensor
tensor2tensor/models/research/lm_experiments.py
lmx_relative
def lmx_relative():
  """Language model using relative attention."""
  hparams = lmx_base()
  hparams.self_attention_type = "dot_product_relative_v2"
  hparams.activation_dtype = "float32"
  hparams.weight_dtype = "float32"
  return hparams
python
def lmx_relative():
  """Language model using relative attention."""
  hparams = lmx_base()
  hparams.self_attention_type = "dot_product_relative_v2"
  hparams.activation_dtype = "float32"
  hparams.weight_dtype = "float32"
  return hparams
[ "def", "lmx_relative", "(", ")", ":", "hparams", "=", "lmx_base", "(", ")", "hparams", ".", "self_attention_type", "=", "\"dot_product_relative_v2\"", "hparams", ".", "activation_dtype", "=", "\"float32\"", "hparams", ".", "weight_dtype", "=", "\"float32\"", "return", "hparams" ]
Language model using relative attention.
[ "Language", "model", "using", "relative", "attention", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/lm_experiments.py#L104-L110
22,594
tensorflow/tensor2tensor
tensor2tensor/rl/envs/simulated_batch_env.py
compute_uncertainty_reward
def compute_uncertainty_reward(logits, predictions):
  """Uncertainty reward based on logits."""
  # TODO(rsepassi): Add support for L1/L2 loss models. Current code only
  # works for softmax models.
  vocab_size = logits.shape[-1]
  assert vocab_size > 1
  log_probs = common_layers.log_prob_from_logits(logits)
  max_log_probs = common_layers.index_last_dim_with_indices(log_probs,
                                                            predictions)
  # Threshold
  neg_log_prob = tf.nn.relu(-max_log_probs - 0.02)
  # Sum across all but the batch dimension
  reduce_dims = list(range(len(neg_log_prob.shape)))[1:]
  summed = tf.reduce_sum(neg_log_prob, axis=reduce_dims)
  return summed / 10
python
def compute_uncertainty_reward(logits, predictions):
  """Uncertainty reward based on logits."""
  # TODO(rsepassi): Add support for L1/L2 loss models. Current code only
  # works for softmax models.
  vocab_size = logits.shape[-1]
  assert vocab_size > 1
  log_probs = common_layers.log_prob_from_logits(logits)
  max_log_probs = common_layers.index_last_dim_with_indices(log_probs,
                                                            predictions)
  # Threshold
  neg_log_prob = tf.nn.relu(-max_log_probs - 0.02)
  # Sum across all but the batch dimension
  reduce_dims = list(range(len(neg_log_prob.shape)))[1:]
  summed = tf.reduce_sum(neg_log_prob, axis=reduce_dims)
  return summed / 10
[ "def", "compute_uncertainty_reward", "(", "logits", ",", "predictions", ")", ":", "# TODO(rsepassi): Add support for L1/L2 loss models. Current code only", "# works for softmax models.", "vocab_size", "=", "logits", ".", "shape", "[", "-", "1", "]", "assert", "vocab_size", ">", "1", "log_probs", "=", "common_layers", ".", "log_prob_from_logits", "(", "logits", ")", "max_log_probs", "=", "common_layers", ".", "index_last_dim_with_indices", "(", "log_probs", ",", "predictions", ")", "# Threshold", "neg_log_prob", "=", "tf", ".", "nn", ".", "relu", "(", "-", "max_log_probs", "-", "0.02", ")", "# Sum across all but the batch dimension", "reduce_dims", "=", "list", "(", "range", "(", "len", "(", "neg_log_prob", ".", "shape", ")", ")", ")", "[", "1", ":", "]", "summed", "=", "tf", ".", "reduce_sum", "(", "neg_log_prob", ",", "axis", "=", "reduce_dims", ")", "return", "summed", "/", "10" ]
Uncertainty reward based on logits.
[ "Uncertainty", "reward", "based", "on", "logits", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/envs/simulated_batch_env.py#L85-L99
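The reward is a thresholded negative log-probability of the model's own predictions, summed over everything but the batch dimension. A NumPy sketch of the same steps (the shapes and random logits below are illustrative):

import numpy as np

logits = np.random.randn(2, 3, 5)                      # [batch, time, vocab]
predictions = logits.argmax(-1)                        # model's own argmax
log_probs = logits - np.log(np.exp(logits).sum(-1, keepdims=True))  # log-softmax
max_log_probs = np.take_along_axis(
    log_probs, predictions[..., None], axis=-1)[..., 0]
neg_log_prob = np.maximum(-max_log_probs - 0.02, 0.0)  # threshold at 0.02
reward = neg_log_prob.reshape(2, -1).sum(axis=1) / 10  # sum all but batch dim
print(reward.shape, reward)                            # (2,), nonnegative values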
22,595
tensorflow/tensor2tensor
tensor2tensor/bin/t2t_datagen.py
set_random_seed
def set_random_seed():
  """Set the random seed from flag everywhere."""
  tf.set_random_seed(FLAGS.random_seed)
  random.seed(FLAGS.random_seed)
  np.random.seed(FLAGS.random_seed)
python
def set_random_seed():
  """Set the random seed from flag everywhere."""
  tf.set_random_seed(FLAGS.random_seed)
  random.seed(FLAGS.random_seed)
  np.random.seed(FLAGS.random_seed)
[ "def", "set_random_seed", "(", ")", ":", "tf", ".", "set_random_seed", "(", "FLAGS", ".", "random_seed", ")", "random", ".", "seed", "(", "FLAGS", ".", "random_seed", ")", "np", ".", "random", ".", "seed", "(", "FLAGS", ".", "random_seed", ")" ]
Set the random seed from flag everywhere.
[ "Set", "the", "random", "seed", "from", "flag", "everywhere", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/bin/t2t_datagen.py#L154-L158
22,596
tensorflow/tensor2tensor
tensor2tensor/bin/t2t_datagen.py
generate_data_for_problem
def generate_data_for_problem(problem):
  """Generate data for a problem in _SUPPORTED_PROBLEM_GENERATORS."""
  training_gen, dev_gen, test_gen = _SUPPORTED_PROBLEM_GENERATORS[problem]

  num_train_shards = FLAGS.num_shards or 10
  tf.logging.info("Generating training data for %s.", problem)
  train_output_files = generator_utils.train_data_filenames(
      problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir,
      num_train_shards)
  generator_utils.generate_files(training_gen(), train_output_files,
                                 FLAGS.max_cases)
  num_dev_shards = int(num_train_shards * 0.1)
  tf.logging.info("Generating development data for %s.", problem)
  dev_output_files = generator_utils.dev_data_filenames(
      problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir,
      num_dev_shards)
  generator_utils.generate_files(dev_gen(), dev_output_files)
  num_test_shards = int(num_train_shards * 0.1)
  test_output_files = []
  test_gen_data = test_gen()
  if test_gen_data is not None:
    tf.logging.info("Generating test data for %s.", problem)
    test_output_files = generator_utils.test_data_filenames(
        problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir,
        num_test_shards)
    generator_utils.generate_files(test_gen_data, test_output_files)
  all_output_files = train_output_files + dev_output_files + test_output_files
  generator_utils.shuffle_dataset(all_output_files)
python
def generate_data_for_problem(problem):
  """Generate data for a problem in _SUPPORTED_PROBLEM_GENERATORS."""
  training_gen, dev_gen, test_gen = _SUPPORTED_PROBLEM_GENERATORS[problem]

  num_train_shards = FLAGS.num_shards or 10
  tf.logging.info("Generating training data for %s.", problem)
  train_output_files = generator_utils.train_data_filenames(
      problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir,
      num_train_shards)
  generator_utils.generate_files(training_gen(), train_output_files,
                                 FLAGS.max_cases)
  num_dev_shards = int(num_train_shards * 0.1)
  tf.logging.info("Generating development data for %s.", problem)
  dev_output_files = generator_utils.dev_data_filenames(
      problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir,
      num_dev_shards)
  generator_utils.generate_files(dev_gen(), dev_output_files)
  num_test_shards = int(num_train_shards * 0.1)
  test_output_files = []
  test_gen_data = test_gen()
  if test_gen_data is not None:
    tf.logging.info("Generating test data for %s.", problem)
    test_output_files = generator_utils.test_data_filenames(
        problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir,
        num_test_shards)
    generator_utils.generate_files(test_gen_data, test_output_files)
  all_output_files = train_output_files + dev_output_files + test_output_files
  generator_utils.shuffle_dataset(all_output_files)
[ "def", "generate_data_for_problem", "(", "problem", ")", ":", "training_gen", ",", "dev_gen", ",", "test_gen", "=", "_SUPPORTED_PROBLEM_GENERATORS", "[", "problem", "]", "num_train_shards", "=", "FLAGS", ".", "num_shards", "or", "10", "tf", ".", "logging", ".", "info", "(", "\"Generating training data for %s.\"", ",", "problem", ")", "train_output_files", "=", "generator_utils", ".", "train_data_filenames", "(", "problem", "+", "generator_utils", ".", "UNSHUFFLED_SUFFIX", ",", "FLAGS", ".", "data_dir", ",", "num_train_shards", ")", "generator_utils", ".", "generate_files", "(", "training_gen", "(", ")", ",", "train_output_files", ",", "FLAGS", ".", "max_cases", ")", "num_dev_shards", "=", "int", "(", "num_train_shards", "*", "0.1", ")", "tf", ".", "logging", ".", "info", "(", "\"Generating development data for %s.\"", ",", "problem", ")", "dev_output_files", "=", "generator_utils", ".", "dev_data_filenames", "(", "problem", "+", "generator_utils", ".", "UNSHUFFLED_SUFFIX", ",", "FLAGS", ".", "data_dir", ",", "num_dev_shards", ")", "generator_utils", ".", "generate_files", "(", "dev_gen", "(", ")", ",", "dev_output_files", ")", "num_test_shards", "=", "int", "(", "num_train_shards", "*", "0.1", ")", "test_output_files", "=", "[", "]", "test_gen_data", "=", "test_gen", "(", ")", "if", "test_gen_data", "is", "not", "None", ":", "tf", ".", "logging", ".", "info", "(", "\"Generating test data for %s.\"", ",", "problem", ")", "test_output_files", "=", "generator_utils", ".", "test_data_filenames", "(", "problem", "+", "generator_utils", ".", "UNSHUFFLED_SUFFIX", ",", "FLAGS", ".", "data_dir", ",", "num_test_shards", ")", "generator_utils", ".", "generate_files", "(", "test_gen_data", ",", "test_output_files", ")", "all_output_files", "=", "train_output_files", "+", "dev_output_files", "+", "test_output_files", "generator_utils", ".", "shuffle_dataset", "(", "all_output_files", ")" ]
Generate data for a problem in _SUPPORTED_PROBLEM_GENERATORS.
[ "Generate", "data", "for", "a", "problem", "in", "_SUPPORTED_PROBLEM_GENERATORS", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/bin/t2t_datagen.py#L224-L251
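The dev and test shard counts are derived from the train shard count, so with the default of 10 train shards there is 1 dev and 1 test shard. An illustrative sketch of the arithmetic; the filename pattern shown is an assumption about generator_utils' sharding convention, not taken from this file:

def shard_filenames(prefix, split, num_shards):
  # Assumed naming convention, e.g. myproblem-unshuffled-train-00000-of-00010.
  return ["%s-%s-%05d-of-%05d" % (prefix, split, i, num_shards)
          for i in range(num_shards)]

num_train_shards = 10                          # FLAGS.num_shards or 10
num_dev_shards = int(num_train_shards * 0.1)   # -> 1
print(shard_filenames("myproblem-unshuffled", "train", num_train_shards)[:2])
print(shard_filenames("myproblem-unshuffled", "dev", num_dev_shards))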
22,597
tensorflow/tensor2tensor
tensor2tensor/bin/t2t_datagen.py
generate_data_for_env_problem
def generate_data_for_env_problem(problem_name):
  """Generate data for `EnvProblem`s."""
  assert FLAGS.env_problem_max_env_steps > 0, ("--env_problem_max_env_steps "
                                               "should be greater than zero")
  assert FLAGS.env_problem_batch_size > 0, ("--env_problem_batch_size should be"
                                            " greater than zero")
  problem = registry.env_problem(problem_name)
  task_id = None if FLAGS.task_id < 0 else FLAGS.task_id
  data_dir = os.path.expanduser(FLAGS.data_dir)
  tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
  # TODO(msaffar): Handle large values for env_problem_batch_size where we
  # cannot create that many environments within the same process.
  problem.initialize(batch_size=FLAGS.env_problem_batch_size)
  env_problem_utils.play_env_problem_randomly(
      problem, num_steps=FLAGS.env_problem_max_env_steps)
  problem.generate_data(data_dir=data_dir, tmp_dir=tmp_dir, task_id=task_id)
python
def generate_data_for_env_problem(problem_name):
  """Generate data for `EnvProblem`s."""
  assert FLAGS.env_problem_max_env_steps > 0, ("--env_problem_max_env_steps "
                                               "should be greater than zero")
  assert FLAGS.env_problem_batch_size > 0, ("--env_problem_batch_size should be"
                                            " greater than zero")
  problem = registry.env_problem(problem_name)
  task_id = None if FLAGS.task_id < 0 else FLAGS.task_id
  data_dir = os.path.expanduser(FLAGS.data_dir)
  tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
  # TODO(msaffar): Handle large values for env_problem_batch_size where we
  # cannot create that many environments within the same process.
  problem.initialize(batch_size=FLAGS.env_problem_batch_size)
  env_problem_utils.play_env_problem_randomly(
      problem, num_steps=FLAGS.env_problem_max_env_steps)
  problem.generate_data(data_dir=data_dir, tmp_dir=tmp_dir, task_id=task_id)
[ "def", "generate_data_for_env_problem", "(", "problem_name", ")", ":", "assert", "FLAGS", ".", "env_problem_max_env_steps", ">", "0", ",", "(", "\"--env_problem_max_env_steps \"", "\"should be greater than zero\"", ")", "assert", "FLAGS", ".", "env_problem_batch_size", ">", "0", ",", "(", "\"--env_problem_batch_size should be\"", "\" greather than zero\"", ")", "problem", "=", "registry", ".", "env_problem", "(", "problem_name", ")", "task_id", "=", "None", "if", "FLAGS", ".", "task_id", "<", "0", "else", "FLAGS", ".", "task_id", "data_dir", "=", "os", ".", "path", ".", "expanduser", "(", "FLAGS", ".", "data_dir", ")", "tmp_dir", "=", "os", ".", "path", ".", "expanduser", "(", "FLAGS", ".", "tmp_dir", ")", "# TODO(msaffar): Handle large values for env_problem_batch_size where we", "# cannot create that many environments within the same process.", "problem", ".", "initialize", "(", "batch_size", "=", "FLAGS", ".", "env_problem_batch_size", ")", "env_problem_utils", ".", "play_env_problem_randomly", "(", "problem", ",", "num_steps", "=", "FLAGS", ".", "env_problem_max_env_steps", ")", "problem", ".", "generate_data", "(", "data_dir", "=", "data_dir", ",", "tmp_dir", "=", "tmp_dir", ",", "task_id", "=", "task_id", ")" ]
Generate data for `EnvProblem`s.
[ "Generate", "data", "for", "EnvProblem", "s", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/bin/t2t_datagen.py#L260-L275
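A hedged usage sketch for generate_data_for_env_problem; the flag values and the problem name below are illustrative assumptions, not values taken from the record.
# Both flags must be positive or the asserts above fail fast.
FLAGS.env_problem_max_env_steps = 5000  # total random env steps to roll out
FLAGS.env_problem_batch_size = 16       # parallel environments per process
generate_data_for_env_problem("tic_tac_toe_env_problem")  # hypothetical name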
22,598
tensorflow/tensor2tensor
tensor2tensor/bin/t2t_datagen.py
generate_data_for_registered_problem
def generate_data_for_registered_problem(problem_name):
  """Generate data for a registered problem."""
  tf.logging.info("Generating data for %s.", problem_name)
  if FLAGS.num_shards:
    raise ValueError("--num_shards should not be set for registered Problem.")
  problem = registry.problem(problem_name)
  task_id = None if FLAGS.task_id < 0 else FLAGS.task_id
  data_dir = os.path.expanduser(FLAGS.data_dir)
  tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
  if task_id is None and problem.multiprocess_generate:
    if FLAGS.task_id_start != -1:
      assert FLAGS.task_id_end != -1
      task_id_start = FLAGS.task_id_start
      task_id_end = FLAGS.task_id_end
    else:
      task_id_start = 0
      task_id_end = problem.num_generate_tasks
    pool = multiprocessing.Pool(processes=FLAGS.num_concurrent_processes)
    problem.prepare_to_generate(data_dir, tmp_dir)
    args = [(problem_name, data_dir, tmp_dir, task_id)
            for task_id in range(task_id_start, task_id_end)]
    pool.map(generate_data_in_process, args)
  else:
    problem.generate_data(data_dir, tmp_dir, task_id)
python
def generate_data_for_registered_problem(problem_name):
  """Generate data for a registered problem."""
  tf.logging.info("Generating data for %s.", problem_name)
  if FLAGS.num_shards:
    raise ValueError("--num_shards should not be set for registered Problem.")
  problem = registry.problem(problem_name)
  task_id = None if FLAGS.task_id < 0 else FLAGS.task_id
  data_dir = os.path.expanduser(FLAGS.data_dir)
  tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
  if task_id is None and problem.multiprocess_generate:
    if FLAGS.task_id_start != -1:
      assert FLAGS.task_id_end != -1
      task_id_start = FLAGS.task_id_start
      task_id_end = FLAGS.task_id_end
    else:
      task_id_start = 0
      task_id_end = problem.num_generate_tasks
    pool = multiprocessing.Pool(processes=FLAGS.num_concurrent_processes)
    problem.prepare_to_generate(data_dir, tmp_dir)
    args = [(problem_name, data_dir, tmp_dir, task_id)
            for task_id in range(task_id_start, task_id_end)]
    pool.map(generate_data_in_process, args)
  else:
    problem.generate_data(data_dir, tmp_dir, task_id)
[ "def", "generate_data_for_registered_problem", "(", "problem_name", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Generating data for %s.\"", ",", "problem_name", ")", "if", "FLAGS", ".", "num_shards", ":", "raise", "ValueError", "(", "\"--num_shards should not be set for registered Problem.\"", ")", "problem", "=", "registry", ".", "problem", "(", "problem_name", ")", "task_id", "=", "None", "if", "FLAGS", ".", "task_id", "<", "0", "else", "FLAGS", ".", "task_id", "data_dir", "=", "os", ".", "path", ".", "expanduser", "(", "FLAGS", ".", "data_dir", ")", "tmp_dir", "=", "os", ".", "path", ".", "expanduser", "(", "FLAGS", ".", "tmp_dir", ")", "if", "task_id", "is", "None", "and", "problem", ".", "multiprocess_generate", ":", "if", "FLAGS", ".", "task_id_start", "!=", "-", "1", ":", "assert", "FLAGS", ".", "task_id_end", "!=", "-", "1", "task_id_start", "=", "FLAGS", ".", "task_id_start", "task_id_end", "=", "FLAGS", ".", "task_id_end", "else", ":", "task_id_start", "=", "0", "task_id_end", "=", "problem", ".", "num_generate_tasks", "pool", "=", "multiprocessing", ".", "Pool", "(", "processes", "=", "FLAGS", ".", "num_concurrent_processes", ")", "problem", ".", "prepare_to_generate", "(", "data_dir", ",", "tmp_dir", ")", "args", "=", "[", "(", "problem_name", ",", "data_dir", ",", "tmp_dir", ",", "task_id", ")", "for", "task_id", "in", "range", "(", "task_id_start", ",", "task_id_end", ")", "]", "pool", ".", "map", "(", "generate_data_in_process", ",", "args", ")", "else", ":", "problem", ".", "generate_data", "(", "data_dir", ",", "tmp_dir", ",", "task_id", ")" ]
Generate data for a registered problem.
[ "Generate", "data", "for", "a", "registered", "problem", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/bin/t2t_datagen.py#L278-L301
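The record dispatches to generate_data_in_process via pool.map without defining it; below is a minimal sketch of such a worker, under the assumption that it simply unpacks one args tuple and generates data for that task id (the repository's actual helper may also do per-process setup such as seeding).
def generate_data_in_process(arg):
  # One (problem_name, data_dir, tmp_dir, task_id) tuple from the args list.
  problem_name, data_dir, tmp_dir, task_id = arg
  problem = registry.problem(problem_name)
  problem.generate_data(data_dir, tmp_dir, task_id)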
22,599
tensorflow/tensor2tensor
tensor2tensor/data_generators/common_voice.py
_file_exists
def _file_exists(path, filename):
  """Checks if the filename exists under the path."""
  return os.path.isfile(os.path.join(path, filename))
python
def _file_exists(path, filename):
  """Checks if the filename exists under the path."""
  return os.path.isfile(os.path.join(path, filename))
[ "def", "_file_exists", "(", "path", ",", "filename", ")", ":", "return", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", ")" ]
Checks if the filename exists under the path.
[ "Checks", "if", "the", "filename", "exists", "under", "the", "path", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/common_voice.py#L69-L71
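A short usage sketch for _file_exists; the directory and filename below are assumptions for illustration.
# e.g. skip re-downloading a Common Voice clip that is already on disk.
if not _file_exists("/tmp/common_voice", "sample-000000.mp3"):
  print("clip missing; would download it here")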