python_code | repo_name | file_path |
|---|---|---|
import os
import sys
from argparse import ArgumentParser
from getpass import getpass
from typing import List, Union
from requests.exceptions import HTTPError
from transformers.commands import BaseTransformersCLICommand
from transformers.hf_api import HfApi, HfFolder
UPLOAD_MAX_FILES = 15
class UserCommands(BaseTr... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/commands/user.py |
import logging
from argparse import ArgumentParser
from transformers.commands import BaseTransformersCLICommand
from transformers.pipelines import SUPPORTED_TASKS, Pipeline, PipelineDataFormat, pipeline
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/commands/run.py |
import platform
from argparse import ArgumentParser
from transformers import __version__ as version
from transformers import is_tf_available, is_torch_available
from transformers.commands import BaseTransformersCLICommand
def info_command_factory(_):
return EnvironmentCommand()
class EnvironmentCommand(BaseTra... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/commands/env.py |
import logging
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from transformers import Pipeline
from transformers.commands import BaseTransformersCLICommand
from transformers.pipelines import SUPPORTED_TASKS, pipeline
try:
from uvicorn import run
from fastapi import Fas... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/commands/serving.py |
from argparse import ArgumentParser
from transformers.commands import BaseTransformersCLICommand
def download_command_factory(args):
return DownloadCommand(args.model, args.cache_dir, args.force)
class DownloadCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentPa... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/commands/download.py |
from argparse import ArgumentParser, Namespace
from logging import getLogger
from transformers.commands import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
"""
    Factory function used to convert a TF 1.0 model checkpoint into a PyTorch checkpoint.
    :return: ConvertCommand
"""
... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/commands/convert.py |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
@staticmethod
@abstractmethod
def register_subcommand(parser: ArgumentParser):
raise NotImplementedError()
@abstractmethod
def run(self):
raise NotImplementedError()
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/commands/__init__.py |
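The `BaseTransformersCLICommand` row above defines the contract every `transformers-cli` subcommand in this dump implements: a static `register_subcommand` hook plus a `run` method. A minimal conforming subclass might look like the sketch below (the `EchoCommand` name and its `--text` flag are hypothetical, chosen only to illustrate the pattern the download/convert/env commands follow):

```python
from argparse import ArgumentParser

from transformers.commands import BaseTransformersCLICommand


class EchoCommand(BaseTransformersCLICommand):
    """Hypothetical subcommand illustrating the two required hooks."""

    def __init__(self, text: str):
        self.text = text

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # Register this command on the CLI's subparsers and map the
        # parsed args to a factory, as the real subcommands do.
        echo_parser = parser.add_parser("echo", help="Print back the given text")
        echo_parser.add_argument("--text", type=str, required=True)
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def run(self):
        print(self.text)
```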
import os
from argparse import ArgumentParser, Namespace
from logging import getLogger
from transformers import SingleSentenceClassificationProcessor as Processor
from transformers import TextClassificationPipeline, is_tf_available, is_torch_available
from transformers.commands import BaseTransformersCLICommand
if n... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/commands/train.py |
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module while preserving other warnings, so don't check this module at all.
from .metrics import is_sklearn_available
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassif... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/__init__.py |
# coding=utf-8
# Based on the SQuAD evaluation script from:
# https://github.com/allenai/bi-att-flow/blob/master/squad/evaluate-v1.1.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# h... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/metrics/evaluate_squad.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
""" Official evaluation script for the MLQA dataset. """
from __future__ import print_function
from collections import Counter
i... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/metrics/mlqa_evaluation_v1.py |
# coding=utf-8
# Based on the MLQA evaluation script from:
# https://github.com/facebookresearch/MLQA/blob/master/mlqa_evaluation_v1.py
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source ... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/metrics/evaluate_mlqa.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/metrics/__init__.py |
""" Very heavily inspired by the official evaluation script for SQuAD version 2.0 which was
modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0
In addition to basic functionality, we also compute additional statistics and
plot precision-recall curves if an additional na_prob.json file is p... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/metrics/squad_metrics.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/processors/xnli.py |
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module while preserving other warnings, so don't check this module at all.
from .xglue import xglue_convert_examples_to_features, xglue_output_modes, xglue_processors, xglue_tasks_num_labels
from .xtreme import xtreme_conver... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/processors/__init__.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/processors/xtreme.py |
import json
import logging
import os
from functools import partial
from multiprocessing import Pool, cpu_count
import numpy as np
from tqdm import tqdm
from ...file_utils import is_tf_available, is_torch_available
from ...tokenization_bert import whitespace_tokenize
from .utils import DataProcessor
from ..metrics.squ... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/processors/squad.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/processors/glue.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/processors/utils.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/processors/xglue.py |
"""I/O"""
def _lines_gen_from_single_file(filename):
    with open(filename) as fp:
        for line in fp:
            yield line.strip()
def lines_gen(*filenames):
    # Yield tuples of corresponding (stripped) lines from parallel files.
    for ret in zip(*map(_lines_gen_from_single_file, filenames)):
        yield ret | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/io.py |
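A quick usage sketch for `lines_gen`, which zips the stripped lines of several parallel files (the file names below are hypothetical):

```python
# src.txt and tgt.txt are assumed to be line-aligned parallel files.
for src_line, tgt_line in lines_gen("src.txt", "tgt.txt"):
    print(src_line, "=>", tgt_line)
```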
EXA-1-master | exa/models/unilm-master/xtune/src/pequod/__init__.py | |
import logging
import torch
from transformers.modeling_bert import (BertConfig, BertEncoder,
BertIntermediate, BertLayer,
BertModel, BertOutput,
BertSelfAttention,
... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/tools/convert.py |
EXA-1-master | exa/models/unilm-master/xtune/src/pequod/tools/__init__.py | |
import logging
import numpy as np
import os
import torch
import random
from torch.autograd import Variable
from torch.utils.data import DataLoader, TensorDataset
try:
from apex import amp
except ImportError:
pass
from src.pequod.trainer import (Trainer,
XClassificationTrainer, XQATrainer, SelfTrainer)
from tra... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/training/xtrainer.py |
import re
import sys
import os
import random
import torch
import pickle
import logging
import numpy as np
# from transformers import (WEIGHTS_NAME,
# BertConfig, BertForSequenceClassification, BertTokenizer,
# RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer,
# RobertaModel, BertModel, XLMModel,... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/training/__init__.py |
import os
import json
import logging
import random
import torch
import numpy as np
try:
from apex import amp
except ImportError:
pass
from torch.utils.data import (DataLoader,
RandomSampler, SequentialSampler, TensorDataset, SubsetRandomSampler,
Subset, ConcatDataset)
#from transformers import AdamW, Constan... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/training/trainer.py |
from collections import defaultdict
import torch
from torch.optim.optimizer import Optimizer
class LookaheadWrapper(Optimizer):
r"""Implements a Lookahead wrapper around a given optimizer
"""
def __init__(self, optimizer, la_steps, la_alpha=0.5):
self.optimizer = optimizer
self._la_step ... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/optim/la.py |
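The `LookaheadWrapper` cell is truncated before the update logic. As a reminder of what a Lookahead step computes (the Lookahead optimizer of Zhang et al., 2019), here is a minimal free-function sketch of the slow-weight sync, not the repository's exact implementation; the wrapper itself keeps the slow copies in its optimizer state and applies this every `la_steps` inner steps:

```python
import torch

def lookahead_sync(slow_params, fast_params, la_alpha=0.5):
    """Hypothetical helper: phi <- phi + alpha * (theta - phi), then theta <- phi."""
    with torch.no_grad():
        for slow, fast in zip(slow_params, fast_params):
            slow.add_(fast - slow, alpha=la_alpha)  # move slow weights toward fast ones
            fast.copy_(slow)                        # restart fast weights from slow weights
```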
EXA-1-master | exa/models/unilm-master/xtune/src/pequod/optim/__init__.py | |
from collections import defaultdict
import torch
from torch.optim.optimizer import Optimizer
class Lookahead0Wrapper(Optimizer):
r"""Implements a Lookahead wrapper around a given optimizer
"""
def __init__(self, optimizer, la_steps, la_alpha=0.5):
self.optimizer = optimizer
self._la_step... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/optim/la0.py |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers.modeling_bert import BertPreTrainedModel, BertForQuestionAnswering
from transformers.modeling_roberta import RobertaModel
class RobertaForQuestionAnswering(BertPreTrainedModel):
base_model_prefix = "roberta"
def __init__(s... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/model/roberta.py |
EXA-1-master | exa/models/unilm-master/xtune/src/pequod/model/__init__.py | |
import os
import numpy as np
import torch
import inspect
from src.pequod.data.utils_squad import RawResult, write_predictions
from src.pequod.data.utils_squad_evaluate import EVAL_OPTS, main as evaluate_on_squad
def to_list(tensor):
return tensor.detach().cpu().tolist()
def score_dict_to_string(score_dict):
r... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/eval/__init__.py |
import faiss
import json
import logging
import numpy as np
import os
import torch
from src.pequod.data.xretrieval import load_and_cache_examples
from src.pequod.eval.evaluator import Evaluator
from src.pequod.eval.utils_retrieve import mine_bitext, bucc_eval
logger = logging.getLogger(__name__)
def load_embedding... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/eval/bretrieval.py |
import faiss
import json
import logging
import numpy as np
import os
import torch
from src.pequod.data.xretrieval import load_and_cache_examples
from src.pequod.eval.evaluator import Evaluator
logger = logging.getLogger(__name__)
def similarity_search(x, y, dim, normalize=False, dist='L2'):
top_k = 10
num = x... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/eval/xretrieval.py |
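`similarity_search` is truncated above; a typical FAISS-based implementation of this kind of L2 lookup with optional normalization is sketched below under assumed semantics (`faiss_l2_search` is a hypothetical stand-in, not the repository's body):

```python
import faiss
import numpy as np

def faiss_l2_search(x, y, dim, top_k=10, normalize=False):
    # Index the candidate vectors y, then query with x.
    x = np.asarray(x, dtype=np.float32).copy()
    y = np.asarray(y, dtype=np.float32).copy()
    if normalize:
        faiss.normalize_L2(x)  # in-place row-wise L2 normalization
        faiss.normalize_L2(y)
    index = faiss.IndexFlatL2(dim)      # exact (brute-force) L2 index
    index.add(y)
    distances, indices = index.search(x, top_k)
    return distances, indices
```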
# coding=utf-8
# This repository is modified based on the LASER repository.
# https://github.com/facebookresearch/LASER
# Copyright The LASER Team Authors, and The XTREME Benchmark Authors.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/eval/utils_retrieve.py |
import logging
import torch
from torch.utils.data import DataLoader
from src.pequod.training.trainer import to_cuda
logger = logging.getLogger(__name__)
class Evaluator(object):
def __init__(self, args, model, tokenizer, **kwargs):
self.args = args
self.datasets = {}
self.model = model
self.toke... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/eval/evaluator.py |
EXA-1-master | exa/models/unilm-master/xtune/src/pequod/text/__init__.py | |
import os
import logging
import sentencepiece as spm
from transformers.tokenization_utils import PreTrainedTokenizer
logger = logging.getLogger(__name__)
class XLMRTokenizer(PreTrainedTokenizer):
def __init__(self, bpe_file, dict_file, **kwargs):
super(XLMRTokenizer, self).__init__(
bos_token="<s>",
... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/text/tokenization_sentencepiece.py |
"""Loading examples and features for CLS and MLDoc"""
import logging
import os
import torch
from transformers.data.processors.utils import (DataProcessor,
InputExample, InputFeatures)
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
logger = l... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/data/xdoc.py |
import os
import logging
import torch
from torch.utils.data import TensorDataset
from src.pequod.data.utils_squad import (read_squad_examples,
convert_examples_to_features)
logger = logging.getLogger(__name__)
def load_and_cache_examples(args, split, lang, tokenizer, key="", evaluate=False):
cache_filename = o... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/data/xqa.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a co... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/data/utils_squad.py |
import logging
from transformers.data.processors.utils import InputFeatures
logger = logging.getLogger(__name__)
def convert_examples_to_features(
processor, examples, tokenizer, max_length, label_list,
pad_token=0, pad_token_segment_id=0, mask_padding_with_zero=True):
if label_list is None: label_list =... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/data/__init__.py |
""" Official evaluation script for SQuAD version 2.0.
Modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0
In addition to basic functionality, we also compute additional statistics and
plot precision-recall curves if an additional na_prob.json file is provided.
This file is expected to ... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/data/utils_squad_evaluate.py |
"""Load examples from BUCC"""
import logging
import os
import torch
from transformers.data.processors.utils import (
DataProcessor, InputExample, InputFeatures)
from torch.utils.data import (
DataLoader, RandomSampler, SequentialSampler, TensorDataset)
logger = logging.getLogger(__name__)
def load_and_cache... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/data/xretrieval.py |
EXA-1-master | exa/models/unilm-master/xtune/src/pequod/data/dataloader.py | |
"""Loading examples and features for WiLI-2018 dataset"""
import logging
import os
import torch
from transformers.data.processors.utils import (DataProcessor,
InputExample, InputFeatures)
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from sr... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/data/wili.py |
import torch
from torch.utils.data.sampler import Sampler
class SubSampler(Sampler):
def __init__(self, data_source, num_samples):
self.data_source = data_source
self.num_samples = num_samples
def __len__(self):
return self.num_samples
def __iter__(self):
n = len(self.data_source)
if s... | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/data/sampler.py |
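`SubSampler.__iter__` is cut off mid-condition above. A common implementation of this sampler shape, sketched under assumptions (the hypothetical `RandomSubsetSampler` below is not the repository's exact body), draws a truncated random permutation and falls back to sampling with replacement when more samples than elements are requested:

```python
import torch
from torch.utils.data.sampler import Sampler

class RandomSubsetSampler(Sampler):
    """Hypothetical stand-in for SubSampler: yields num_samples random indices."""

    def __init__(self, data_source, num_samples):
        self.data_source = data_source
        self.num_samples = num_samples

    def __len__(self):
        return self.num_samples

    def __iter__(self):
        n = len(self.data_source)
        if self.num_samples <= n:
            # Without replacement: truncate a random permutation.
            return iter(torch.randperm(n)[: self.num_samples].tolist())
        # With replacement when asking for more samples than elements.
        return iter(torch.randint(n, (self.num_samples,)).tolist())
```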
import ast
import logging
import os
import os.path as op
import sys
from argparse import Namespace
import numpy as np
import torch
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from omegaconf impor... | EXA-1-master | exa/models/unilm-master/speecht5/scripts/generate_speech.py |
from . import data, tasks, criterions, models # noqa | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/__init__.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/sequence_generator.py |
EXA-1-master | exa/models/unilm-master/speecht5/speecht5/tasks/__init__.py | |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/tasks/speecht5.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/t5_transformer_lm.py |
from .speecht5 import * # noqa
from .t5_transformer_lm import * # noqa
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/__init__.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/speecht5.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/decoder.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/multihead_attention.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/speaker_decoder_postnet.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/speech_encoder_postnet.py |
EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/__init__.py | |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/transformer_layer.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/encoder.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/text_decoder_postnet.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/speech_encoder_prenet.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/speech_decoder_postnet.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/speech_decoder_prenet.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/text_decoder_prenet.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/text_encoder_prenet.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/data/speech_to_speech_dataset.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/data/multitask_dataset.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/data/speech_dataset.py |
EXA-1-master | exa/models/unilm-master/speecht5/speecht5/data/__init__.py | |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/data/text_dataset.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/data/speech_to_text_dataset.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/data/text_to_speech_dataset.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/criterions/speech_to_text_loss.py |
import importlib
import os
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
criterion_name = file[: file.find(".py")]
importlib.import_module(
"speecht5.criterions." + criterion_name
) | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/criterions/__init__.py |
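The loop in `speecht5/criterions/__init__.py` above auto-imports every criterion module so that their fairseq `@register_criterion` decorators run at package-import time. The same discovery pattern, generalized into a reusable helper (`import_all_modules` is hypothetical, for illustration only):

```python
import importlib
import os

def import_all_modules(package_dir: str, package_name: str) -> None:
    """Import every non-underscore .py file in a package so that
    decorator-based registries see each module as a side effect."""
    for file in os.listdir(package_dir):
        if file.endswith(".py") and not file.startswith("_"):
            module_name = file[: -len(".py")]
            importlib.import_module(package_name + "." + module_name)
```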
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/criterions/speecht5_criterion.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/criterions/text_pretrain_criterion.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/criterions/text_to_speech_loss.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/criterions/speech_pretrain_criterion.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fai... | EXA-1-master | exa/models/unilm-master/speechlm/SpeechLM.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fai... | EXA-1-master | exa/models/unilm-master/speechlm/modules.py |
from . import data, tasks, criterions, models
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/__init__.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fai... | EXA-1-master | exa/models/unilm-master/speechlm/speechlm/unit_generator.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fai... | EXA-1-master | exa/models/unilm-master/speechlm/speechlm/infer.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fai... | EXA-1-master | exa/models/unilm-master/speechlm/speechlm/generate_unit.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fai... | EXA-1-master | exa/models/unilm-master/speechlm/speechlm/tasks/joint_sc2t_pretrain.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fai... | EXA-1-master | exa/models/unilm-master/speechlm/speechlm/tasks/fast_text_to_unit.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fai... | EXA-1-master | exa/models/unilm-master/speechlm/speechlm/models/speechlm.py |
EXA-1-master | exa/models/unilm-master/speechlm/speechlm/models/__init__.py | |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fai... | EXA-1-master | exa/models/unilm-master/speechlm/speechlm/models/speechlm_st.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fai... | EXA-1-master | exa/models/unilm-master/speechlm/speechlm/models/fasttext2unit.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fai... | EXA-1-master | exa/models/unilm-master/speechlm/speechlm/models/speechlm_ctcasr.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fai... | EXA-1-master | exa/models/unilm-master/speechlm/speechlm/modules/transformer_encoder.py |
# --------------------------------------------------------
# Pre-Training Transformer Decoder for End-to-End ASR Model with Unpaired Speech Data (https://arxiv.org/abs/2203.17113)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/Speech2C
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [s... | EXA-1-master | exa/models/unilm-master/speechlm/speechlm/modules/multihead_attention.py |
# --------------------------------------------------------
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq code bases
# https://github.com/facebookresearch/fairseq
# --------------------------------------------------------
"""
Modified from https://github... | EXA-1-master | exa/models/unilm-master/speechlm/speechlm/modules/learned_positional_embedding.py |
# --------------------------------------------------------
# The YiTrans End-to-End Speech Translation System for IWSLT 2022 Offline Shared Task (https://arxiv.org/abs/2206.05777)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/YiTrans
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [se... | EXA-1-master | exa/models/unilm-master/speechlm/speechlm/modules/w2v_encoder.py |