| code (string, lengths 82-53.2k) | code_codestyle (int64, 0-721) | style_context (string, lengths 91-41.9k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
import unittest
import numpy as np
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , ) -> np.ndarray:
UpperCamelCase_: str = np.shape(UpperCAmelCase__ )
UpperCamelCase_:... | 57 |
"""simple docstring"""
def _snake_case ( UpperCamelCase : str , UpperCamelCase : int ):
UpperCAmelCase : List[Any] = word.split()
def justify(UpperCamelCase : list , UpperCamelCase : int , UpperCamelCase : int ) -> str:
UpperCAmelCa... | 160 | 0 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def snake_case__ ( __lowerCamelCase : Any ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =os.path.join(args.t... | 625 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax imp... | 625 | 1 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
a = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-... | 109 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
__SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
class lowerCAmelCase__ :
"""simple... | 688 | 0 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from... | 709 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class UpperCamelCase__ ( datasets.BeamBasedBuilder... | 317 | 0 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import Batch... | 42 |
'''simple docstring'''
A_ = "Input must be a string of 8 numbers plus letter"
A_ = "TRWAGMYFPDXBNJZSQVHLCKE"
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowerCamelCase_ = f'''Expected string as input, fou... | 42 | 1 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionCon... | 428 | '''simple docstring'''
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowercase__ , lowercase__ : int = len(UpperCAmelCase ), len(grid[0] )
if (
min(UpperCAmelCase , UpperCAmelCase ) < 0
or row == row_length
or... | 428 | 1 |
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ (__snake_case ):
__lowerCamelCase : Union[str, Any] = """WhisperFeatureExtractor"""
__lowerCamelCase : List[str] = """WhisperTokenizer"""
def __init__( self , a , a):
super().__init__(a ... | 164 |
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
def __init__( self , a = 6):
lowercase__ : Node | None = None
lowercase__ : Node | None = None
self.create_linked_list(a)
def snake_case_ ( self , a)... | 164 | 1 |
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
__lowerCamelCase : List[Any] = {
# 1536-bit
5: {
'''prime''': int(
... | 501 |
import sys
import turtle
def lowercase__ ( __A: tuple[float, float] ,__A: tuple[float, float] ):
'''simple docstring'''
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def lowercase__ ( __A: tuple[float, float] ,__A: tuple[float, float] ,__A: tuple[... | 501 | 1 |
from __future__ import annotations
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : str ) -> bool:
__snake_case = get_failure_array(snake_case_ )
# 2) Step through text searching for pattern
__snake_case , __snake_case ... | 592 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import lo... | 592 | 1 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetr... | 658 |
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCAmelCase_ :
def __init__( self, __a, __a):
'''simple docstring'''
if len(__a) != degree + 1:
raise ValueError(
... | 658 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
__A ='https://api.github.com'
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__A =BASE_URL + '/user'
# https://github.com/... | 407 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
a_ = {'''UserAgent''': UserAgent().random}
def _a ( UpperCamelCase_ : Any ) -> dict:
"""simple docstring"""
lowerCAmelCase__ = ... | 339 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class ... | 706 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
"configuration_roformer": ["ROFORMER_PR... | 692 | 0 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
... | 678 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_m... | 678 | 1 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __lowerCamelCase ( A__ : Any ) -> Tuple:
lowerCamelCase_ : str = args.pruning_method
lowerCamelCase_ : Any = args.threshold
... | 721 |
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : int ) ->None:
lowerCamelCase_ : dict[str, TrieNode] = {} # Mapping from char to TrieNode
lowerCamelCase_ : str = False
def _lowerCAmelCase ... | 171 | 0 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
... | 428 |
import copy
import random
from transformers import CLIPTokenizer
class A_ ( __a ):
def __init__( self : Tuple , *snake_case__ : Any , **snake_case__ : Tuple ):
super().__init__(*snake_case__ , **snake_case__ )
lowercase ... | 428 | 1 |
"""simple docstring"""
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_schedul... | 135 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__lowercase = ... | 135 | 1 |
a : Union[str, Any] = 2_5_6
# Modulus to hash a string
a : str = 1_0_0_0_0_0_3
def snake_case__ ( lowercase , lowercase ):
lowerCAmelCase_: List[str] = len(UpperCAmelCase__ )
lowerCAmelCase_: Any = len(UpperCAmelCase__ )
if... | 613 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
A_ : List[Any] =argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""-... | 483 | 0 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
lowercase__ : Dict = ['''small''', '''medium''', '''large''']
lowercase__ : List[Any] = '''lm_head.decoder.weight'''
lowercase__ : int = '''lm_head.weight'... | 700 |
'''simple docstring'''
import sys
import turtle
def _lowerCAmelCase ( __snake_case : tuple[float, float] , __snake_case : tuple[float, float] ) -> tuple[float, float]:
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def _lower... | 338 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import Co... | 210 |
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
A__ = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'''
... | 252 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import... | 179 | '''simple docstring'''
from __future__ import annotations
from statistics import mean
def __lowerCAmelCase ( a_ , a_ , a_ ) -> list[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [0] * no... | 179 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
... | 108 |
import os
import sys
__a: Union[str, Any] = os.path.join(os.path.dirname(__file__), '''src''')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassi... | 108 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if ... | 701 |
'''simple docstring'''
lowerCAmelCase__ : Any = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowerCAmelCase__ : int = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowerCAmelCase__ : List[Any] = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "F... | 329 | 0 |
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import... | 93 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
if not is_accelerate_available():
return method
snake_case__ : Union[str, An... | 648 | 0 |
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase : Tuple = get_tes... | 66 |
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCamelCase_ ( _lowerCamelCase : str , _l... | 66 | 1 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_tor... | 474 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_t... | 34 | 0 |
def snake_case (UpperCamelCase : str , UpperCamelCase : str ):
'''simple docstring'''
assert x is not None
assert y is not None
lowerCamelCase__ = len(UpperCamelCase )
lowerCamelCase__ = len(UpperCamelCase )
# declaring the array for storing th... | 720 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowercase ( datasets.BeamBasedBuilder ):
"""simple docstring"""
def _UpperC... | 235 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self : int , _a : str , ... | 459 |
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def a_ ( _lowerCAmelCase ) -> List[str]:
__lowerCamelCase : int = min(_lowerCAmelCase ) # min() finds the minimum value
__lowerCamelCase : List[Any] = max(_lowerCAmelCase ) # max... | 459 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/... | 715 | from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable(... | 525 | 0 |
import numpy as np
def __UpperCAmelCase( lowercase_ ):
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 114 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCame... | 114 | 1 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
... | 701 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor... | 444 | 0 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
a : int = log... | 69 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
__UpperCAmelCase = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
__UpperCAmelCase = '\nArgs... | 65 | 0 |
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class _lowerCamelCase ( __UpperCAme... | 712 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_availabl... | 152 | 0 |
from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCamelCase_ : Optional[Any] = 300 # TEMPERATURE (unit = K)
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ):
if donor_conc <= 0:
raise V... | 559 | from __future__ import annotations
def lowerCAmelCase( __lowerCamelCase ):
if len(__lowerCamelCase ) == 0:
return array
__a , __a = min(__lowerCamelCase ), max(__lowerCamelCase )
# Compute the variables
__a = _max - _min + 1
__a , __a ... | 559 | 1 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
... | 711 |
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def _lowerCAmelCase ( _lowerCAmelCase = "" ) -> dict[str, float]:
'''simple docstring'''
__snake_case = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_... | 473 | 0 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attentio... | 89 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_avai... | 354 | 0 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformer... | 700 | '''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
fr... | 320 | 0 |
"""simple docstring"""
def _lowerCAmelCase(a : float , a : float ) -> float:
if mass < 0:
raise ValueError('''The mass of a body cannot be negative''' )
return 0.5 * mass * abs(a ) * abs(a )
if __name__ == "__main__":
import doctest
doctest.testmod(ve... | 255 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeIma... | 255 | 1 |
"""simple docstring"""
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
A = Mapping[str, np.ndarray]
A = Mapping[str, Any] # Is a nested dict.
A = 0.01
@data... | 109 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MA... | 109 | 1 |
from __future__ import annotations
def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int ):
"""simple docstring"""
a , a :List[str] = position
a :Union[str, Any] = [
(y + 1, x + 2),
(y -... | 445 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
snake_case : Tuple = (7_20, 12_80) # Height, Width
snake_case : List[Any] = (0.4, 0.6) # if height or width lower than this scale, drop it.
snake_case... | 605 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase__ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available(... | 701 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_ten... | 69 | 0 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applica... | 184 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless r... | 266 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise Opt... | 591 | import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils impor... | 591 | 1 |
"""simple docstring"""
def a ( __UpperCAmelCase : list[list[float]] ) -> list[list[float]]:
__magic_name__: list[list[float]] = []
for data in source_data:
for i, el in enumerate(__UpperCAmelCase ):
if len(__U... | 96 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
__lowerCamelCase ... | 96 | 1 |
'''simple docstring'''
import re
def _snake_case ( A_ : str ):
"""simple docstring"""
if len(re.findall("""[ATCG]""" , __snake_case ) ) != len(__snake_case ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""... | 705 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case: Optional[Any] = logging.get_logger(__name__)
__snake_case: T... | 460 | 0 |
import re
from filelock import FileLock
try:
import nltk
__A = True
except (ImportError, ModuleNotFoundError):
__A = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def lowercase__ ( A_: str ) -> str:
... | 68 |
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _snake_case ( ):
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with p... | 91 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ... | 703 |
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
_a = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", "... | 78 | 0 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tok... | 170 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... | 170 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate... | 25 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : int = {
"""post_extract_proj"... | 25 | 1 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from... | 37 |
from math import factorial, radians
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ = 18 , lowerCAmelCase_ = 10):
'''simple docstring'''
lowerCamelCase_ : List[str] = angle_in_degrees - ((angle_in_degrees // 3_60.0) * 3_60.0)
# Convertin... | 250 | 0 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
... | 404 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class UpperCAmelCase :
def __init__( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase ... | 404 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, an... | 6 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __snake_case ( ... | 193 | 0 |
"""simple docstring"""
import socket
def A__ ( ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Dict = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
snake_case__ : Any = socket.gethostname()
snake_case__ : Dict = 1_23_12
sock.connect((host... | 150 |
"""simple docstring"""
import math
import unittest
def A__ ( _UpperCAmelCase : int ) -> bool:
'''simple docstring'''
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3... | 150 | 1 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .... | 489 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor... | 489 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMSche... | 721 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__: Optional[Any] = logging.get_logger(__name__)
UpperCamelCase__: Tuple = {
"huggingface/t... | 528 | 0 |
'''simple docstring'''
from collections.abc import Sequence
def _snake_case ( _SCREAMING_SNAKE_CASE : Sequence[float] , _SCREAMING_SNAKE_CASE : float ) -> float:
"""simple docstring"""
return sum(c * (x**i) for i, c in enumerate(_SCREAMING_SNAKE_C... | 433 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def _snake_case ( _SCREAMING_SNAKE_CASE... | 433 | 1 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transf... | 277 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def __UpperCAmelCase ( __A = True , *__A , **__A ) -> Any:
'''simple docstring'''
if not is_tqdm_a... | 277 | 1 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from tr... | 403 | import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, U... | 403 | 1 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class _a ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase( self ):
__A : int = [
"safety_checker/pytorch_model.bin",
"safety_checker/mod... | 718 | def lowerCamelCase_ ( _lowercase = 2_000_000 ) -> int:
__A : str = [0 for i in range(n + 1 )]
__A : int = 1
__A : Dict = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
... | 387 | 0 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from... | 47 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCamelCase = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "X... | 492 | 0 |
UpperCAmelCase_ = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cook... | 476 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
UpperCAmelCase_ = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
... | 476 | 1 |
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
_lowercase : List[Any] = 'path-to-your-trained-model'
_lowercase : List[Any] = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('cuda')
_lowercase... | 49 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
... | 49 | 1 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffuse... | 710 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...t... | 14 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSp... | 648 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSp... | 648 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
... | 494 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transf... | 494 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Optional[Any] = logging.get_logger(__name__)
lowercase__ : List[Any] = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
''... | 8 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Optional[int] = {
'facebook/xmod... | 265 | 0 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase ... | 719 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_p... | 515 | 0 |
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : list[str] ):
__UpperCAmelCase : Any = """"""
for word_or_phrase in separated:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise Exception("""join() a... | 63 | """simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test... | 473 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
... | 708 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class A_ :
'''simple docstring'''
__snake_case = 42 # [batch_size x 3]
__snake_case = 42 # [batch_size x 3]
__snake_case = 42 # [batch_size x 3... | 230 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'... | 621 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
fro... | 621 | 1 |
'''simple docstring'''
import math
import sys
def __lowerCAmelCase ( a_ ) -> int:
'''simple docstring'''
if number != int(a_ ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueE... | 179 | '''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids... | 179 | 1 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCamelCase_ :
'''simple docstring'''
@proper... | 244 | '''simple docstring'''
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
__SCREAMING_SNAKE_CASE : Optional[int] = {"""UserAgent""": UserAgent().random}
def UpperCamelCase_ ( _UpperCAmelCase : Dict ) ->... | 244 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
... | 118 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.tes... | 118 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCAmelCase__ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfi... | 277 | """simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" ,[
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""":... | 277 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... | 661 | import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A__ ( lowercase: ... | 661 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __magic_name__ ( _UpperCamelCase ):
UpperCamelCase : Any = ["image_processor", "tokenizer"]
UpperCamelCase : Optional[Any] = ... | 589 |
"""simple docstring"""
from __future__ import annotations
class __magic_name__ :
def __init__( self , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = order
# a_{0} ... a_{k}
_lowerCAmelCase = [1.0] + [0.0] * orde... | 589 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def __lowerCamelCase ( __snake_case : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 =... | 687 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : Optional[int] ... | 687 | 1 |
"""simple docstring"""
import os
a_ = {'I': 1, 'V': 5, 'X': 1_0, 'L': 5_0, 'C': 1_0_0, 'D': 5_0_0, 'M': 1_0_0_0}
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : Optional[Any] = 0
__lowercase : List[Any] = 0
... | 76 |
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()... | 77 | 0 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def a (_lowerCAmelCase ):
if not is_accelerate_available():
return method
SCREAMING_SNAKE_CASE_ = version.parse(accelerat... | 89 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accel... | 89 | 1 |
'''simple docstring'''
from __future__ import annotations
_UpperCAmelCase : str = '''Muhammad Umer Farooq'''
_UpperCAmelCase : Any = '''MIT'''
_UpperCAmelCase : Tuple = '''1.0.0'''
_UpperCAmelCase : List[str] = '''Muhammad Umer Farooq'''
_UpperCAmelCase : Optional[int] = '''contact@muhammadumerfa... | 72 |
from __future__ import annotations
from typing import Any
class __UpperCamelCase :
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
UpperCAmelCase__: Optional[int] = num_of_nodes
UpperCAmelCase__: list[list[int]] = []
UpperCAmel... | 113 | 0 |
'''simple docstring'''
from itertools import permutations
def _UpperCamelCase ( _a : tuple ):
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__UpperCamelCase... | 287 | '''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a= logging.get_logger(__name__)
a= '''▁'''
a= {
... | 287 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by ap... | 66 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[An... | 66 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowercase_ = logging.get_logger(__name__)
class snake_case ( _lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict, *_lowerCamelCas... | 215 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowerCAmelCase ( __UpperCamelCase ):
""... | 215 | 1 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_a... | 42 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, lo... | 65 | 0 |
'''simple docstring'''
import math
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, a... | 708 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
... | 208 | 0 |
__a: str = '''
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__a: int = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__a: int ... | 108 |
import functools
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__a = len(_SCREAMING_SNAKE_CASE )
__a = len(_SCREAMING_SNAKE_CASE )
@functools.cache
def min_distance(_SCREAMIN... | 225 | 0 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase ... | 710 |
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class UpperCamelCase:
def __init__( self : List[str] , SCREAMING_SNAKE_CASE : Dict ) -> str:
... | 473 | 0 |
'''simple docstring'''
def UpperCAmelCase__( _SCREAMING_SNAKE_CASE : int = 1000 ):
"""simple docstring"""
__A= 1, 1
__A= []
for i in range(1,n + 1 ):
__A= prev_numerator + 2 * prev_denominator
__A= prev_numerator + prev_denominator
if len(str(__a ) ) > len(st... | 186 |
from scipy.stats import spearmanr
import datasets
a__ = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations impl... | 14 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowerCAmelCase : O... | 425 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _A ( A ) -> List[Tuple[int, ...]]:
lowercase :... | 425 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_t... | 85 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''torch''', '''torchsde''']
def __init__( self , *lowercase , **lowercase ) -> str:
requires_... | 601 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ={
"""asapp/sew-d-tiny-100k""": """htt... | 703 | """simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _A ( lowerCAmelCase ):
snake_case__ : Union[str, Any] = (IPNDMScheduler,)
snake_case__ : List[... | 197 | 0 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def __magic_name__ ( __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return choice(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCA... | 109 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : int = 'opena... | 319 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accele... | 713 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A : Dict = datasets.logging.get_logger(__name__)
A : Optional[Any] = '''\
@InProceedings{moosavi2019... | 247 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase__ = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
... | 117 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer... | 117 | 1 |
def __lowerCamelCase ( A__ : str ) -> Dict:
lowerCamelCase_ : List[str] = [0] * len(_lowerCamelCase )
lowerCamelCase_ : str = []
lowerCamelCase_ : Union[str, Any] = []
lowerCamelCase_ : Optional[Any] = 0
for values in graph.values():
... | 706 |
import fire
from utils import calculate_rouge, save_json
def __lowerCamelCase ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : Dict=None , **A__ : Dict ) -> str:
lowerCamelCase_ : Union[str, Any] = [x.strip() for x in open(A__ ).readlines()]
lowerCamelCase_ : ... | 171 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = ... | 677 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between che... | 367 | 0 |
import unittest
from knapsack import greedy_knapsack as kp
class __lowerCAmelCase ( unittest.TestCase ):
def _snake_case ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : Tuple = [10, 20, 30, 40, 50, 60]
a__ : Union[str, Any] = ... | 720 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _A ( lowerCamelCase ):
a__ : List[str] = []
if isinstance(... | 629 | 0 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
... | 653 |
"""simple docstring"""
from manim import *
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
def __A ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 )
SCRE... | 247 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
_UpperCamelCase : List[st... | 514 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase : Any = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
... | 514 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from t... | 51 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ (datasets.BuilderConfig ):
"""simple docstring"""
lowerCamelCase : Optio... | 687 | 0 |
'''simple docstring'''
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a__ = logging.get_logger(__name__)
class __magic_name__( __lowerCAmelCase ):
UpperCAmelCase_ : Tuple ... | 566 |
'''simple docstring'''
import os
import jsonlines
import numpy as np
from tqdm import tqdm
a__ = 2_048
a__ = 4_096
a__ = 42
a__ = os.environ.pop('''PROCESS_TRAIN''', '''false''')
a__ = {'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no... | 566 | 1 |