| code (string, lengths 82-53.2k) | code_codestyle (int64, 0-721) | style_context (string, lengths 91-41.9k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
from PIL import Image
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Image:
"""simple docstring"""
def brightness(UpperCAmelCase ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError('''level must be between -255.0... | 315 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( UpperCAmelCase_ , unittes... | 411 | 0 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
A_ : Optional[Any] =[
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
... | 720 | '''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from ... | 606 | 0 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
SCREAMING_SNAKE_CASE__ : Optional[int] ... | 205 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_... | 205 | 1 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
imp... | 278 |
import numpy as np
__lowerCAmelCase :Dict = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class _a:
def __init__( self ) -> No... | 278 | 1 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from... | 614 | """simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'vocab_file': ... | 473 | 0 |
from __future__ import annotations
from cmath import sqrt
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int ) -> tuple[complex, complex]:
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
_UpperCAmelCa... | 467 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Any ) -> Optional[int]:
_UpperCAmelCase : Optional[Any] = os.path.join(args.tf_model_dir , "pa... | 467 | 1 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
M... | 2 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
snake_case_ : Optional[int] = l... | 138 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torc... | 333 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_model... | 333 | 1 |
from ...configuration_utils import PretrainedConfig
A : Optional[int] = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/goog... | 15 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase__ )
class a ( UpperCAmelCase__ ):
# `task` is not a... | 409 | 0 |
def _lowerCAmelCase (_lowerCAmelCase):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
... | 504 |
import sys
UpperCAmelCase : Union[str, Any] =(
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66... | 504 | 1 |
import requests
from bsa import BeautifulSoup
def lowercase ( __A : str = "AAPL" ) -> str:
'''simple docstring'''
snake_case : List[Any] = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
snake_case : Dict = BeautifulSoup(reque... | 36 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
g... | 81 | 0 |
import numpy as np
snake_case__ : Tuple = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]... | 701 |
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError("The given input must be positive" )
# get the generated string sequence
__lowercase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE )
#
# convert th... | 655 | 0 |
def UpperCamelCase( __UpperCamelCase : Tuple ,__UpperCamelCase : List[str] ):
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(a__ ):
for j in range(a__ ):
if dist[i][j] != float('''inf''' ):
print(int(dist[i][... | 171 |
def __SCREAMING_SNAKE_CASE ( a__ : int ) -> int:
if not isinstance(a__ ,a__ ):
raise TypeError("""Input value must be an 'int' type""" )
__A : Union[str, Any] = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
... | 17 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER... | 681 |
"""simple docstring"""
import argparse
import os
import re
lowerCAmelCase__ = '''src/transformers'''
# Pattern that looks at the indentation in a line.
lowerCAmelCase__ = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase__ ... | 681 | 1 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
... | 520 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_... | 53 | 0 |
'''simple docstring'''
import math
def A__ ( A : list , A : int):
'''simple docstring'''
UpperCamelCase : List[Any] = len(A)
UpperCamelCase : int = int(math.floor(math.sqrt(A)))
UpperCamelCase : Optional[int] = 0
... | 711 |
'''simple docstring'''
def A__ ( A : str , A : str):
'''simple docstring'''
if not (isinstance(A , A) and isinstance(A , A)):
raise ValueError("longest_common_substring() takes two strings for inputs")
UpperCamelCase : Optional[int] ... | 435 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
tra... | 34 |
"""simple docstring"""
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->List[str]:
"""simple docstring"""
if index == r:
for j in range(UpperCAmelCase_ ... | 522 | 0 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokeni... | 516 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCamelCase : Dict = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', ''... | 516 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@req... | 304 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
... | 304 | 1 |
__lowerCAmelCase = 8.314462 # Unit - J mol-1 K-1
def _lowercase ( a__ : float , a__ : float , a__ : float ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return mol... | 589 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, r... | 589 | 1 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from tra... | 612 |
from __future__ import annotations
def __A(lowerCAmelCase , lowerCAmelCase ) -> list[list[int]]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = []
_UpperCamelCase = 0
_UpperCamelCase = sum(lowerCAmelCase )
create_st... | 612 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models... | 703 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
_lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCamelCase__ ):
def __init__( self :List[Any] , *__A :Tuple , ... | 59 | 0 |
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import Tokeniz... | 160 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
A: Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CA... | 160 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
... | 720 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large":... | 190 | 0 |
'''simple docstring'''
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple ) ... | 71 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import ... | 158 | 0 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configur... | 705 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelera... | 570 | 0 |
from __future__ import annotations
def lowercase__ ( A_: list[int] , A_: list[int] , A_: list[int] , A_: list[list[str]] , A_: int , ) -> None:
"""simple docstring"""
__UpperCAmelCase =len(A_ )
... | 68 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lower... | 170 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import C... | 366 | """simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _snake_case ( a__ , a__ ):
... | 366 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ = {
'''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available()... | 547 | import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = ['''image_processor''', '''tokenizer''']
__A = '''ViTImageProcessor'''
__A ... | 547 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : Optional[Any] = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config... | 66 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTes... | 66 | 1 |
import math
def _lowerCAmelCase ( lowerCAmelCase_ :int )->int:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
snake_case_ = F'''Input value of [number={number}] must be an integer'''
raise TypeError(l... | 283 |
from collections import deque
from .hash_table import HashTable
class __lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self : int , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Any ) -> Union... | 283 | 1 |
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoC... | 512 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffu... | 512 | 1 |
'''simple docstring'''
UpperCAmelCase = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/... | 119 |
def A_ ( _lowerCAmelCase ) -> bool:
UpperCamelCase : List[Any] = 0
for ch in input_str:
UpperCamelCase : Optional[Any] = ord(_lowerCAmelCase )
UpperCamelCase : Optional[Any] = pow(2 , _lowerCAmelCase )
# If we already turned on bit for ... | 629 | 0 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatu... | 354 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils impo... | 354 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ : Union[str, Any] = {
'''configuration_roformer''': ['''ROFORMER_PRETRAI... | 691 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
Dat... | 691 | 1 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=() , SCREAMING_SNAKE_CASE=None , SCREAMING_... | 701 | def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return 1 if input_a == input_a else 0
def lowerCamelCase ( ):
'''simple docstring'''
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0... | 452 | 0 |
'''simple docstring'''
import heapq
import sys
import numpy as np
__UpperCAmelCase = tuple[int, int]
class a__ :
'''simple docstring'''
def __init__( self ) -> Union[str, Any]:
lowerCAmelCase__... | 90 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase ... | 86 | 0 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def _lowerCAmelCase ( __lowerCamelCase : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = [
"deco... | 447 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_i... | 447 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ : str = {"configuration_xlnet": ["XLNET_PRETRAINED_CO... | 38 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
f... | 227 | 0 |
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils impor... | 718 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHI... | 573 | 0 |
from timeit import timeit
_lowerCAmelCase : int = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
... | 246 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_lowerCAmelCase : int = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
... | 246 | 1 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCamelCase : int = logging.get_logger(__name__)
... | 713 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoCo... | 151 | 0 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowerCamelCase ( _snake_case ):
UpperCAmelCase__ : List[Any] = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation o... | 110 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__magic_name__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , ... | 250 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
... | 545 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCas... | 545 | 1 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,... | 78 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
snake_case = Lock()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :Union[str, Any] , snake_case__ :Tuple ... | 67 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase = {
"""configuration_conditional_detr""": [
"""CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ConditionalD... | 427 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""sail/poo... | 427 | 1 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIG... | 226 | """simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
SCREAMING_SNAKE_CASE__:str = HfArgumentParser(InitializationArguments)
SCREAMING_SNAKE_CASE__:List[str] = parser.parse... | 528 | 0 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class _lowercase :
"""simple docstring"""
def __init__( self : Tuple ) -> None:
'''simple docstring'''
__UpperCa... | 710 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase ... | 296 | 0 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is... | 317 |
from __future__ import annotations
import math
def UpperCAmelCase__ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> int:
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if not scores:
raise ValueError... | 317 | 1 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
... | 704 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass... | 277 | 0 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase__( UpperCAmelCase , unittest.TestCase ):
... | 97 | # HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning th... | 166 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_a )
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : List[Any] = field(default="""image-clas... | 716 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding,... | 590 | 0 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTok... | 210 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class UpperCamelCase__( datasets.BeamBasedBuilder ):
def a__( self : ... | 210 | 1 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from trans... | 545 |
"""simple docstring"""
import logging
from transformers import PretrainedConfig
UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__)
UpperCAmelCase__ : Any = {
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm... | 545 | 1 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCamelCase__ : str = logging.get_logger(__name__)
UpperCamelCase__ : int = {
"google/umt5-small": "https://huggingface.co/g... | 105 |
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase ):
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
A_ = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
A_ = str(bin(__UpperCamelCase ) )[2:] ... | 141 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
... | 549 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
__lowerCAmelCase : List[str] = sta... | 549 | 1 |
from collections import deque
from .hash_table import HashTable
class __A( UpperCAmelCase ):
def __init__( self : int , *__UpperCamelCase : List[str] , **__UpperCamelCase : Optional[Any] ):
super().__init__(*__UpperCamelCase , ... | 272 |
import os
import sys
lowercase = os.path.join(os.path.dirname(__file__), '''src''')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
A... | 272 | 1 |
def A_( A = 10 ):
if not isinstance(A , A ) or n < 0:
raise ValueError("""Invalid input""" )
UpperCAmelCase_ = 10**n
UpperCAmelCase_ = 28433 * (pow(2 , 7830457 , A )) + 1
return str(number % modulus )
if __name__ == "__main__":
... | 486 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate imp... | 486 | 1 |
import collections
import os
import re
from pathlib import Path
_UpperCamelCase = "src/transformers"
# Matches is_xxx_available()
_UpperCamelCase = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_UpperCamelCase = re.compile(r"^_import... | 492 |
_UpperCamelCase = {str(digit): digit**5 for digit in range(10)}
def _lowercase ( lowercase__ ):
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(lowercase__ ) )
def _lowercase ( ):
return sum(
number
for number in range(1_0_0_0 ,... | 492 | 1 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_chec... | 42 |
"""simple docstring"""
import os
def snake_case ( ) -> Optional[Any]:
with open(os.path.dirname(UpperCamelCase__ ) + """/grid.txt""" ) as f:
lowerCamelCase : int = [] # noqa: E741
for _ in range(20 ):
l.append([int(UpperCame... | 42 | 1 |
def lowercase__ ( _UpperCamelCase , _UpperCamelCase) -> int:
"""simple docstring"""
return int((input_a, input_a).count(1) != 0)
def lowercase__ ( ) -> None:
"""simple docstring"""
assert or_gate(0 , 0) == 0
assert or_gate(0 ... | 280 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ : Tuple = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONF... | 280 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import ... | 708 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licen... | 385 | 0 |
'''simple docstring'''
import random
from typing import Any
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : Tuple ):
for _ in range(len(lowerCAmelCase__ ) ):
UpperCAmelCase = random.randint(0 , len(lowerCAmelCase__ ) - 1 )
UpperCAmelCase = random.ran... | 447 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerC... | 99 | 0 |
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Dict ) -> List[str]:
... | 513 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Any = (DDPMScheduler,)
def __UpperCAmelCase ( self : Optional... | 513 | 1 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..mo... | 175 |
from __future__ import annotations
def a__ ( _UpperCamelCase : list[float] ):
if len(_UpperCamelCase ) < 2:
raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
if any(i <= 0 for i in nums ):
raise ValueError('''All values must be gre... | 175 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_A = logging.get_logger(_... | 708 |
import itertools
import string
from collections.abc import Generator, Iterable
def lowerCamelCase__ ( __lowerCAmelCase : Iterable[str] , __lowerCAmelCase : int ):
"""simple docstring"""
lowerCAmelCase_ = iter(__lowerCAmelCase )
w... | 279 | 0 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCAmelCase_ = False
class lowerCAmelCase_ ( ... | 603 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CO... | 603 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Union[str, Any] = logging.get_logger(__name__)
snake_case__ : List[str] = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://hug... | 709 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
snake_case__ : Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ (a__ ):
'''simple docstring'''
def __init__( self : Any ... | 171 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_lowercase )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCa... | 91 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats... | 91 | 1 |
_UpperCAmelCase = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
... | 704 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class UpperCAmelCase ( unittest.TestCase ... | 70 | 0 |
'''simple docstring'''
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
def ... | 501 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase... | 501 | 1 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class UpperCAmelCase ( yaml.SafeLoader ):
'''simple docstring'''
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Optional[An... | 719 | '''simple docstring'''
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowercase__ , lowercase__ : int = len(UpperCAmelCase ), len(grid[0] )
if (
min(UpperCAmelCase , UpperCAmelCase ) < 0
or row == row_length
or... | 428 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from acc... | 236 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-... | 125 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = (DDIMParallelScheduler,)
_lowerCamelCase = (('''eta''', 0.0), (''... | 716 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__a: Tuple ... | 402 | 0 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test... | 5 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_... | 71 | 0 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
class _lowercase ( _SCREAMING_SNAKE_CASE ):
_a : ... | 703 |
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_mo... | 497 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Un... | 452 |
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
lowerCamelCase_ : Union[str, Any] = []
lowerCamelCase_ : Tuple = []
lowerCamelCase_ : Dict = {
'^': 3,
'*': 2,
'/': 2,
'%': 2,
'+': 1,
'-': 1,
} # Pr... | 364 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, to... | 128 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__snake_case = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViT... | 128 | 1 |
from PIL import Image
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> Image:
def brightness(__snake_case ) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -255.0 <= level <= 255.0:
raise ValueError("""level must be betwee... | 108 |
def __UpperCamelCase ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ):
return x if y == 0 else greatest_common_divisor(lowerCAmelCase__ , x % y )
def __UpperCamelCase ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ):
return (x * y) //... | 521 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ =logging.get_logger(__name__)
lowerCAmelCase__... | 701 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_av... | 690 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ) -> Any:
_lowerCAmelCase : Union[str, Any] = [
"""encoder.version""",
... | 213 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {}
class lowerCamelCase (_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a = "llama"
a... | 159 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import Fl... | 704 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__UpperCAmelCase = '\\n\n'
__UpperCAmelCase = '\nPerplexity (PPL) is one of the most comm... | 220 | 0 |
import pprint
import requests
lowercase_ = """https://zenquotes.io/api"""
def __UpperCamelCase () -> list:
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def __UpperCamelCase () -> list:
return requests.get(API_ENDPOINT_... | 235 |
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Any:
lowercase__ = [0] * len(_SCREAMING_SNAKE_CASE )
lowercase__ = []
lowercase__ = [1] * len(_SCREAMING_SNAKE_CASE )
for values in graph.values():
for i in values:
... | 235 | 1 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput... | 177 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
Au... | 177 | 1 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docs... | 657 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sente... | 657 | 1 |
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : List[str] = None , snake_case_ : str = None ):
if start is None:
__magic_name__ = 0
if end is None:
__magic_name__ = len(_snake_case ) ... | 711 |
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = []
__magic_name__ = 1
while len(snake_case_ ) < 1E6:
constant.append(str(snake_case_ ) )
i += 1
__magic_name__ = ''''''.join(snake_case_ )
return (
int(constant[0] )
* int... | 678 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelera... | 49 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Dict = "EncodecFeatureExtractor"
a__ : Tuple = ("T5Token... | 49 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : int = logging.get_logger(__name__)
__A : Optional[int] = {
'google/bit-50': 'https://huggingface.co/google... | 719 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
... | 698 | 0 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE ... | 283 |
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE :Union[str, Any] = parse(importlib.metadata.version('''torch'''))
def _lowerCAmelCase ( lowerCAmelCase_ :Union... | 283 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_i... | 369 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalD... | 369 | 1 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
A = 6378137.0
A = 6356752.314245
A = 6_378_137
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , ... | 77 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase ... | 453 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffus... | 284 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
... | 284 | 1 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgument... | 480 |
"""simple docstring"""
from math import factorial
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 2_0 ):
"""simple docstring"""
snake_case_ : int = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...... | 480 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowercase ( a__ ):
_lowerC... | 143 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common ... | 143 | 1 |
def lowercase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowercase = len(_lowerCAmelCase )
__lowercase = [[0] * n for i in range(_lowerCAmelCase )]
for i in range(_lowerCAmelCase ):
__lowercase = y_... | 639 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import M... | 327 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.ka... | 190 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def lowercase ( __UpperCamelCase ) -> Any:
return choice(__UpperCamelCase )
def lowercase ( __UpperCamelCase , __UpperCamelCase ) -> int:
__magic_name__ = ran... | 190 | 1 |
'''simple docstring'''
def A ( UpperCamelCase_ : int , UpperCamelCase_ : int ) -> bool:
'''simple docstring'''
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 | """simple docstring"""
from timeit import timeit
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> int:
if number < 0:
raise ValueError("""the value of input must not be negative""" )
_lowerCAmelCase : Dict = 0
while number:
number &= number - 1
result +=... | 213 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
... | 720 |
import re
def A ( __UpperCAmelCase ) -> str:
'''simple docstring'''
if len(re.findall('''[ATCG]''' , __UpperCAmelCase ) ) != len(__UpperCAmelCase ):
raise ValueError('''Invalid Strand''' )
return dna.translate(dna.maketrans('''ATCG''' ... | 561 | 0 |
def __UpperCAmelCase ( __a : str ,__a : str ) -> list:
"""simple docstring"""
_a : Tuple = len(__a )
_a : str = []
for i in range(len(__a ) - pat_len + 1 ):
_a : Any = True
... | 14 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : List[Any] = {
"configuration_distilbert": [
"DISTILBERT_P... | 668 | 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceCl... | 714 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logg... | 80 | 0 |