| code (string, length 82–53.2k) | code_codestyle (int64, 0–721) | style_context (string, length 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__snake_case : str = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n ... | 571 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def a_ ( __a ):
return 1 / (1 + np.exp(-z ))
d... | 571 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
imp... | 271 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transform... | 271 | 1 |
import math
def __UpperCamelCase ( A , A ):
UpperCamelCase__ = len(A )
UpperCamelCase__ = int(math.floor(math.sqrt(A ) ) )
UpperCamelCase__ = 0
while arr[min(A , A ) - 1] < x:
UpperCamelCase__ =... | 415 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def __UpperCamel... | 415 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_ima... | 705 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer... | 290 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE_ ( _a , unittest.TestCase ):
"""simple docstring"""
__low... | 181 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import C... | 181 | 1 |
'''simple docstring'''
import math
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
if initial_intensity < 0:
raise ValueError('''The value of intensity cannot be negative''' )
# handling of negative values... | 343 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case__ = 50 ):
'''simple docstring'''
A : Any = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start i... | 343 | 1 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMSche... | 69 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''kakaobrain... | 149 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__: Union[str, Any] = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_M... | 706 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCST... | 528 | 0 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
if not is_accelerate_available():
return method
_lowercase : int = versio... | 89 |
"""simple docstring"""
from __future__ import annotations
SCREAMING_SNAKE_CASE_ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
SCREAMING_SNAKE_CASE_ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def lowercase__ ( lowerCAmelCase ... | 373 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _a ( A__ ):
"""simple docstring""... | 592 |
from __future__ import annotations
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance c... | 592 | 1 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
... | 256 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
UpperCamelCase_ = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_to... | 256 | 1 |
from __future__ import annotations
import math
class __lowercase:
"""simple docstring"""
def __init__( self : str , _lowerCAmelCase : int ) -> None:
_lowerCAmelCase = size
# approximate the overall size of segment tree with given value
... | 585 |
from __future__ import annotations
import math
class __lowercase:
"""simple docstring"""
def __init__( self : str , _lowerCAmelCase : int ) -> None:
_lowerCAmelCase = size
# approximate the overall size of segment tree with given value
... | 585 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithP... | 109 |
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int = 600_851_475_143 ):
try:
SCREAMING_SNAKE_CASE__ = int(UpperCamelCase__ )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""P... | 6 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( a_ ):
"""simpl... | 652 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
# Initialise PyTorch model
... | 652 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A : Optional[int] = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenizatio... | 15 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_... | 693 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}... | 717 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ... | 219 | 0 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_M... | 38 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
... | 503 | 0 |
def lowerCamelCase_ ( lowerCamelCase__ = 5_0 ):
lowerCamelCase_ = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
... | 313 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if ... | 313 | 1 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path ... | 97 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str:
UpperCamelCase : Union[str, Any] = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - эт... | 629 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProces... | 713 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclas... | 609 | 0 |
'''simple docstring'''
from math import pow
def __a(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , ):
'''simple docstring'''
... | 18 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassificat... | 304 | 0 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase( s... | 567 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...uti... | 567 | 1 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
SCREAMING_SNAKE_CASE_ = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
SCREAMING_SNAKE_CASE_ =... | 34 |
import argparse
from collections import defaultdict
import yaml
a = 'docs/source/en/_toctree.yml'
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ = defaultdict(UpperCAmelCase__ )
for doc in model_doc:
counts[doc["local"]] += 1
lowercase_ = [key... | 412 | 0 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__a : str = numpy.array([0, 0])
__a : Dict = numpy.array([0.5, 0.8_66_02_54])
__a : Optional[int] = numpy.ar... | 718 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
__a : int = 2_0_4_8
__a : Optional[int] = 4_0_9_6
__a : Optional[int] = 4_2
__a : Optional[Any] = os.environ.pop("""PROCESS_TRAIN""", """false""")
__a... | 414 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixi... | 512 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common impor... | 512 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : str = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_... | 39 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def __lowerCAmelCase ( lowerCamelCase : List[str] ):
'''simple docstring'''
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_co... | 39 | 1 |
from collections.abc import Callable
def a ( snake_case__: Callable[[float], float] , snake_case__: float , snake_case__: float ):
'''simple docstring'''
lowercase_ = a
lowercase_ = b
if function(snake_case__ ) == 0: # one of the a or b is a root ... | 97 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class A_(SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
warnings.warn(
"""Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """
"... | 437 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
... | 701 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require... | 133 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : List[str] ):
"""simple docstring"""
# test for the above condition
self.te... | 103 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__A = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
... | 59 | 0 |
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class UpperCAmelCase_ :
def __init__( self , lowercase_ , lowercase_ , lowercase_):
if dst_width < 0 or dst_height < 0:
raise ValueError("Destination width/height sh... | 713 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def UpperCamelCase_ ( __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case_ : Any = [0] * no_of_processes
snake_case_ : Op... | 92 | 0 |
def lowercase__ ( A_: Tuple ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase =len(A_ )
for i in range(length - 1 ):
__UpperCAmelCase =i
for k in range(i + 1 , A_ ):
if collection[... | 68 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.... | 68 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class _snake_case :
def __init__( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Any ):
SCREAMING_SNAKE_CASE:Any = data
... | 465 |
'''simple docstring'''
import random
def A_ ( snake_case , snake_case , snake_case = False ):
SCREAMING_SNAKE_CASE:dict = {i: [] for i in range(snake_case )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >... | 465 | 1 |
'''simple docstring'''
import math
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if a... | 407 |
'''simple docstring'''
def _UpperCamelCase ( UpperCamelCase__ = 1_0 , UpperCamelCase__ = 1_0_0_0 , UpperCamelCase__ = True ):
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and isinstance(UpperCamelCase__ , UpperCamelCase__ )
... | 407 | 1 |
"""simple docstring"""
class a :
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowercase = name
lowercase = value
lowercase = weight
def __repr__( self ):
return F'{self.__cla... | 705 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, Bl... | 134 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcess... | 54 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from... | 322 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _a (lowercase_ ):
'''simple docstring'''
Upp... | 712 |
def UpperCamelCase (lowercase_: int , lowercase_: int ) -> int:
while second != 0:
A__ : int = first & second
first ^= second
A__ : int = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Optional[Any] ... | 64 | 0 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase =""
UpperCAmelCase =(
None # ... | 446 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
fr... | 446 | 1 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
UpperCAmelCase_ = re.compile(R'([A-Z]+)([A-Z][a-z])')
UpperCAmelCase_ = re.compile(R'([a-z\d])([A-Z])')
UpperCAmelCase_ = re.compile(R'(?<!_)_(?!_)')
UpperCAmelCase_ ... | 706 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_conf... | 369 | 0 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformer... | 587 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enabl... | 312 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase )
class _A ( UpperCamelCase ):
'''simple docstring'''
_lowercase = fi... | 717 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common i... | 172 | 0 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
lowercase_ = {
"""debug""": logg... | 413 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_... | 507 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class lowercase__ :
'''simple docstring'''
_UpperCAmelCas... | 24 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDa... | 24 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__magic_name__ = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
... | 254 |
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowercase__ : str = pytest.mark.integration
@pytest.mark.pa... | 123 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
f... | 127 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowercase_: Union[str, Any] = '<<<<<<< This should probably be modified because it mentions: '
lowercase_:... | 127 | 1 |
"""simple docstring"""
def _snake_case ( snake_case__ : list[list[int | float]] ):
A = len(snake_case__ )
A = len(matrix[0] )
A = min(snake_case__ , snake_case__ )
for row in range(snake_case__ ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Elimina... | 91 |
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distribu... | 91 | 1 |
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import ... | 295 |
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def _lowerCAmelCase ( lowerCamelCase__ : Any, lowerCamelCase__ : Optional[Any], lowerCamelCase__ : List... | 295 | 1 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
_lowerCAmelCase = logging.getLogger(__name__)
@dataclass
c... | 137 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
fr... | 402 | 0 |
'''simple docstring'''
import math
def __lowercase (_lowercase ) -> bool:
"""simple docstring"""
__lowerCamelCase : Union[str, Any] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_lowercase )
def __lowerc... | 483 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def __lowercase (_lowercase ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
__lowerCamelCase : Optional[int] = {"""+""", """-""", """*""", """/"""}
... | 483 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ['''keras_nlp''']
def __init__( self : Union[str, Any] ,*_a : List[An... | 229 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def UpperCAmelCase_ ():
"""simple docstring"""
raise RuntimeError('CUDA out of memory... | 229 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False... | 328 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False... | 328 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : float , __magic_name__ : float ) -> float:
if mass < 0:
raise ValueError('''The mass of a body cannot be negative''' )
return 0.5 * mass * abs(__magic_name__ ) * abs(__magic_name__... | 92 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import Mod... | 92 | 1 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require... | 705 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ ( enum.Enum ):
U... | 71 | 0 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> tuple[int, int]:
'''simple docstring'''
if b == 0:
return (1, 0)
((__UpperCAmelCase) , (__UpperCAmelCase)) : Union[str, Any] ... | 462 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class lowerCamelCase ( _Up... | 462 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Optional[Any] = "enc... | 256 |
"""simple docstring"""
__UpperCAmelCase = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def _snake_case ( lowercase__ : int ) -> int:
'''simple docstring'''
lowerCAmelCase_ :str = 0
while number:
... | 256 | 1 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common imp... | 291 |
from __future__ import annotations
lowerCAmelCase : List[Any] = list[list[int]]
# assigning initial values to the grid
lowerCAmelCase : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0,... | 511 | 0 |
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->int:
return number | (1 << position)
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->int:
return number & ~(1 << position)
def _UpperCamelCase ( lowerCAmelCase_ , lowerCA... | 710 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configur... | 627 | 0 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_tor... | 34 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin,... | 407 | 0 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class ... | 273 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
A : Optional[Any] = tuple[int, int]
class lowerCamelCase :
def __init__( self : Tuple , __snake_case : set[int] , __snake_case : Mapping[E... | 273 | 1 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = {
... | 624 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_sched... | 624 | 1 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
... | 556 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer... | 556 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : List[Any] = ... | 448 |
from math import factorial
def A__ ( snake_case_ : int , snake_case_ : int ):
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('''Please enter positiv... | 64 | 0 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class _a:
lowerCamelCase__ :str = field(
metadata={'help': 'The output d... | 721 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless r... | 278 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class a__ ( UpperCAmelCase__ ):
__magic_na... | 507 |
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def snake_case__ ( lowercase , l... | 613 | 0 |
def SCREAMING_SNAKE_CASE__ ( lowercase = 600851475143 ) -> int:
try:
snake_case : Optional[int] = int(lowercase )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Param... | 684 |
lowerCamelCase : Union[str, Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCamelCase : ... | 684 | 1 |
"""simple docstring"""
import math
def lowercase (_snake_case ,_snake_case ) -> int:
'''simple docstring'''
if initial_intensity < 0:
raise ValueError("The value of intensity cannot be negative" )
# handling of negative values of initial intensity
if angle < 0 or angle > ... | 505 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case ( UpperCamelCase_ ):
lowercase_ = ['image_processor', 'tokenizer']
lowercase_ = 'AutoImageProcessor'
lowercase_ = 'AutoTokenizer'
def __init__( self ... | 85 | 0 |
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
SCREAMING_SNAKE_CASE_ : str = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: ''... | 274 |
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from a... | 274 | 1 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def _snake_case ( snake_case__ : str , snake_case__ : float | Decimal , snake_case__ : float = 10**-10 ):
A = a
while True:
A = D... | 91 |
def _lowerCAmelCase ( __magic_name__ :str ):
UpperCAmelCase_ = ''''''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def _lowerCAmelCase ( __magic_name_... | 121 | 0 |
"""simple docstring"""
import math
def _SCREAMING_SNAKE_CASE ( __snake_case : int ):
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are ... | 134 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils... | 134 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from t... | 232 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAm... | 232 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCAmelCase__ : Opti... | 711 |
import numpy as np
def A ( snake_case__ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def A ( snake_case__ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(s... | 676 | 0 |
lowerCAmelCase__ : List[Any] ='\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCAmelCas... | 101 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils im... | 298 | 0 |
import string
from math import logaa
def lowerCAmelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
__lowercase = document.translate(
str.maketrans("""""" , """""" , string.punctuation ) ).replace("""\n""" ... | 704 |
"""simple docstring"""
def lowerCAmelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
assert x is not None
assert y is not None
__lowercase = len(UpperCamelCase__ )
__lowercase = len(UpperCamelCase__ )
# dec... | 442 | 0 |
'''simple docstring'''
def a__ ( lowercase : int, lowercase : int ) -> str:
"""simple docstring"""
return "\n".join(
F"""{number} * {i} = {number * i}""" for i in range(1, number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(... | 98 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.d... | 23 | 0 |
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> list[int]:
'''simple docstring'''
UpperCAmelCase_ = int(__UpperCAmelCase )
# Initialize Result
UpperCAmelCase_ = []
# Traverse through all denomination
for denomination in rever... | 561 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import ... | 561 | 1 |
'''simple docstring'''
__UpperCAmelCase = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',... | 90 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMR... | 372 | 0 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
... | 650 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
fr... | 650 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ ={
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDepen... | 521 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesserac... | 521 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggin... | 713 |
def __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
return abs(__magic_name__ ) if a == 0 else greatest_common_divisor(b % a , __magic_name__ )
def __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
while y: # --> when y=0 then loop will terminate and retu... | 206 | 0 |
import socket
def a_ ( ):
lowerCAmelCase__ = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
lowerCAmelCase__ = socket.gethostname()
lowerCAmelCase__ = 1_23_12
sock.connect((host, port) )
sock.send(B'''Hello server!''' )
w... | 615 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ : Optional[Any] = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
... | 615 | 1 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
A = logging.get_logger(__name__)
def lowerCamelCase ( UpperCamelCase : Any , ... | 234 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def lowerCamelCase ( ) -> tuple[list[int], int]:
_lowerCamelCase = [randint(-10_00 , 10_00 ) for i in range(10 )]
_lowerCamelCase ... | 234 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
"""conf... | 199 |
'''simple docstring'''
import math
def snake_case_ ( lowercase__ ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes nu... | 199 | 1 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_ba... | 394 |
'''simple docstring'''
def _lowerCAmelCase (_lowercase ):
"""simple docstring"""
if bit_count < 0:
raise ValueError("The given input must be positive" )
# get the generated string sequence
a__ = gray_code_sequence_string(_lowercase )
... | 394 | 1 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
__snake_case = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and F... | 200 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = '▁'... | 200 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowercase : int = logging.get_logger(__name__)
class lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , *UpperCamelCase__... | 709 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__lowercase : Optional[Any] = logging.get_logger(__name__)
__lowercase : Optional[Any] = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-At... | 66 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required ... | 324 |
import math
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : Optional[int] = len(_A )
__magic_name__ : Tuple = int(math.floor(math.sqrt(_A ) ) )
__magic_name__ : Optional[int] = 0
w... | 324 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteSchedu... | 717 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MA... | 109 | 0 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowercase_ ( _lowercase : Dict ):
'''simple docstring'''
for param in module.parameters():
UpperCAmelCase : Optional[Any] = False
def lower... | 595 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = r'''
Args:
input_ids (`torch.LongTensor` of shape `(b... | 395 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
a_ = TypeVar("""T""")
class __lowerCAmelCase ( Generic[T] ):
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = data... | 622 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, p... | 622 | 1 |
'''simple docstring'''
def lowerCAmelCase__ ( lowerCamelCase : int ,lowerCamelCase : float ,lowerCamelCase : float ):
return round(float(moles / volume ) * nfactor )
def lowerCAmelCase__ ( lowerCamelCase : float ,lowerCamelCase : float ,lowerCa... | 128 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from .... | 128 | 1 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tenso... | 707 |
def _UpperCAmelCase (UpperCamelCase_ : str ):
'''simple docstring'''
_lowerCAmelCase : Dict = [0] * len(UpperCamelCase_ )
for i in range(1 , len(UpperCamelCase_ ) ):
# use last results for better performance - dynamic programming
... | 196 | 0 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
_a = """M-CLIP"""
def __init__( self , lowerCAmelCase=1_024 , lowerCAmelCase=768 , **lowerCAmelCase ) -> O... | 291 |
from __future__ import annotations
from statistics import mean
def a ( A__ : list[int] , A__ : list[int] , A__ : int ) -> list[int]:
"""simple docstring"""
_lowercase =[0] * no_of_processes
_lowercase =[0] * no_of_processes
... | 291 | 1 |
import os
from distutils.util import strtobool
def __magic_name__ ( _lowerCamelCase : str , _lowerCamelCase : Optional[Any] ):
for e in env_keys:
__a : Optional[Any] = int(os.environ.get(lowercase__ , -1 ) )
... | 702 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : list[int] ):
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
__a : Any = sum(_lowerCamelCase ) / len(_lowerCamelCase ) # C... | 63 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceCl... | 699 |
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int ) -> Optional[int]:
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowerCAmelCase, int(b / 2 ) ) * actual_power(lowerCAmelCase, int(b / 2 ) )
else:
r... | 699 | 1 |
'''simple docstring'''
UpperCamelCase_ = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def lowerCamelC... | 320 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
fr... | 320 | 1 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def __magic_name__ ( _lowerCamelCase: int ) -> bool:
'''simple docstring'''
lowerCAmelCase = int(number**0.5 )
return number == sq * sq
def __magic_nam... | 535 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def __magic_name__ ( _lowerCamelCase: Optional[Any] ) -> Dict:
'''simple docstring'''
def wrapper(*_lowerCamelCase: An... | 535 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCAmelCas... | 606 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def snake_case_ ( __snake_case : str = "laptop") -> DataFrame:
lowerCAmelCase_ = F'''https://www.amazon.in/laptop/s?k={product}'''
lowerCAmelC... | 606 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Optional[int] = {
"""facebook/wav2vec2-base-96... | 501 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configu... | 501 | 1 |
def lowerCamelCase( a__):
if num < 0:
return False
_SCREAMING_SNAKE_CASE =num
_SCREAMING_SNAKE_CASE =0
while num > 0:
_SCREAMING_SNAKE_CASE =rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doc... | 191 |
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
snake_case_ : List[Any] = logging.get_logger(__name__)
snake_case_ : Optional[Any] = '''T5Config'''
cla... | 191 | 1 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class _UpperCamelCase ( A__ ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = CustomTokenizer
pass
| 548 |
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNot... | 369 | 0 |
'''simple docstring'''
def __a ( A__ ) -> bool:
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def __a ( A__ ) -> bool:
lowerCAmelCase = credit_card_number
lowerCAmelCase = 0
lowerCAmelCase = len(... | 159 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax... | 159 | 1 |