| code (string, lengths 82–53.2k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@r... | 189 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
__A : List[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed t... | 17 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A : Any = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIP... | 163 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
req... | 163 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import log... | 352 | import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = "▁"
A_ = {"vocab_file": "vocab.txt", ... | 604 | 0 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ... | 249 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
_a : Optional[Any] = AutoConfig.from_pretrained(UpperCamelCase_ )
_a : ... | 249 | 1 |
'''simple docstring'''
import sys
UpperCamelCase_ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""... | 92 | lowerCamelCase__ = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'd... | 524 | 0 |
'''simple docstring'''
def _a ( lowerCamelCase_ ):
snake_case : Dict =1
snake_case : int =2
while i * i <= n:
snake_case : Union[str, Any] =0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *=... | 136 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def _a ( lowerCamelCase_ , lowerCamelCase_ ):
snake_case : Dict =list(lowerCamelCase_ )
snake_case : Optional[int] =list(low... | 136 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_ac... | 90 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCrite... | 440 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : int = logging.get_logger(__name__)
__snake_case : List[Any] = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-sa... | 712 | '''simple docstring'''
from timeit import timeit
__snake_case : List[Any] = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test d... | 174 | 0 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distr... | 556 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ... | 556 | 1 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
lowerCAmelCase_ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def snake_case ( ):
A = os.path.dirname(os.path.realpath(UpperCAmelCase ) )
A = os.path.join(Upper... | 700 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def snake_case ( UpperCAmelCase : List[Any] ):
if "model" in orig_key:
A = orig_key.replace('model.', '' )
if "norm1" in orig_key:
A = orig_key.replace('norm1'... | 110 | 0 |
'''simple docstring'''
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : Optional[int] = logging.get_logger(__... | 444 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = ["torch", "torchsde"]
def __init__( sel... | 444 | 1 |
'''simple docstring'''
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def __magic_name__( _A ):
'''simple docstring'''
return (torch.arange(state.num_processes ) + 1.0 + (s... | 705 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class _SC... | 265 | 0 |
def a ( A__ = 1_0_0 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = set()
SCREAMING_SNAKE_CASE__ : Tuple = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = n + 1 # maximum limit
for a in ra... | 35 |
"""simple docstring"""
# Algorithm for the pigeonhole sorting
def __a ( a ):
"""simple docstring"""
_a = min(a ) # min() finds the minimum value
_a = max(a ) # max() finds the maximum value
_a = max_val - min... | 388 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_ac... | 704 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common im... | 549 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():... | 447 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : Tuple = logging.get_logger(__name__)
_a : Optio... | 447 | 1 |
'''simple docstring'''
import os
import pytest
from attr import dataclass
__A = '''us-east-1''' # defaults region
@dataclass
class a_ :
_snake_case = 42
_snake_case = """arn:aws:iam::558105141721:role/sagemaker_execution_role"""
... | 712 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_scor... | 61 | 0 |
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class A ( _lowerCAmelCase ):
warnings.warn(
'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
'be removed in Tr... | 22 |
'''simple docstring'''
UpperCAmelCase = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 100_0000,
"gigajoule": 10_0000_0000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 360_0000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalor... | 433 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A_ ( __a ):
def __init__( self : Tuple , snake_case__ : ... | 72 |
# using dfs for finding eulerian path traversal
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ):
lowercase = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
lowercase , l... | 72 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
SCREAMING_SNAKE_CASE_ = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:... | 517 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
SCREAMING_SNAKE_CASE_ = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "h... | 517 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import ... | 213 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr... | 213 | 1 |
'''simple docstring'''
from math import pi
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> float:
return 2 * pi * radius * (angle / 3_60)
if __name__ == "__main__":
print(arc_length(90, 10))
| 69 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=Tr... | 69 | 1 |
'''simple docstring'''
from maths.prime_check import is_prime
def _SCREAMING_SNAKE_CASE ( snake_case_ ):
if not isinstance(snake_case_ , snake_case_ ):
_lowercase = F"""Input value of [number={number}] must be an integer"""
raise TypeError(snake_case_ )
if is_prime(snake_ca... | 702 |
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_=() , snake_case_=None ... | 572 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
... | 34 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
_UpperCamelCase = 'docs/source/en/_toctree.yml'
def a_ ( _lowerCAmelCase ) -> Any:
__lowerCamelCase : Optional[int] = defaultdict(_lowerCAmelCase )
__lowerCam... | 459 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from acceler... | 258 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__magic_name__ = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ... | 258 | 1 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase = "isbn/0140328726" ):
snake_case__ = olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/... | 276 |
import math
__magic_name__ = 10
__magic_name__ = 7
__magic_name__ = BALLS_PER_COLOUR * NUM_COLOURS
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase = 20 ):
snake_case__ = math.comb(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ = math.comb(NUM... | 276 | 1 |
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self ):
lowerCamelCase_ : Tuple = {}
def _UpperCamelCase ( self ):
print(self.vertex )
for i in self.vertex:
print(a_ , ... | 73 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _Up... | 73 | 1 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCamelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : str ... | 306 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = (KDPM... | 311 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case_ : Any =... | 709 |
"""simple docstring"""
snake_case_ : List[str] = 6_5_5_2_1
def lowercase_ ( _lowercase : str ):
'''simple docstring'''
UpperCAmelCase : Dict = 1
UpperCAmelCase : Optional[int] = 0
for plain_chr in plain_text:
UpperCA... | 292 | 0 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
... | 630 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import re... | 433 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
... | 715 |
import cva
import numpy as np
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
if k in (0.04, 0.06):
UpperCamelCase =... | 170 | 0 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ ) -> List[Any]:
'''simple docstring'''
if p < 2:
raise ValueError("p should not be less than 2!" )
elif p == 2:
return True
a_ = 4
a_ = (1 << p) - 1
for... | 685 |
"""simple docstring"""
from timeit import timeit
def UpperCAmelCase ( snake_case : int ):
if number < 0:
raise ValueError('''the value of input must not be negative''' )
_lowerCAmelCase:str = 0
while number:
number &= number - 1
... | 227 | 0 |
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
__lowercase : Union[str, Any] = str(bin(lowerCAmelCase_ ... | 721 |
import fire
from utils import calculate_rouge, save_json
def snake_case_ ( lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : str=None , **lowerCAmelCase_ : str ):
__lowercase : Tuple... | 649 | 0 |
import argparse
from collections import defaultdict
import yaml
__A = "docs/source/en/_toctree.yml"
def lowerCAmelCase_ ( __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Dict =defaultdict(UpperCamelCase__ )
lowerCamelCase__: Dict ... | 59 |
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the c... | 407 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
@staticmethod
@abstractmethod
def A ( __snake_case : ArgumentParser ) -> int:
raise NotImplementedError()
... | 700 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTester... | 528 | 0 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
lowerCamelCase__ : Dict = 1_00
lowerCamelCase__ : Optional[Any] = set(range(3, NUM_PRIMES, 2))
primes.add(2)
lowerCamelCase__ : int
for prime in range... | 238 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfi... | 238 | 1 |
'''simple docstring'''
from __future__ import annotations
_UpperCamelCase = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ... | 211 |
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
_UpperCamelCase = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
_UpperCamelCase = ... | 211 | 1 |
"""simple docstring"""
from math import ceil
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> Any:
"""simple docstring"""
lowerCAmelCase_ : List[str] = list(range(0 , _UpperCAmelCase ) )
lowerCAmelCase_ : Optional[int] =... | 610 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class __lowercase :
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase__ : Any):
SCREAMING_SNAKE_CASE_: Any = data
SCREAMING_SN... | 671 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
a : Dict = logging.getLogger()
def lowercase ( ... | 609 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import... | 609 | 1 |
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __SCREAMING_SNAKE_CASE ( *lowerCamelCase_: Any ):
"""simple docstring"""
if not isinstance(__UpperCAmelCase ... | 449 |
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CO... | 299 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
UpperCAmelCase_ : int = logging.get_logger(__name__) # pylint: disable=invalid-name
cla... | 713 |
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : L... | 176 | 0 |
import itertools
import string
from collections.abc import Generator, Iterable
def UpperCamelCase_( __magic_name__ : Iterable[str] , __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = iter(__magic_name__ ... | 687 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import Backb... | 687 | 1 |
"""simple docstring"""
from __future__ import annotations
import queue
class lowercase :
def __init__( self , lowercase ) -> List[Any]:
lowerCAmelCase = data
lowerCAmelCase = None
lowerCAmelCase = None
def UpperCAme... | 706 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"kss... | 393 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_... | 161 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_lowerCAmelCase = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, M... | 161 | 1 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
_snake_case = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.... | 720 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_snake_case = logging.get_logger(__name__)
class lowerCAmelCase ( lowercase_ ):
def __init__( self :Optional[int] , *_lowercase :Tuple , ... | 611 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __lowercase (_SCREAMING_SNAKE_CASE :Dict... | 507 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
f... | 507 | 1 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def __lowerCAmelCase ( ):
'''simple docstring'''
__snake_case , __snake_case : Any = 9, 1_4 # noqa: F841
__snake_case : ... | 715 | def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
__snake_case : int = 1
__snake_case : Any = 2
while i * i <= n:
__snake_case : Tuple = 0
while n % i == 0:
n... | 390 | 0 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_tr... | 624 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, Tr... | 39 | 0 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
UpperCAmelCase_ : Any = logging.get_logger(__name__)
class lowercase__ ( __A ):
def __init__( self , *_lowercase , **_lowercase ... | 709 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import loggin... | 440 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available()... | 159 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class __lowercase :
_A = None
_A = False
_A = False
_A = False
_A = None
_A = None
_A = False
_A = False
_A ... | 461 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Union[str, Any] = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONF... | 704 | import collections
import os
import re
from pathlib import Path
__lowerCAmelCase : Tuple = 'src/transformers'
# Matches is_xxx_available()
__lowerCAmelCase : Union[str, Any] = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
__lowerCAmelCase ... | 164 | 0 |
"""simple docstring"""
from __future__ import annotations
UpperCAmelCase = []
def __magic_name__ ( _lowerCamelCase: list[list[int]], _lowerCamelCase: int, _lowerCamelCase: int ) -> bool:
'''simple docstring'''
for i in range(len(_lowerCamelCase ) ):
if board[r... | 535 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase: Tuple, _lowerCamelCase: Any ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase = [1]
for i in range(2, _lowerCamelCase ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * ... | 535 | 1 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __lowerCamelCase ( __snake_case : List[Any] ) -> List[str]:
"""simple docstring"""
A__ : Optional[Any] ... | 711 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case : int = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransf... | 687 | 0 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ..... | 39 |
import unittest
from transformers import DonutProcessor
lowerCAmelCase_ = '''naver-clova-ix/donut-base'''
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Union[str, Any] ) ->Any:
... | 39 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
... | 585 |
from PIL import Image
def _a ( __SCREAMING_SNAKE_CASE : Image ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = image.size
_lowerCAmelCase = 0
_lowerCAmelCase = image.load()
for i in range(__SCREAMING_SNAKE_CASE ):
for j in range(_... | 585 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_visi... | 166 | A__ = range(2, 20 + 1)
A__ = [10**k for k in range(ks[-1] + 1)]
A__ = {}
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[int] , a_ : Optional[int] , a_ : str):
lowerCamelCase :Optional[Any] = sum(a_i[j] for j... | 166 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE... | 432 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
... | 432 | 1 |
from math import isqrt
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> list[int]:
"""simple docstring"""
_UpperCAmelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 ... | 32 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''... | 309 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
snake_case : Optional[Any] = TypeVar('T')
snake_case : List[Any] = TypeVar('U')
class lowerCAmelCase__ ( Generic[T, U] ):
def __init__( self ... | 182 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLi... | 182 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from... | 1 |
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_confi... | 227 | 0 |
import numpy as np
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
return 1 / (1 + np.exp(-vector ))
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
return vector * sigmoid(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.tes... | 655 | 1 |
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm mus... | 599 | """simple docstring"""
_UpperCamelCase : Any = {str(digit): digit**5 for digit in range(10)}
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_lowerCAmelCase ) )
def a_ ( ):
... | 599 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase__ :Tuple = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
... | 633 |
from pathlib import Path
import numpy as np
from PIL import Image
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase , lowercase , lowercase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def UpperCamelCase ... | 633 | 1 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowercase__ ( ):
_SCREAMING_SNAKE_CASE : Any = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
... | 621 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
fro... | 621 | 1 |
def UpperCAmelCase_( a__ ) -> Optional[Any]:
"""simple docstring"""
return 10 - x * x
def UpperCAmelCase_( a__ , a__ ) -> int:
"""simple docstring"""
if equation(a__ ) * equation(a__ ) >= 0:
raise ValueError('''Wrong s... | 711 |
def UpperCAmelCase_( a__ , a__ , a__ , a__ , a__ , a__ ):
"""simple docstring"""
if index == r:
for j in range(a__ ):
print(data[j] , end=''' ''' )
print(''' ''' )
return
# When no more elements are there to put in data[... | 333 | 0 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ... | 57 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_... | 57 | 1 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__magic_name__ : Optional[int] = 'sshleifer/... | 608 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ : Dict = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
... | 608 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ : Dict = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNext... | 491 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... | 491 | 1 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput... | 554 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.o... | 554 | 1 |
def __magic_name__ ( lowercase ) -> List[str]:
"""simple docstring"""
if collection == []:
return []
# get some information about the collection
lowercase_ : Optional[int] = len(lowercase )
lowercase_ : int = ... | 458 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
cente... | 458 | 1 |
'''simple docstring'''
def __lowerCamelCase ():
return 1
def __lowerCamelCase (UpperCAmelCase__ : int ):
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def __lowerCamelCase (UpperCAmelCase__ : int ):
return 0 if x < 0... | 713 | import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import C... | 647 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
... | 210 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _lowercase ( __SCREAMING_SNAKE_CASE ) -> Optional[int]:
UpperCamelCase__ : Optional[Any] = [
'encoder.version',
'decoder.version',
'model.e... | 410 | 0 |
'''simple docstring'''
# using dfs for finding eulerian path traversal
def _A ( A__ , A__ , A__ , A__=None ):
"""simple docstring"""
__lowercase = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
__lowercase , __lowercase... | 624 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
... | 624 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def lowerCAmelCase_ ( _lowerCamelCase: int ):
if "model" in orig_key:
__SCREAMING_SNAKE_CASE : Optional[Any] = orig_key.replace("""model.""" , """""" )
... | 578 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDep... | 387 | 0 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagem... | 711 |
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
"""simple docstring"""
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
__UpperCAmelCa... | 487 | 0 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
... | 379 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__UpperCAmelCase = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (... | 379 | 1 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ... | 5 |
def lowercase_ ( _A : int , _A : int ):
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
lowerCamelCase__ : List[str] = str(bin(_A ) )[2:] # remove the leading "0b"
... | 5 | 1 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
__SCREAMING_SNAKE_CASE = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlat... | 553 |
"""simple docstring"""
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
lowerCAmelCase :Tuple = len(a__ )
print('The following activities are selected:' )
# The first activity is always selected
lowerCAmelCase :Dict = ... | 553 | 1 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_com... | 701 |
'''simple docstring'''
from __future__ import annotations
def __a ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ):
if b == 0:
return (1, 0)
((a__) , (a__)) : int = extended_euclid(lowerCAmelCase__ , a % b )
a__ : Opti... | 340 | 0 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import Conf... | 654 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils im... | 654 | 1 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def lowercase__ ( __UpperCamelCase )-> float:
return np.dot(__UpperCamelCase , __UpperCamelCase )
class a_ :
... | 721 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keep... | 35 | 0 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)... | 449 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizer... | 449 | 1 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tenso... | 712 |
from __future__ import annotations
import requests
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Dict = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
return requests.get(__lowerCamelCase ).json()
def lowerCa... | 381 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : int = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json... | 98 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ , lowercase__ = position
lowercase__ = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
... | 43 | 0 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy a... | 707 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
fr... | 672 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A : Any = {
'configuration_chinese_clip': [
'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ChineseCLIPConfig',
'ChineseCLIPOnnxConfig',... | 140 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import l... | 86 | 0 |
def lowerCamelCase_ ( UpperCamelCase__ : str ):
'''simple docstring'''
UpperCamelCase__ = 0
for ch in input_str:
UpperCamelCase__ = ord(UpperCamelCase__ )
UpperCamelCase__ = pow(2, UpperCamelCase__ )
# If we ... | 591 | from __future__ import annotations
def lowerCamelCase_ ( UpperCamelCase__ : list[float], UpperCamelCase__ : int ):
'''simple docstring'''
print(F"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(UpperCamelCase__ ):
... | 591 | 1 |
'''simple docstring'''
import os
def A__ ( A : str = "input.txt"):
'''simple docstring'''
with open(os.path.join(os.path.dirname(A) , A)) as input_file:
UpperCamelCase : str = [
[int(A) for element in line.split(",")]
for line in inp... | 173 |
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def A__ ( A : np.ndarray):
'''simple docstring'''
return input_array.reshape((input_array.size, 1))
def A__ ( ... | 173 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Optional[int] = logging.get_logger(__name__)
A : List[Any] = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX m... | 356 | from collections.abc import Callable
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = a
SCREAMING_SNAKE_CASE_ = b
if function(__UpperCamelCase ) == 0: # one of the a or b is a root for the function
... | 356 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__lowerCAmel... | 201 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, ra... | 201 | 1 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
_a = """docs/source/en/_toctree.yml"""
def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = ... | 78 |
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
_a = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", "... | 78 | 1 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case : List[str] = get_tests_dir('f... | 693 |
def _A ( __snake_case :int ) -> int:
"""simple docstring"""
assert isinstance(__snake_case , __snake_case ), f'''The input value of [n={number}] is not an integer'''
if number == 1:
return 2
elif number < 1:
__SCREAMING_SNAKE_CASE = f'''The inp... | 693 | 1 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class _a ( SCREAMING_SNAKE_CASE , u... | 207 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
t... | 207 | 1 |
'''simple docstring'''
import os
import sys
import transformers
_lowerCAmelCase = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", tor... | 161 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSe... | 539 | 0 |
from importlib import import_module
from .logging import get_logger
__snake_case = get_logger(__name__)
class UpperCAmelCase :
def __init__( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Optio... | 181 |
import math
def _lowercase ( SCREAMING_SNAKE_CASE_ : int = 100 ):
"""simple docstring"""
UpperCamelCase = sum(i * i for i in range(1 , n + 1 ) )
UpperCamelCase = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
r... | 181 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dime... | 36 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_c... | 132 | 0 |
from ..utils import DummyObject, requires_backends
class lowercase ( metaclass=lowercase__ ):
lowerCamelCase : List[Any] = ['''note_seq''']
def __init__( self : Union[str, Any] , *_lowercase : Any , **_lowercase : int ):
requires_backend... | 705 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from ... | 250 | 0 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
fr... | 323 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> np.array:
"""simple docstring"""
_UpperCamelCase = int(np.cei... | 19 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils i... | 114 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase : Optional[int] = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not i... | 114 | 1 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
UpperCamelCase__ : List[str] = ... | 410 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class A_ ( SCREAMING_SNAKE_CASE ):
... | 652 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCAmelCase_ ( _lowerCamelCase: Optional[Any] ):
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x34... | 719 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_... | 178 | 0 |