import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
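# The tests above exercise `find_executable_batch_size` with a faked OOM error. A
# minimal sketch of how the decorator is used in a real training loop follows; the
# dataloader/model helpers named here are illustrative placeholders, not part of the
# accelerate API:
#
# @find_executable_batch_size(starting_batch_size=128)
# def train(batch_size):
#     # everything that depends on the batch size must be (re)built inside the
#     # function, because it is re-run from scratch after each OOM-triggered halving
#     dataloader = build_dataloader(batch_size)  # hypothetical helper
#     for batch in dataloader:
#         loss = model(batch)  # hypothetical model
#         loss.backward()
#
# train()  # the decorator injects `batch_size`; callers do not pass it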
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
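# The class above is the "dummy object" pattern for optional backends: importing it
# always works, but any use raises a helpful error. A simplified, self-contained
# sketch of the mechanism (the real `DummyObject`/`requires_backends` utilities are
# more elaborate; the names and message below are illustrative):
#
# class DummyObject(type):
#     # metaclass so even attribute/classmethod access on the class fails loudly
#     def __getattr__(cls, key):
#         requires_backends(cls, cls._backends)
#
# def requires_backends(obj, backends):
#     name = obj.__name__ if isinstance(obj, type) else obj.__class__.__name__
#     raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")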
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
lowercase_ = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
lowercase_ = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
lowercase_ = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
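# Sketch of the non-aggregated path in `_compute` above: with `use_aggregator=False`
# the result maps each rouge type to a per-example list of `Score` tuples instead of
# a bootstrap aggregate. Illustrative usage:
#
# rouge = datasets.load_metric("rouge")
# results = rouge.compute(
#     predictions=["hello there", "general kenobi"],
#     references=["hello there", "general kenobi"],
#     use_aggregator=False,
# )
# print(len(results["rouge1"]))  # 2 -- one Score(precision, recall, fmeasure) per pair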
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    Greedy selection: print a maximum-size set of mutually compatible activities,
    assuming the activities are sorted by finish time.

    >>> start = [1, 3, 0, 5, 8, 5]
    >>> finish = [2, 4, 6, 7, 9, 9]
    >>> print_max_activities(start, finish)
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
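# A small variant of the same greedy rule that returns the selected indices instead
# of printing them (my own sketch, not part of the original module):
#
# def max_activities(start: list[int], finish: list[int]) -> list[int]:
#     selected = [0]  # the first activity (earliest finish) is always chosen
#     i = 0
#     for j in range(1, len(finish)):
#         if start[j] >= finish[i]:  # compatible with the last selected activity
#             selected.append(j)
#             i = j
#     return selected
#
# assert max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]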
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
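# Why `default_factory` is needed here: dataclasses reject mutable defaults such as
# `[]`, so `list_field` wraps the default in a factory. A standalone illustration
# (not part of the test file):
#
# from dataclasses import dataclass, field
#
# @dataclass
# class Args:
#     tags: list = field(default_factory=list)  # `tags: list = []` raises ValueError
#
# a, b = Args(), Args()
# a.tags.append("x")
# assert b.tags == []  # each instance gets its own list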
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
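# Minimal end-to-end illustration of the parser under test (a sketch, independent of
# the unittest machinery above):
#
# parser = HfArgumentParser(BasicExample)
# (example,) = parser.parse_args_into_dataclasses(
#     ["--foo", "1", "--bar", "0.5", "--baz", "quux", "--flag", "True"]
# )
# assert example == BasicExample(foo=1, bar=0.5, baz="quux", flag=True)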
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """
    Convert `speed` from `unit_from` to `unit_to`, pivoting through km/h.

    >>> convert_speed(100, "km/h", "m/s")
    27.778
    >>> convert_speed(100, "m/s", "km/h")
    360.0
    """
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
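# How the two tables compose: `speed_chart` converts the source unit *to* km/h and
# `speed_chart_inverse` converts km/h *to* the target unit, so every conversion
# pivots through km/h. Illustrative check (not in the original module):
#
# assert convert_speed(1, "mph", "knot") == round(1 * 1.609344 * 0.539956803, 3)  # 0.869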
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Convert a pytorch `BertModel` state dict into a TF1-style checkpoint under `ckpt_dir`.
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
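# Example invocation (the script filename is an assumption; the flags come from the
# argparse definition above):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoints
#
# Note the script targets TF1-style APIs (`tf.Session`, `tf.get_variable`), so it
# needs TensorFlow 1.x or the `tf.compat.v1` namespace.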
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False  # numpy inputs skip the torch round-trip below
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
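# Quick property check for `slerp` above (my own sketch, not part of the pipeline):
# t=0 returns v0, t=1 returns v1, and intermediate t moves along the great circle,
# which is why it is used below to mix latents, prompt embeddings, and CLIP image
# embeddings.
#
# v0 = np.array([1.0, 0.0])
# v1 = np.array([0.0, 1.0])
# assert np.allclose(slerp(0.0, v0, v1), v0)
# assert np.allclose(slerp(1.0, v0, v1), v1)
# assert np.allclose(slerp(0.5, v0, v1), [0.7071, 0.7071], atol=1e-3)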
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
def UpperCAmelCase_ ( self, A = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
self.enable_attention_slicing(A )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
set_requires_grad(self.vae, A )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
set_requires_grad(self.vae, A )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
set_requires_grad(self.unet, A )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
set_requires_grad(self.unet, A )
def UpperCAmelCase_ ( self, A, A, A ):
"""simple docstring"""
lowerCamelCase : Any = min(int(num_inference_steps * strength ), A )
lowerCamelCase : str = max(num_inference_steps - init_timestep, 0 )
lowerCamelCase : List[str] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t**0.5 * noise_pred) / alpha_prod_t**0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads

        return noise_pred, latents
    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
from typing import Dict

from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_torch_multi_gpu,
    require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging


logger = logging.get_logger(__name__)


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    from transformers import Trainer

    class DummyDataset(Dataset):
        def __init__(self, length: int = 101):
            self.length = length

        def __len__(self):
            return self.length

        def __getitem__(self, i):
            return i

    class DummyDataCollator:
        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}

    class DummyModel(nn.Module):
        def __init__(self):
            super().__init__()
            # Add some (unused) params otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional

from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate the serving server from provided command line arguments.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
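# Once the server is running (e.g. `transformers-cli serve --task sentiment-analysis`),
# the routes registered above can be exercised over HTTP. The payload shapes below are
# inferred from the `Body(..., embed=True)` signatures, so treat them as a sketch:
#
#   curl http://localhost:8888/
#   curl -X POST http://localhost:8888/tokenize \
#        -H "Content-Type: application/json" \
#        -d '{"text_input": "Hello world", "return_ids": true}'
#   curl -X POST http://localhost:8888/forward \
#        -H "Content-Type: application/json" \
#        -d '{"inputs": "Hello world"}'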
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MgpstrProcessor, ViTImageProcessor


@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        pred_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        decode_strs = processor.char_decode(pred_ids)
        decoded_tok = tokenizer.batch_decode(pred_ids)
lowerCamelCase__ = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def __magic_name__ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = MgpstrProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
lowerCamelCase__ = None
lowerCamelCase__ = self.prepare_image_inputs()
lowerCamelCase__ = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __magic_name__ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = MgpstrProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
lowerCamelCase__ = torch.randn(1 , 27 , 38 )
lowerCamelCase__ = torch.randn(1 , 27 , 5_0257 )
lowerCamelCase__ = torch.randn(1 , 27 , 3_0522 )
lowerCamelCase__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 360 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    # the mangled original repeated a single parameter name three times (a
    # SyntaxError); the body below names the three electrical quantities.
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if inductance < 0:
        raise ValueError('Inductance cannot be negative')
    if frequency < 0:
        raise ValueError('Frequency cannot be negative')
    if reactance < 0:
        raise ValueError('Inductive reactance cannot be negative')
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError('Exactly one argument must be 0')
if __name__ == "__main__":
import doctest
doctest.testmod()
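# Usage sketch (hypothetical values): exactly one argument is zero and the
# function solves for it from X_L = 2 * pi * f * L.
print(ind_reactance(0, 60, 37.699))   # {'inductance': ~0.1}
print(ind_reactance(0.1, 60, 0))      # {'reactance': ~37.699}
print(ind_reactance(0.1, 0, 37.699))  # {'frequency': ~60.0}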
| 320 |
'''simple docstring'''
__all__ = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value  # the mangled line imported one name four times; these match __all__ above
from .image import Image
from .translation import Translation, TranslationVariableLanguages
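# __all__ pins the public names for star-imports; the eager imports below it
# bind the same symbols at package level so both import styles resolve.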
| 320 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = ConsistencyModelPipeline
__UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
__UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
__UpperCamelCase = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet' , )
return unet
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
return unet
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__=False ):
'''simple docstring'''
if class_cond:
snake_case: Dict = self.dummy_cond_unet
else:
snake_case: List[Any] = self.dummy_uncond_unet
# Default to CM multistep sampler
snake_case: Optional[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
snake_case: Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ):
'''simple docstring'''
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
snake_case: Any = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
snake_case: Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
snake_case: int = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case: Union[str, Any] = self.get_dummy_components()
snake_case: Tuple = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ )
snake_case: Any = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: int = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 32, 32, 3)
snake_case: str = image[0, -3:, -3:, -1]
snake_case: List[str] = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case: Any = self.get_dummy_components(class_cond=SCREAMING_SNAKE_CASE__ )
snake_case: Any = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: Any = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = 0
snake_case: int = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 32, 32, 3)
snake_case: Optional[Any] = image[0, -3:, -3:, -1]
snake_case: Optional[int] = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case: List[Any] = self.get_dummy_components()
snake_case: Dict = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: str = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = 1
snake_case: Dict = None
snake_case: Union[str, Any] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 32, 32, 3)
snake_case: str = image[0, -3:, -3:, -1]
snake_case: List[Any] = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case: Optional[int] = self.get_dummy_components(class_cond=SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ )
snake_case: int = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = 1
snake_case: int = None
snake_case: Optional[int] = 0
snake_case: Any = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 32, 32, 3)
snake_case: List[str] = image[0, -3:, -3:, -1]
snake_case: Optional[int] = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="cpu" , SCREAMING_SNAKE_CASE__=torch.floataa , SCREAMING_SNAKE_CASE__=(1, 3, 64, 64) ):
'''simple docstring'''
snake_case: str = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = {
'num_inference_steps': None,
'timesteps': [22, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
snake_case: Any = self.get_fixed_latents(seed=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ , shape=SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = latents
return inputs
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__="cpu" , SCREAMING_SNAKE_CASE__=torch.floataa , SCREAMING_SNAKE_CASE__=(1, 3, 64, 64) ):
'''simple docstring'''
if type(SCREAMING_SNAKE_CASE__ ) == str:
snake_case: Optional[int] = torch.device(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
return latents
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
snake_case: int = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
snake_case: Optional[int] = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: str = self.get_inputs()
snake_case: Optional[int] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 64, 64, 3)
snake_case: Tuple = image[0, -3:, -3:, -1]
snake_case: Optional[Any] = np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
snake_case: int = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
snake_case: Optional[Any] = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: str = self.get_inputs()
snake_case: Union[str, Any] = 1
snake_case: List[str] = None
snake_case: int = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 64, 64, 3)
snake_case: Optional[int] = image[0, -3:, -3:, -1]
snake_case: str = np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
snake_case: Dict = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
snake_case: str = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.get_inputs(get_fixed_latents=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=SCREAMING_SNAKE_CASE__ , enable_math=SCREAMING_SNAKE_CASE__ , enable_mem_efficient=SCREAMING_SNAKE_CASE__ ):
snake_case: List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 64, 64, 3)
snake_case: Union[str, Any] = image[0, -3:, -3:, -1]
snake_case: Dict = np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
snake_case: List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
snake_case: Tuple = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: str = self.get_inputs(get_fixed_latents=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = 1
snake_case: Any = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=SCREAMING_SNAKE_CASE__ , enable_math=SCREAMING_SNAKE_CASE__ , enable_mem_efficient=SCREAMING_SNAKE_CASE__ ):
snake_case: List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 64, 64, 3)
snake_case: List[str] = image[0, -3:, -3:, -1]
snake_case: Union[str, Any] = np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 | 692 |
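# Note on the fp16 tests above: torch.backends.cuda.sdp_kernel is a context
# manager selecting which scaled-dot-product-attention backends PyTorch 2.x
# may dispatch to; passing enable_flash=True with the math and
# memory-efficient paths disabled forces the flash-attention kernel.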
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # walk the attribute path, then copy the fairseq tensor into place
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        elif name.split('.')[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    # build a bias-free linear layer that shares the embedding's weights;
    # the mangled tuple-unpacking with an annotation was a SyntaxError.
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    # read a fairseq dict file and place the four special tokens first
    with open(dict_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        words = [line.split(' ')[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        '<s>': 0,
        '<pad>': 1,
        '</s>': 2,
        '<unk>': 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True)
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True, )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove('embed_out')
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""")
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, 'vocab.json'), 'w') as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, 'vocab.json'))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'speech_to_text_2'
    config['feature_extractor_type'] = 'wav2vec2'
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
) | 692 | 1 |
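# Typical invocation of the conversion script above (the script filename and
# all paths are placeholders, not from the original file):
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_seq2seq.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path /path/to/output_dir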
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        field=None,
        num_proc=None,
        **kwargs,
    ):
        # the mangled original reused one parameter name throughout the
        # signature (a SyntaxError); the super() call and builder kwargs
        # recover the intended names.
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset,
        path_or_buf,
        batch_size=None,
        num_proc=None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead.")
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj, orient, lines, index, **to_json_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format", ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format", ):
                    written += file_obj.write(json_str)
        return written
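# Note on _write above: the Arrow table is sliced into batch_size row chunks;
# with num_proc > 1 the chunks are JSON-encoded in a multiprocessing.Pool via
# imap, which yields results in submission order, so the file object can
# simply concatenate the encoded chunks.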
| 657 |
"""simple docstring"""
def __A ( a_ : int = 10 , a_ : int = 10_00 , a_ : bool = True )-> int:
'''simple docstring'''
assert (
isinstance(a_ , a_ )
and isinstance(a_ , a_ )
and isinstance(a_ , a_ )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError('''Invalid value for min_val or max_val (min_value < max_value)''' )
return min_val if option else max_val
def __A ( a_ : int , a_ : int )-> int:
'''simple docstring'''
return int((number_a + number_a) / 2 )
def __A ( a_ : int , a_ : int , a_ : int )-> None:
'''simple docstring'''
assert (
isinstance(a_ , a_ ) and isinstance(a_ , a_ ) and isinstance(a_ , a_ )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError('''argument value for lower and higher must be(lower > higher)''' )
if not lower < to_guess < higher:
raise ValueError(
'''guess value must be within the range of lower and higher value''' )
def answer(a_ : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('''started...''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = lower
SCREAMING_SNAKE_CASE : int = higher
SCREAMING_SNAKE_CASE : List[str] = []
while True:
SCREAMING_SNAKE_CASE : Any = get_avg(a_ , a_ )
last_numbers.append(a_ )
if answer(a_ ) == "low":
SCREAMING_SNAKE_CASE : Dict = number
elif answer(a_ ) == "high":
SCREAMING_SNAKE_CASE : Tuple = number
else:
break
print(F"guess the number : {last_numbers[-1]}" )
print(F"details : {last_numbers!s}" )
def __A ( )-> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = int(input('''Enter lower value : ''' ).strip() )
SCREAMING_SNAKE_CASE : Tuple = int(input('''Enter high value : ''' ).strip() )
SCREAMING_SNAKE_CASE : List[str] = int(input('''Enter value to guess : ''' ).strip() )
guess_the_number(a_ , a_ , a_ )
if __name__ == "__main__":
main()
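# The loop above is a plain binary search, so it needs at most about
# log2(higher - lower) probes. A non-interactive example (illustrative):
#
# guess_the_number(0, 1000, 355)
# started...
# guess the number : 355
# details : [500, 250, 375, 312, 343, 359, 351, 355]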
| 698 | 0 |
"""
Prime-number utilities (is_prime, sieve_er, get_prime_numbers,
prime_factorization, goldbach, gcd, kg_v, ...). The mangled original gave
every function the same name and reused one parameter name, so nothing was
importable; the reconstruction restores the names the bodies themselves call.
"""
from math import sqrt


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int) -> list[int]:
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n: int) -> list[int]:
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number: int) -> list[int]:
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returned of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"
    return number % 2 == 0


def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"
    return number % 2 != 0


def goldbach(number: int) -> list[int]:
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number_1: int, number_2: int) -> int:
    assert (
        isinstance(number_1, int)
        and isinstance(number_2, int)
        and (number_1 >= 0)
        and (number_2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number_2 != 0:
        rest = number_1 % number_2
        number_1 = number_2
        number_2 = rest
    # precondition
    assert isinstance(number_1, int) and (
        number_1 >= 0
    ), "'number' must been from type int and positive"
    return number_1


def kg_v(number_1: int, number_2: int) -> int:
    assert (
        isinstance(number_1, int)
        and isinstance(number_2, int)
        and (number_1 >= 1)
        and (number_2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number_1 > 1 and number_2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number_1)
        prime_fac_2 = prime_factorization(number_2)
    elif number_1 == 1 or number_2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number_1, number_2)
    count_1 = 0
    count_2 = 0
    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count_1 = prime_fac_1.count(n)
                count_2 = prime_fac_2.count(n)
                for _ in range(max(count_1, count_2)):
                    ans *= n
            else:
                count_1 = prime_fac_1.count(n)
                for _ in range(count_1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count_2 = prime_fac_2.count(n)
            for _ in range(count_2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def get_prime(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list[int]:
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list[int]:
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple[int, int]:
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
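# Smoke tests for the reconstruction above (expected values checked by hand):
assert is_prime(97) and not is_prime(1)
assert prime_factorization(84) == [2, 2, 3, 7]
assert gcd(24, 36) == 12 and kg_v(24, 36) == 72
assert goldbach(28) == [5, 23]
assert simplify_fraction(10, 20) == (1, 2)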
| 717 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ , A_ , A_ , A_ = None , )-> Optional[int]:
'''simple docstring'''
super().__init__()
self.register_modules(transformer=A_ , vae=A_ , scheduler=A_ )
# create a imagenet -> id dictionary for easier use
UpperCamelCase = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(',' ):
UpperCamelCase = int(A_ )
UpperCamelCase = dict(sorted(self.labels.items() ) )
def UpperCAmelCase_ ( self , A_ )-> List[int]:
'''simple docstring'''
if not isinstance(A_ , A_ ):
UpperCamelCase = list(A_ )
for l in label:
if l not in self.labels:
raise ValueError(
F'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , A_ , A_ = 4.0 , A_ = None , A_ = 50 , A_ = "pil" , A_ = True , )-> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
UpperCamelCase = len(A_ )
UpperCamelCase = self.transformer.config.sample_size
UpperCamelCase = self.transformer.config.in_channels
UpperCamelCase = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=A_ , device=self.device , dtype=self.transformer.dtype , )
UpperCamelCase = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
UpperCamelCase = torch.tensor(A_ , device=self.device ).reshape(-1 )
UpperCamelCase = torch.tensor([1000] * batch_size , device=self.device )
UpperCamelCase = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(A_ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
UpperCamelCase = latent_model_input[: len(A_ ) // 2]
UpperCamelCase = torch.cat([half, half] , dim=0 )
UpperCamelCase = self.scheduler.scale_model_input(A_ , A_ )
UpperCamelCase = t
if not torch.is_tensor(A_ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
UpperCamelCase = latent_model_input.device.type == 'mps'
if isinstance(A_ , A_ ):
UpperCamelCase = torch.floataa if is_mps else torch.floataa
else:
UpperCamelCase = torch.intaa if is_mps else torch.intaa
UpperCamelCase = torch.tensor([timesteps] , dtype=A_ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
UpperCamelCase = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCamelCase = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
UpperCamelCase = self.transformer(
A_ , timestep=A_ , class_labels=A_ ).sample
# perform guidance
if guidance_scale > 1:
UpperCamelCase , UpperCamelCase = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
UpperCamelCase , UpperCamelCase = torch.split(A_ , len(A_ ) // 2 , dim=0 )
UpperCamelCase = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
UpperCamelCase = torch.cat([half_eps, half_eps] , dim=0 )
UpperCamelCase = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
UpperCamelCase , UpperCamelCase = torch.split(A_ , A_ , dim=1 )
else:
UpperCamelCase = noise_pred
# compute previous image: x_t -> x_t-1
UpperCamelCase = self.scheduler.step(A_ , A_ , A_ ).prev_sample
if guidance_scale > 1:
UpperCamelCase , UpperCamelCase = latent_model_input.chunk(2 , dim=0 )
else:
UpperCamelCase = latent_model_input
UpperCamelCase = 1 / self.vae.config.scaling_factor * latents
UpperCamelCase = self.vae.decode(A_ ).sample
UpperCamelCase = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(A_ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=A_ )
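# Guidance math used in __call__ above: with scale s and a doubled batch
# (class-conditional half + null-class half) the transformer runs once per
# step, and the guided noise estimate is
#     eps = eps_uncond + s * (eps_cond - eps_uncond)
# s = 1 reduces to the unguided prediction, hence the guidance_scale > 1 gates.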
| 432 | 0 |
"""simple docstring"""
import pytest
a_ = "__dummy_dataset1__"
a_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
DATASET_LOADING_SCRIPT_CODE = a_  # alias the long loading-script string above, whose mangled name survived


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    # the mangled original gave all three fixtures one name and repeated a
    # single parameter three times; the body's own references recover the
    # intended chained-fixture signatures.
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
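# A hypothetical consumer of the fixtures above (not part of the original
# conftest; shown only to illustrate how pytest chains them):
#
# import pathlib
#
# def test_dummy_script_dir_layout(dataset_loading_script_dir):
#     p = pathlib.Path(dataset_loading_script_dir)
#     assert (p / f"{p.name}.py").is_file()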
| 480 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
snake_case : List[Any] = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 124 | 0 |
import os
from pathlib import Path


def load_cuda_kernels():
    # the mangled original resolved Path(...) on an undefined name; __file__
    # is the anchor that makes the kernels directory lookup work.
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
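# JIT compilation needs nvcc and ninja on PATH plus the CUDA headers, so
# callers typically guard the load (a sketch, not the original call site):
try:
    MultiScaleDeformableAttention = load_cuda_kernels()
except Exception as exc:  # missing toolchain, no GPU, etc.
    MultiScaleDeformableAttention = None
    print(f"Could not compile custom kernels, falling back to PyTorch ops: {exc}")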
| 405 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __magic_name__ ( _a):
@require_torch
def _UpperCAmelCase ( self : Tuple ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
UpperCAmelCase = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
UpperCAmelCase = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
BertModel.from_pretrained(__SCREAMING_SNAKE_CASE )
BertTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
pipeline(task="fill-mask" ,model=__SCREAMING_SNAKE_CASE )
# baseline - just load from_pretrained with normal network
UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
UpperCAmelCase = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCAmelCase = "1"
UpperCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE ,env=__SCREAMING_SNAKE_CASE ,check=__SCREAMING_SNAKE_CASE ,capture_output=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn("success" ,result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self : Optional[int] ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
UpperCAmelCase = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
UpperCAmelCase = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
BertModel.from_pretrained(__SCREAMING_SNAKE_CASE )
BertTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
pipeline(task="fill-mask" ,model=__SCREAMING_SNAKE_CASE )
# baseline - just load from_pretrained with normal network
UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
UpperCAmelCase = self.get_env()
UpperCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE ,env=__SCREAMING_SNAKE_CASE ,check=__SCREAMING_SNAKE_CASE ,capture_output=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn("success" ,result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self : str ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
UpperCAmelCase = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
# baseline - just load from_pretrained with normal network
UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
UpperCAmelCase = self.get_env()
UpperCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE ,env=__SCREAMING_SNAKE_CASE ,check=__SCREAMING_SNAKE_CASE ,capture_output=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn("success" ,result.stdout.decode() )
# next emulate no network
UpperCAmelCase = [sys.executable, "-c", "\n".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCAmelCase = "1"
UpperCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE ,env=__SCREAMING_SNAKE_CASE ,check=__SCREAMING_SNAKE_CASE ,capture_output=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn("success" ,result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self : Dict ):
UpperCAmelCase = "\nfrom transformers import pipeline\n "
UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
UpperCAmelCase = self.get_env()
UpperCAmelCase = "1"
UpperCAmelCase = [sys.executable, "-c", "\n".join([load, mock, run] )]
UpperCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE ,env=__SCREAMING_SNAKE_CASE ,check=__SCREAMING_SNAKE_CASE ,capture_output=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" ,result.stderr.decode().replace("\n" ,"" ) ,)
    @require_torch
    def test_offline_mode_dynamic_model(self):
        load = "\nfrom transformers import AutoModel\n"
        run = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n"

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
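    # --- illustrative sketch, not part of the original test file ---
    # The pattern the tests above exercise, reduced to its essence: run a child
    # Python process with TRANSFORMERS_OFFLINE=1 so that from_pretrained() must
    # resolve everything from the local cache. Assumes the model was cached by
    # an earlier online run; this helper name is ours, not from transformers.
    def _check_offline_load(self, mname="hf-internal-testing/tiny-random-bert"):
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        code = f"from transformers import BertModel; BertModel.from_pretrained({mname!r}); print('success')"
        result = subprocess.run([sys.executable, "-c", code], env=env, check=False, capture_output=True)
        return b"success" in result.stdout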
| 405 | 1 |
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph as an adjacency-list dict."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete (fully connected) graph on the given vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
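
# Illustrative usage (seeding only to make the demo reproducible; the exact
# edge set varies otherwise, while complete_graph is fully deterministic):
def _demo() -> None:
    random.seed(0)
    print(random_graph(4, 0.5))  # a sparse undirected adjacency dict
    print(complete_graph(3))  # {0: [1, 2], 1: [0, 2], 2: [0, 1]}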
| 606 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Computes the expected height and width when feeding images to the processor."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
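
# Quick usage sketch (illustrative, outside the test classes; assumes torch,
# vision dependencies and the COCO fixture image are available): with default
# settings the processor resizes the shortest edge to 800 and returns a batch.
def _processor_demo():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    processor = DeformableDetrImageProcessor()
    inputs = processor(images=image, return_tensors="pt")
    # For this fixture image the slow test above pins the shape to [1, 3, 800, 1066].
    return inputs["pixel_values"].shape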
| 414 | 0 |
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
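
# Usage sketch (illustrative): Dataset.from_generator is the public entry point
# that ends up driving the reader above.
def _example_from_generator():
    from datasets import Dataset

    def gen():
        yield {"text": "hello"}
        yield {"text": "world"}

    return Dataset.from_generator(gen)  # a two-row map-style dataset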
| 147 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
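
# Illustrative: build a deliberately tiny config and read the derived property.
def _tiny_config_example() -> int:
    cfg = XLMProphetNetConfig(
        vocab_size=128,
        hidden_size=16,
        num_encoder_layers=2,
        num_decoder_layers=2,
        num_encoder_attention_heads=2,
        num_decoder_attention_heads=2,
    )
    return cfg.num_hidden_layers  # 4 == num_encoder_layers + num_decoder_layers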
| 147 | 1 |
'''simple docstring'''
from collections.abc import Callable
class Heap:
    """
    A generic Heap class, can be used as min or max by passing the key function
    accordingly.
    """

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs the changes required for swapping two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares two items using their computed key values."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """
        Returns index of valid parent as per the desired ordering among the given
        index and both its children.
        """
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction from the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction from the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last element into the freed slot and reheapify.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change, so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        """Returns the top item of the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        """Returns the top item of the heap and removes it, if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def test_heap() -> None:
    """
    >>> h = Heap()  # Max-heap
    >>> h.insert_item(5, 34)
    >>> h.insert_item(6, 31)
    >>> h.insert_item(7, 37)
    >>> h.get_top()
    [7, 37]
    >>> h.extract_top()
    [7, 37]
    >>> h.extract_top()
    [5, 34]
    >>> h.extract_top()
    [6, 31]
    """


if __name__ == "__main__":
    import doctest

    doctest.testmod()
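
# Illustrative: negating the key turns the max-heap into a min-heap, since the
# ordering is computed on key(item_value) rather than the raw value.
def _min_heap_example() -> None:
    h = Heap(key=lambda x: -x)
    h.insert_item(1, 10)
    h.insert_item(2, 3)
    assert h.get_top() == [2, -3]  # item 2 has the smallest original value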
| 664 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
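
# Usage sketch: the lazy module defers the real import until attribute access,
# so (with sentencepiece installed) this line triggers tokenization_bartpho:
#     from transformers.models.bartpho import BartphoTokenizer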
| 606 | 0 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculates mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculates the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in the set of unique hashes and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test: keyword in the first few lines
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test: frequency of "config"/"test" relative to file length
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics. NOTE: intentionally shadows the builtin
    `filter`; the name is passed to `ds.filter` below."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicated dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
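
# Invocation sketch (flag names mirror the attributes used above; defaults for
# the remaining heuristics are assumed to come from PreprocessingArguments):
#   python preprocessing.py --dataset_name <hub-dataset-id> \
#       --tokenizer_dir <tokenizer-id> --output_dir ./codeparrot-clean \
#       --near_deduplication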
| 714 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
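
# Illustrative: the tester can also be driven outside pytest (assumes TF is
# installed; the parent only needs unittest-style assert methods):
#     tester = TFFunnelModelTester(unittest.TestCase())
#     tester.create_and_check_model(*tester.prepare_config_and_inputs())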
| 288 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 44 |
'''simple docstring'''
import os


def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
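
# Quick sanity check on a toy 4x4 grid (illustrative, not part of the solution):
#     largest_product([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
# returns 43680, i.e. 13 * 14 * 15 * 16 from the bottom row.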
| 274 | 0 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_SCREAMING_SNAKE_CASE = object()
# For specifying empty leaf dict `{}`
_SCREAMING_SNAKE_CASE = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # Windows of len(qts) in ks, and don't permit empty to match non-empty
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
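# Usage sketch (hedged: `model.params` stands in for a real Flax GPT-2-style
# parameter tree and `unfreeze` is flax's helper; neither is defined in this
# file). `set_partitions` returns a frozen tree of the same shape, mapping each
# matched kernel to a PartitionSpec such as P("mp", None) and everything else
# to None, ready to be consumed by pjit-style model-parallel sharding:
#
#   param_spec = set_partitions(unfreeze(model.params))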
| 489 |
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
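# Worked example (not part of the original module): 19 is 0b10011, so the loop
# shifts five times before the value reaches zero.
#
#   assert get_highest_set_bit_position(19) == 5
#   assert get_highest_set_bit_position(1) == 1
#   assert get_highest_set_bit_position(0) == 0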
if __name__ == "__main__":
import doctest
doctest.testmod()
| 489 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine."


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
| 669 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 669 | 1 |
'''simple docstring'''
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """
    MLFQ (Multi Level Feedback Queue) schedules processes through several
    round-robin queues and finishes the remainder with first-come-first-served.
    """

    def __init__(self, number_of_queues, time_slices, queue, current_time):
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slices of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()

    def calculate_sequence_of_finish_queue(self):
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue):
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue):
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue):
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue):
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process):
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue):
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue, time_slice):
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue

        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self):
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])

        # the last queue uses the first-come-first-served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
if __name__ == "__main__":
import doctest
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:\n{mlfq.calculate_sequence_of_finish_queue()}")
| 474 |
'''simple docstring'''
from string import ascii_uppercase
char_to_index = {char: i for i, char in enumerate(ascii_uppercase)}
index_to_char = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
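# Worked example (illustrative only): the key is cycled until it matches the
# message length, spaces included, so the 17-character message below turns
# "SECRET" into "SECRETSECRETSECRE".
#
#   assert generate_key("THE GERMAN ATTACK", "SECRET") == "SECRETSECRETSECRE"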
def cipher_text(message: str, key_new: str) -> str:
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (char_to_index[letter] - char_to_index[key_new[i]]) % 26
            i += 1
            encrypted += index_to_char[x]
    return encrypted


def original_text(cipher_text: str, key_new: str) -> str:
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (char_to_index[letter] + char_to_index[key_new[i]] + 26) % 26
            i += 1
            or_txt += index_to_char[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 474 | 1 |
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'nezha'

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
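# Minimal usage sketch (a sketch only; it assumes the usual transformers
# config API, and the local path is hypothetical):
#
#   config = NezhaConfig(hidden_size=768, num_hidden_layers=12)
#   config.save_pretrained('./nezha-config')          # writes config.json
#   reloaded = NezhaConfig.from_pretrained('./nezha-config')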
| 80 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))

    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_5[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)

    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
| 80 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
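# Hedged usage sketch ("invoice.png" is a hypothetical local file, not an
# asset of this repo): the PipelineTool base class makes the instance
# callable, chaining encode -> forward -> decode on an (image, question) pair.
#
#   from PIL import Image
#   tool = DocumentQuestionAnsweringTool()
#   answer = tool(Image.open("invoice.png"), "What is the invoice total?")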
| 74 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 74 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_blip'] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blip'] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blip'] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 598 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """
    Return the longest non-decreasing subsequence of `array`.
    """
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
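# Worked example: the longest non-decreasing run through
# [10, 22, 9, 33, 21, 50, 41, 60, 80] is [10, 22, 33, 41, 60, 80].
#
#   assert longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) == [10, 22, 33, 41, 60, 80]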
if __name__ == "__main__":
import doctest
doctest.testmod()
| 598 | 1 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, '', x) for x in predictions])
                references = np.array([re.sub(s, '', x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('', '', string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans('', '', string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {'exact_match': np.mean(score_list) * 100}
| 703 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location='cpu')
    if 'model' in sd.keys():
        sd = torch.load(checkpoint_path, map_location='cpu')['model']

    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.', '.q_proj.')
            k_name = key.replace('.qkv_proj.', '.k_proj.')
            v_name = key.replace('.qkv_proj.', '.v_proj.')

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` keeps the QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    sd = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(sd)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 340 | 0 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
snake_case = True
def _get_default_logging_level():
    """Return the default level, honoring the TRANSFORMERS_VERBOSITY env variable when it is set."""
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }" )
    return _default_log_level


def _get_library_name():
    return __name__.split(".")[0]


def _get_library_root_logger():
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger():
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False
def _reset_library_root_logger():
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name=None):
    """Return a logger with the specified name; defaults to the library root logger."""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity():
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity):
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler():
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler():
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler):
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler):
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation():
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation():
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format():
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format():
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """Emit a warning unless TRANSFORMERS_NO_ADVISORY_WARNINGS is set in the environment."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Emit a warning only once per unique call arguments."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm that does nothing, used when progress bars are disabled."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled():
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
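

# Illustrative usage sketch (not part of the module): how downstream code is
# expected to drive this API. The logger name below is an assumption.
if __name__ == "__main__":
    logger = get_logger("transformers.example")
    set_verbosity_info()
    logger.info("visible at INFO verbosity")
    set_verbosity_error()
    logger.info("suppressed at ERROR verbosity")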
| 62 |
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 353 | 0 |
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """Thin wrapper over a deepspeed config that adds dotted-path lookups and stage/offload helpers."""

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}" )
        self.config = config
        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        self._stage = self.get_value("zero_optimization.stage", -1)
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ] )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key
        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class DeepSpeedEngineWrapper:
    """Wrapper over a deepspeed engine so Accelerate can drive it like a plain model."""

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        self.engine.backward(loss, **kwargs)
        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.
class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        """Whether or not the optimizer step was skipped because of gradient overflow."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    """Placeholder optimizer used when the real optimizer comes from the deepspeed config."""

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """Placeholder scheduler used when the real scheduler comes from the deepspeed config."""

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
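

# Illustrative sketch (not part of the accelerate module): DummyOptim/DummyScheduler
# stand in when the real optimizer/scheduler are built from the deepspeed config.
# The model below is a made-up example.
def _demo_dummy_optim():
    import torch

    model = torch.nn.Linear(2, 2)
    optimizer = DummyOptim(model.parameters(), lr=3e-4)
    scheduler = DummyScheduler(optimizer, total_num_steps=1000, warmup_num_steps=100)
    # Accelerate later replaces these with the real deepspeed-configured objects.
    return optimizer, scheduler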
| 701 |
"""Backtracking search for a Hamiltonian cycle in an undirected graph given as an adjacency matrix."""


def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path, otherwise return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
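

# Illustrative usage (made-up adjacency matrix): a 5-vertex graph containing
# the Hamiltonian cycle 0 -> 1 -> 2 -> 4 -> 3 -> 0.
if __name__ == "__main__":
    example_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(example_graph))  # [0, 1, 2, 4, 3, 0]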
| 358 | 0 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''')
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''}, open(processor_tmpfile, '''w''' ), )
            json.dump({'''model_type''': '''clip'''}, open(config_tmpfile, '''w''' ))
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname) / '''config.json'''
            json.dump(
                {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''}, open(processor_tmpfile, '''w''' ), )
            json.dump({'''model_type''': '''clip'''}, open(config_tmpfile, '''w''' ))
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()
            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''}, open(processor_tmpfile, '''w''' ), )
            json.dump({'''model_type''': '''clip'''}, open(config_tmpfile, '''w''' ))
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop('''image_processor_type''')
            config = CLIPImageProcessor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue('''_processor_class''' not in dict_as_saved)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''}, open(processor_tmpfile, '''w''' ), )
            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, '''clip-base is not a local folder and is not a valid model identifier''' ):
            _ = AutoImageProcessor.from_pretrained('''clip-base''')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='''aaaaaa''')

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''', ):
            _ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''')

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            _ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            _ = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=False )
        image_processor = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=True )
        self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''')
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, '''NewImageProcessor''')

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register('''custom''', CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)
            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
                config_tmpfile = Path(tmpdirname) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''}, open(processor_tmpfile, '''w''' ), )
                json.dump({'''model_type''': '''clip'''}, open(config_tmpfile, '''w''' ))
                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)
                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register('''custom''', CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''')
            self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''')
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=False )
            self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''')
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=True )
            self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''')
            self.assertTrue(not hasattr(image_processor, '''is_local'''))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
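

# Illustrative usage (not part of the module; the overrides are made-up values
# for a smaller-than-default model).
def _demo_config():
    return BertGenerationConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)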
| 103 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
], )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35_378, 6_661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE__ = {"input_ids": [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = SCREAMING_SNAKE_CASE__
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="xlm-roberta-base", revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3", )
| 712 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_electra_fast'''] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_electra'''] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_electra'''] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_electra'''] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
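

# Minimal sketch (an assumption, not the transformers implementation) of the
# lazy-import pattern used above: attribute access triggers the real submodule
# import the first time a name is requested.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)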
| 157 | 0 |
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            ValueError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
| 157 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 157 | 1 |
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
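

# Constant-memory alternative (not in the original file): Floyd's
# tortoise-and-hare cycle detection, sketched here for comparison with the
# visited-list approach above, which needs O(n) extra memory.
def has_loop_floyd(head: Node | None) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # advances one node per step
        fast = fast.next_node.next_node  # advances two nodes per step
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False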
| 705 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_time_series_transformer'''] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 314 | 0 |
'''simple docstring'''
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )


class FlaxLogitsProcessorList(list):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, cur_len, **kwargs):
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor." )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature

    def __call__(self, input_ids, scores, cur_len):
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p, filter_value=-float("Inf"), min_tokens_to_keep=1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids, scores, cur_len):
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k, filter_value=-float("Inf"), min_tokens_to_keep=1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids, scores, cur_len):
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)
        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length, eos_token_id):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length, eos_token_id):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids, scores, cur_len):
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids, scores, cur_len):
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is past the force_token_array, nothing is forced anymore.
            lambda: scores,
            # Otherwise, a token may be forced at this position.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (non-negative) tokens are forced.
                lambda: _force_token(cur_len),
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|>, which is handled by `without_timestamps`
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin, True and last_was_timestamp, False, )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin, True, penultimate_was_timestamp, )
            return jnp.where(
                last_was_timestamp, jnp.where(
                    penultimate_was_timestamp > 0, scores_k.at[self.timestamp_begin :].set(-float("inf")), scores_k.at[: self.eos_token_id].set(-float("inf")), ), scores_k, )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None, True and apply_max_initial_timestamp, False, )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp, scores.at[:, last_allowed + 1 :].set(-float("inf")), scores, )
        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob, scores_k.at[: self.timestamp_begin].set(-float("inf")), scores_k, )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
        return scores
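

# Illustrative sketch (not part of the module): chaining a temperature warper
# and a top-k warper with FlaxLogitsProcessorList. Shapes and values are made up.
def _demo_processor_list():
    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=2)]
    )
    input_ids = jnp.zeros((1, 4), dtype=jnp.int32)
    scores = jnp.array([[0.1, 0.5, 0.2, 0.9]])
    # all logits outside the top 2 are pushed to -inf after temperature scaling
    return processors(input_ids, scores, cur_len=4)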
| 5 |
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
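

# Illustrative sanity checks (not part of the original script).
def _demo_is_balanced():
    assert is_balanced("([]{})")  # every bracket closed in the right order
    assert not is_balanced("([)]")  # interleaved pair fails on the first mismatch
    assert not is_balanced("(((")  # unclosed brackets leave a non-empty stack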
| 606 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator,
            num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 623 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase_( lowercase_ : str = "laptop" ) -> DataFrame:
_lowerCamelCase = F"""https://www.amazon.in/laptop/s?k={product}"""
_lowerCamelCase = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
_lowerCamelCase = BeautifulSoup(requests.get(lowercase_ , headers=lowercase_ ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCamelCase = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
_lowerCamelCase = item.ha.text
_lowerCamelCase = '''https://www.amazon.in/''' + item.ha.a['''href''']
_lowerCamelCase = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
_lowerCamelCase = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
_lowerCamelCase = '''Not available'''
try:
_lowerCamelCase = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
_lowerCamelCase = ''''''
try:
_lowerCamelCase = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 1_00 )
except ValueError:
_lowerCamelCase = float('''nan''' )
except AttributeError:
pass
_lowerCamelCase = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCamelCase = ''' '''
_lowerCamelCase = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = '''headphones'''
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 623 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_lowerCAmelCase = logging.getLogger(__name__)
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Dict = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=_lowerCamelCase , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=_lowerCamelCase , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=_lowerCamelCase , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=_lowerCamelCase , default=1000 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=_lowerCamelCase , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=_lowerCamelCase , type=_lowerCamelCase , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=_lowerCamelCase , default=512 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=_lowerCamelCase , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
_lowerCAmelCase : List[Any] = parser.parse_args()
return args
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
def fn(_lowerCamelCase ):
return tokenizer(examples['text'] )
return fn
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = []
for i in range(len(tokenized_data['input_ids'] ) ):
_lowerCAmelCase : Union[str, Any] = {
'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ),
'attention_mask': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ),
}
_lowerCAmelCase : Optional[Any] = tf.train.Features(feature=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = tf.train.Example(features=_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = example.SerializeToString()
records.append(_lowerCamelCase )
return records
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
_lowerCAmelCase : str = min(len(_lowerCamelCase ) , args.limit )
_lowerCAmelCase : List[str] = dataset.select(range(_lowerCamelCase ) )
print(f"""Limiting the dataset to {args.limit} entries.""" )
_lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
_lowerCAmelCase : Any = os.path.join(args.output_dir , args.split )
if not os.path.exists(_lowerCamelCase ):
os.makedirs(_lowerCamelCase )
else:
_lowerCAmelCase : List[str] = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
_lowerCAmelCase : Optional[int] = tokenize_function(_lowerCamelCase )
_lowerCAmelCase : Any = dataset.map(_lowerCamelCase , batched=_lowerCamelCase , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(_lowerCamelCase ):
# Concatenate all texts.
_lowerCAmelCase : List[str] = {k: sum(examples[k] , [] ) for k in examples.keys()}
_lowerCAmelCase : str = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
_lowerCAmelCase : List[Any] = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
_lowerCAmelCase : Dict = {
k: [t[i : i + args.max_length] for i in range(0 , _lowerCamelCase , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
_lowerCAmelCase : List[str] = dataset_tokenized.map(_lowerCamelCase , batched=_lowerCamelCase , batch_size=1000 , num_proc=4 )
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : List[Any] = 0
for shard in range(0 , len(_lowerCamelCase ) , args.shard_size ):
_lowerCAmelCase : Dict = grouped_dataset[shard : shard + args.shard_size]
_lowerCAmelCase : Union[str, Any] = len(dataset_snapshot['input_ids'] )
_lowerCAmelCase : List[Any] = os.path.join(_lowerCamelCase , f"""dataset-{shard_count}-{records_containing}.tfrecord""" )
_lowerCAmelCase : str = get_serialized_examples(_lowerCamelCase )
with tf.io.TFRecordWriter(_lowerCamelCase ) as out_file:
for i in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : Dict = serialized_examples[i]
out_file.write(_lowerCamelCase )
print('Wrote file {} containing {} records'.format(_lowerCamelCase , _lowerCamelCase ) )
shard_count += 1
total_records += records_containing
with open(f"""split-{args.split}-records-count.txt""" , 'w' ) as f:
print(f"""Total {args.split} records: {total_records}""" , file=_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = parse_args()
main(args)
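# Illustrative invocation (added; the script filename is hypothetical):
#   python prepare_tfrecord_shards.py --dataset_name wikitext --dataset_config wikitext-103-raw-v1 \
#       --split train --shard_size 1000 --output_dir tf-tpu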
| 259 |
"""simple docstring"""
import random
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = num - 1
_lowerCAmelCase : List[Any] = 0
while s % 2 == 0:
_lowerCAmelCase : Tuple = s // 2
t += 1
for _ in range(5 ):
_lowerCAmelCase : Dict = random.randrange(2 , num - 1 )
_lowerCAmelCase : str = pow(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if v != 1:
_lowerCAmelCase : Union[str, Any] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
_lowerCAmelCase : str = i + 1
_lowerCAmelCase : List[str] = (v**2) % num
return True
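# Illustrative (added): the test is probabilistic, but small cases behave as expected
# in practice, e.g. rabin_miller(97) -> True and rabin_miller(91) -> False (91 = 7 * 13).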
def is_prime_low_num(num: int) -> bool:
    """Quick trial division against small primes, then fall back to Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a probable prime with roughly `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 259 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 702 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
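# Illustrative invocation (added; the script filename is hypothetical):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small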
| 142 | 0 |
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the leftmost insertion point for `item` in `sorted_collection`."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the rightmost insertion point for `item` in `sorted_collection`."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; returns the index of `item` or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search using the standard library's bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
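# Illustrative examples (added): bisect_left returns the leftmost insertion point,
# bisect_right the rightmost; binary_search returns an index or None.
assert bisect_left([0, 5, 7, 10, 15], 5) == 1
assert bisect_right([0, 5, 7, 10, 15], 5) == 2
assert binary_search([0, 5, 7, 10, 15], 6) is None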
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 5 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 317 | 0 |
def sum_of_digits(n: int) -> int:
    """Iterative digit sum of the absolute value of n."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursive digit sum."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """One-liner digit sum via string conversion."""
    return sum(int(c) for c in str(abs(n)))
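# Illustrative check (added): all three implementations agree, including on negatives.
assert sum_of_digits(-123) == sum_of_digits_recursion(-123) == sum_of_digits_compact(-123) == 6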
def benchmark() -> None:
    """Benchmark the three digit-sum implementations on increasingly large inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 720 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
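# Illustrative (added): with the default conv_stride (5, 2, 2, 2, 2, 2, 2), the property
# above returns 5 * 2**6 = 320, i.e. one encoder frame per 320 raw audio samples.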
| 375 | 0 |
"""simple docstring"""
import os
from pathlib import Path
def _snake_case ( ):
"""simple docstring"""
from torch.utils.cpp_extension import load
_lowerCamelCase : Optional[Any] = Path(__snake_case ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
_lowerCamelCase : int = [
root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
]
]
load(
"""MultiScaleDeformableAttention""" , __snake_case , with_cuda=__snake_case , extra_include_paths=[str(__snake_case )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
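# Illustrative usage (added): callers typically load the extension lazily the first time a
# deformable-attention forward pass needs it, e.g.
#   MultiScaleDeformableAttention = load_cuda_kernels()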
| 88 |
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
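# Note (added): this stub is what gets imported when `transformers`, `torch`, or `note_seq`
# is missing; using it raises an informative ImportError instead of failing at import time.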
| 442 | 0 |
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Add a vertex to the graph if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected, weighted edge (self-loops are ignored)."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct by bumping ties, preserving relative order."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind(object):
        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)

            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Compute a minimum spanning tree with Boruvka's algorithm."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)

                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
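# Illustrative example (added): a triangle keeps its two lightest edges.
#   g = Graph.build(vertices=[1, 2, 3], edges=[[1, 2, 1], [2, 3, 2], [1, 3, 3]])
#   mst = Graph.boruvka_mst(g)
#   print(mst)  # each undirected MST edge is stored in both directions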
| 651 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(self, vocab_size=32000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=10000, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=31996, eos_token_id=31999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
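# Illustrative (added): GPTNeoXJapaneseConfig() reproduces the abeja/gpt-neox-japanese-2.7b
# defaults above: hidden_size=2560, 32 layers, 32 heads, and rotary embeddings applied to
# all head dimensions (rotary_pct=1.0).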
| 651 | 1 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__a = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __a( datasets.BuilderConfig ):
"""simple docstring"""
lowerCAmelCase = None
def lowerCamelCase__ ( _lowercase , _lowercase , ):
'''simple docstring'''
import pyspark
def generate_fn():
UpperCAmelCase_ : Optional[int] = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
for partition_id in partition_order:
UpperCAmelCase_ : Union[str, Any] = df_with_partition_id.select('''*''' ).where(f'''part_id = {partition_id}''' ).drop('''part_id''' )
UpperCAmelCase_ : int = partition_df.collect()
UpperCAmelCase_ : str = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class __a( _BaseExamplesIterable ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,) -> Dict:
UpperCAmelCase_ : str = df
UpperCAmelCase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
UpperCAmelCase_ : str = _generate_iterable_examples(self.df ,self.partition_order )
def __iter__( self ) -> Dict:
yield from self.generate_examples_fn()
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> "SparkExamplesIterable":
UpperCAmelCase_ : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(_SCREAMING_SNAKE_CASE )
return SparkExamplesIterable(self.df ,partition_order=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> "SparkExamplesIterable":
UpperCAmelCase_ : Dict = self.split_shard_indices_by_worker(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
return SparkExamplesIterable(self.df ,partition_order=_SCREAMING_SNAKE_CASE )
@property
def a__ ( self ) -> int:
return len(self.partition_order )
class __a( datasets.DatasetBuilder ):
"""simple docstring"""
lowerCAmelCase = SparkConfig
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> Optional[Any]:
import pyspark
UpperCAmelCase_ : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate()
UpperCAmelCase_ : Union[str, Any] = df
UpperCAmelCase_ : Optional[Any] = working_dir
super().__init__(
cache_dir=_SCREAMING_SNAKE_CASE ,config_name=str(self.df.semanticHash() ) ,**_SCREAMING_SNAKE_CASE ,)
def a__ ( self ) -> int:
# Returns the path of the created file.
def create_cache_and_write_probe(_SCREAMING_SNAKE_CASE ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir ,exist_ok=_SCREAMING_SNAKE_CASE )
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(_SCREAMING_SNAKE_CASE ,'''a''' )
return [probe_file]
if self._spark.conf.get('''spark.master''' ,'''''' ).startswith('''local''' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
UpperCAmelCase_ : Tuple = (
self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(_SCREAMING_SNAKE_CASE ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
def a__ ( self ) -> Tuple:
return datasets.DatasetInfo(features=self.config.features )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Any:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
import pyspark
def get_arrow_batch_size(_SCREAMING_SNAKE_CASE ):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
UpperCAmelCase_ : Union[str, Any] = self.df.count()
UpperCAmelCase_ : Tuple = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
UpperCAmelCase_ : Tuple = (
self.df.limit(_SCREAMING_SNAKE_CASE )
.repartition(1 )
.mapInArrow(_SCREAMING_SNAKE_CASE ,'''batch_bytes: long''' )
.agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
UpperCAmelCase_ : Dict = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
UpperCAmelCase_ : List[Any] = min(_SCREAMING_SNAKE_CASE ,int(approx_total_size / max_shard_size ) )
UpperCAmelCase_ : List[str] = self.df.repartition(_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
import pyspark
UpperCAmelCase_ : Dict = ParquetWriter if file_format == '''parquet''' else ArrowWriter
UpperCAmelCase_ : Dict = os.path.join(self._working_dir ,os.path.basename(_SCREAMING_SNAKE_CASE ) ) if self._working_dir else fpath
UpperCAmelCase_ : List[Any] = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
UpperCAmelCase_ : Any = self.config.features
UpperCAmelCase_ : Any = self._writer_batch_size
UpperCAmelCase_ : Optional[Any] = self._fs.storage_options
def write_arrow(_SCREAMING_SNAKE_CASE ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
UpperCAmelCase_ : List[Any] = pyspark.TaskContext().taskAttemptId()
UpperCAmelCase_ : List[str] = next(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] ,names=['''task_id''', '''num_examples''', '''num_bytes'''] ,)
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : Optional[int] = writer_class(
features=_SCREAMING_SNAKE_CASE ,path=working_fpath.replace('''SSSSS''' ,f'''{shard_id:05d}''' ).replace('''TTTTT''' ,f'''{task_id:05d}''' ) ,writer_batch_size=_SCREAMING_SNAKE_CASE ,storage_options=_SCREAMING_SNAKE_CASE ,embed_local_files=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : str = pa.Table.from_batches([first_batch] )
writer.write_table(_SCREAMING_SNAKE_CASE )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
UpperCAmelCase_, UpperCAmelCase_ : Tuple = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=['''task_id''', '''num_examples''', '''num_bytes'''] ,)
shard_id += 1
UpperCAmelCase_ : Optional[int] = writer_class(
features=writer._features ,path=working_fpath.replace('''SSSSS''' ,f'''{shard_id:05d}''' ).replace('''TTTTT''' ,f'''{task_id:05d}''' ) ,writer_batch_size=_SCREAMING_SNAKE_CASE ,storage_options=_SCREAMING_SNAKE_CASE ,embed_local_files=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : Optional[int] = pa.Table.from_batches([batch] )
writer.write_table(_SCREAMING_SNAKE_CASE )
if writer._num_bytes > 0:
UpperCAmelCase_, UpperCAmelCase_ : Any = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=['''task_id''', '''num_examples''', '''num_bytes'''] ,)
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_ : Dict = os.path.join(os.path.dirname(_SCREAMING_SNAKE_CASE ) ,os.path.basename(_SCREAMING_SNAKE_CASE ) )
shutil.move(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = (
self.df.mapInArrow(_SCREAMING_SNAKE_CASE ,'''task_id: long, num_examples: long, num_bytes: long''' )
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) ,pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) ,pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) ,pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) ,)
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = "arrow" ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> int:
self._validate_cache_dir()
UpperCAmelCase_ : str = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = not is_remote_filesystem(self._fs )
UpperCAmelCase_ : Dict = os.path.join if is_local else posixpath.join
UpperCAmelCase_ : str = '''-TTTTT-SSSSS-of-NNNNN'''
UpperCAmelCase_ : List[Any] = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
UpperCAmelCase_ : Optional[Any] = path_join(self._output_dir ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : Dict = []
for task_id, content in self._prepare_split_single(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
            num_examples, num_bytes, num_shards, shard_lengths = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = total_num_examples
UpperCAmelCase_ : str = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
UpperCAmelCase_ : str = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
UpperCAmelCase_ : List[Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,):
rename(
_SCREAMING_SNAKE_CASE ,fpath.replace('''SSSSS''' ,f'''{shard_id:05d}''' ).replace('''TTTTT''' ,f'''{task_id:05d}''' ) ,fpath.replace('''TTTTT-SSSSS''' ,f'''{global_shard_id:05d}''' ).replace('''NNNNN''' ,f'''{total_shards:05d}''' ) ,)
UpperCAmelCase_ : str = []
UpperCAmelCase_ : Any = 0
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_, UpperCAmelCase_ : Tuple = task_id_and_num_shards[i]
for shard_id in range(_SCREAMING_SNAKE_CASE ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(_SCREAMING_SNAKE_CASE ,len(_SCREAMING_SNAKE_CASE ) ).map(lambda _SCREAMING_SNAKE_CASE : _rename_shard(*_SCREAMING_SNAKE_CASE ) ).collect()
else:
# don't use any pattern
UpperCAmelCase_ : Tuple = 0
UpperCAmelCase_ : List[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' ,f'''{shard_id:05d}''' ).replace('''TTTTT''' ,f'''{task_id:05d}''' ) ,fpath.replace(_SCREAMING_SNAKE_CASE ,'''''' ) ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,) -> SparkExamplesIterable:
return SparkExamplesIterable(self.df ) | 30 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each prompt `n_copies` times."""
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks whether all generated functions in the batch are completed."""
    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the generated code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task in the dataset, gathering results across processes."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase_ : Tuple = "false"
if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
    accelerator = Accelerator()
set_seed(args.seed , device_specific=__UpperCAmelCase )
# Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
# Generation settings
    gen_kwargs = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , __UpperCAmelCase , __UpperCAmelCase )] ),
}
# Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
# do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
# Run a quick test to see if code evaluation is enabled
try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )
if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)
# Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers)
print(f"Results: {pass_at_k}" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
            json.dump(pass_at_k, fp)
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
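# Hedged aside, not part of the original script: `code_eval` reports pass@k
# using the unbiased estimator from the Codex paper, 1 - C(n-c, k) / C(n, k).
# A minimal reference implementation for n samples of which c are correct:
def _pass_at_k_estimate(n: int, c: int, k: int) -> float:
    if n - c < k:
        return 1.0
    result = 1.0
    for i in range(n - c + 1, n + 1):
        result *= 1.0 - k / i
    return 1.0 - result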
| 488 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
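    # Minimal illustration (assumption, not part of the original module): with
    # `_LazyModule`, `import transformers.models.mluke` stays cheap; the heavy
    # sentencepiece-backed import only runs on first attribute access, e.g.
    #   transformers.models.mluke.MLukeTokenizer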
| 715 |
'''simple docstring'''
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
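# Hedged sketch (the `require_version` helper is assumed from diffusers'
# utils, not defined in this file) of how a pin table like this is consumed:
#   from .utils.versions import require_version
#   require_version(deps["torch"])  # raises if the installed torch is < 1.4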
| 644 | 0 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 698 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowerCamelCase__ : str = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(self, questions, titles: Optional[str] = None, texts: Optional[str] = None,
                 padding: Union[bool, str] = False, truncation: Union[bool, str] = False,
                 max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None,
                 return_attention_mask: Optional[bool] = None, **kwargs) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput,
                          num_spans: int = 16, max_answer_length: int = 64,
                          num_spans_per_passage: int = 4) -> List[DPRSpanPrediction]:
        """Get the span predictions for the extractive Q&A model, ranked by passage relevance."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) )
            if len(nbest_spans_predictions) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
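    # Illustrative usage sketch (checkpoint name and the model call are assumed,
    # not taken from this file):
    #   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
    #   encoded = tokenizer(questions="Who wrote Hamlet?", titles="Hamlet", texts="...", return_tensors="pt")
    #   spans = tokenizer.decode_best_spans(encoded, model(**encoded))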
    def _get_best_spans(self, start_logits: List[int], end_logits: List[int],
                        max_answer_length: int, top_spans: int) -> List[DPRSpanPrediction]:
        """Finds the best answer spans for one passage, keeping at most `top_spans` non-overlapping spans."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer | 698 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)
    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt").input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device), decoder_input_ids=inputs["decoder_input_ids"].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True, ).sequences
    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"] | 704 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/facebook/data2vec-text-base/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] ) | 191 | 0 |
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 72 |
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using rolling-hash comparison."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
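# Numeric sanity check of the rolling hash above (tiny hand-verified example):
# rolling the window from "ab" to "bc" must equal hashing "bc" directly.
_h_ab = (ord("b") + ord("a") * alphabet_size) % modulus
_h_rolled = ((_h_ab - ord("a") * alphabet_size) * alphabet_size + ord("c")) % modulus
assert _h_rolled == (ord("c") + ord("b") * alphabet_size) % modulus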
def test_rabin_karp() -> None:
    # Test 1)
    pattern = 'abc1abc12'
    text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text2 = 'alskfjaldsk23adsfabcabc'
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = 'Lü'
    text = 'Lüsai'
    assert rabin_karp(pattern, text)
    pattern = 'Lue'
    assert not rabin_karp(pattern, text)
    print('Success.')
if __name__ == "__main__":
test_rabin_karp() | 352 | 0 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight")
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias")
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml")
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)
    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
lowercase = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
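    # Example invocation (script name and paths hypothetical):
    #   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae-diffusers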
| 700 |
def aliquot_sum(input_num: int) -> int:
    """Return the sum of the proper divisors of `input_num`."""
    if not isinstance(input_num, int):
        raise ValueError('Input must be an integer')
    if input_num <= 0:
        raise ValueError('Input must be positive')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
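# Quick hand-verified example: the proper divisors of 12 are 1, 2, 3, 4 and 6.
assert aliquot_sum(12) == 16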
if __name__ == "__main__":
import doctest
doctest.testmod()
| 607 | 0 |
def is_pentagonal(n: int) -> bool:
    """Returns True if n is pentagonal, False otherwise."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution(limit: int = 5_000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
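# Why the check in `is_pentagonal` works (derivation, not in the original):
# P(n) = n(3n - 1) / 2, so solving 3n^2 - n - 2x = 0 gives
# n = (1 + sqrt(1 + 24x)) / 6; x is pentagonal exactly when that n is a
# positive integer, i.e. when (1 + root) / 6 has no fractional part.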
if __name__ == "__main__":
print(f"""{solution() = }""")
| 234 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
__SCREAMING_SNAKE_CASE =["""bart.large""", """bart.large.mnli""", """bart.large.cnn""", """bart_xsum/model.pt"""]
__SCREAMING_SNAKE_CASE ={"""bart.large""": BartModel, """bart.large.mnli""": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("""0.9.0"""):
raise Exception("""requires fairseq >= 0.9.0""")
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE =""" Hello world! cécé herlolip"""
__SCREAMING_SNAKE_CASE =[
("""model.classification_heads.mnli.dense.weight""", """classification_head.dense.weight"""),
("""model.classification_heads.mnli.dense.bias""", """classification_head.dense.bias"""),
("""model.classification_heads.mnli.out_proj.weight""", """classification_head.out_proj.weight"""),
("""model.classification_heads.mnli.out_proj.bias""", """classification_head.out_proj.bias"""),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/paste/tweak fairseq BART weights into the transformers structure."""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)
    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}")
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}")
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum"""
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
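    # Example invocation (script name and paths hypothetical):
    #   python convert_bart_checkpoint.py bart.large.cnn /tmp/bart-large-cnn --hf_config facebook/bart-large-cnn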
| 234 | 1 |
import comet # From: unbabel-comet
import torch
import datasets
lowercase_: Tuple = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'
_DESCRIPTION = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
_KWARGS_DESCRIPTION = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ (datasets.Metric ):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))
    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 718 |
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ (datasets.Metric ):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
    def _compute(self, predictions, references, regexes_to_ignore=None, ignore_case=False,
                 ignore_punctuation=False, ignore_numbers=False):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)
        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 127 | 0 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
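    # Worked micro-example (maze values assumed: 0 = open, 1 = blocked):
    # solve_maze([[0, 1], [0, 0]]) walks (0,0) -> (1,0) -> (1,1) and prints
    # the visited-cell matrix [[1, 0], [1, 1]].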
| 301 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 301 | 1 |
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError('String must only contain alphabetic characters.')
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
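# Quick hand-verified examples: an isogram never repeats a letter.
assert is_isogram("Uncopyrightable") is True
assert is_isogram("allowance") is False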
if __name__ == "__main__":
    input_str = input('Enter a string ').strip()
    isogram = is_isogram(input_str)
print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 407 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list) -> float:
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method='nm')
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel='rbf', C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
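# Illustrative example (added, not part of the original file): three model
# forecasts vote on whether the actual value looks consistent with them. The
# numbers below are made up purely to show the expected call shape:
#
#     data_safety_checker([100.2, 100.4, 100.1], 100.3)  # -> True/False vote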
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=['total_user', 'total_even', 'days']
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data; `tst_user` holds a single value, so
    # pass that value (not the one-element list) to the checker
    not_str = '' if data_safety_checker(res_vote, tst_user[0]) else 'not '
    print(f"Today's data is {not_str}safe.")
| 407 | 1 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper
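# Illustrative usage (added, not part of the original file): the decorator
# makes the wrapped function return its wall-clock duration in seconds
# instead of its own result.
#
#     @get_duration
#     def timed_sum():
#         return sum(range(1_000))
#
#     timed_sum()  # returns a small float such as 2.1e-05, not the sum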
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == 'string':
                    data = 'The small grey turtle was surprisingly fast when challenged.'
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.')

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
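# Illustrative usage sketch (added, not part of the original file); the feature
# spec and path below are placeholder assumptions, chosen only to show the call:
#
#     features = datasets.Features(
#         {"text": datasets.Value("string"), "score": datasets.Value("float32")}
#     )
#     dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=10)
#     assert len(dataset) == 10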
| 192 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = 'RegNetConfig'

# Base docstring
_CHECKPOINT_FOR_DOC = 'facebook/regnet-y-040'
_EXPECTED_OUTPUT_SHAPE = [1, 1_0_8_8, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'facebook/regnet-y-040'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'facebook/regnet-y-040',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = 'relu', ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False, )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act)
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, hidden_state: Tensor):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1), nn.ReLU(), nn.Conv2d(reduced_channels, in_channels, kernel_size=1), nn.Sigmoid(), )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None), )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None), )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config, in_channels, out_channels, stride=stride, ), *[layer(config, out_channels, out_channels) for _ in range(depth - 1)], )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], ) )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = 'regnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.', REGNET_START_DOCSTRING, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """, REGNET_START_DOCSTRING, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(), )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def forward(self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'

            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
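# Illustrative usage sketch (added for clarity, not part of the original file);
# `image` is assumed to be a PIL image supplied by the caller:
#
#     from transformers import AutoImageProcessor
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     predicted_label = model.config.id2label[logits.argmax(-1).item()]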
| 192 | 1 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
        'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.',
        FutureWarning,
    )
| 712 |
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b
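# Illustrative checks (added, not part of the original file):
assert gcd(48, 18) == 6
# find_mod_inverse (defined below) gives e.g. find_mod_inverse(7, 26) == 15,
# the classic affine-cipher example, since (7 * 15) % 26 == 1.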
def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
| 621 | 0 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: Optional[str] = field(
        default=None, metadata={
            'help': (
                'The model checkpoint for weights initialization. Leave None if you want to train a model from'
                ' scratch.'
            )
        } , )
    model_type: Optional[str] = field(
        default=None, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)} , )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    train_data_file: Optional[str] = field(
        default=None, metadata={'help': 'The input training data file (a text file).'})
    train_data_files: Optional[str] = field(
        default=None, metadata={
            'help': (
                'The input training data files (multiple files in glob format). '
                'Very often splitting large files to smaller files can prevent tokenizer going out of memory'
            )
        } , )
    eval_data_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
    train_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
    eval_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
    line_by_line: bool = field(
        default=False, metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
    mlm: bool = field(
        default=False, metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'})
    whole_word_mask: bool = field(default=False, metadata={'help': 'Whether or not to use whole word mask.'})
    mlm_probability: float = field(
        default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'})
    plm_probability: float = field(
        default=1 / 6, metadata={
            'help': (
                'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
                ' modeling.'
            )
        } , )
    max_span_length: int = field(
        default=5, metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'})
    block_size: int = field(
        default=-1, metadata={
            'help': (
                'Optional input sequence length after tokenization.'
                'The training dataset will be truncated in block of this size for training.'
                'Default to the model max input length for single sentence inputs (take into account special tokens).'
            )
        } , )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set whole word masking and mlm to True for Chinese Whole Word Mask')
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path, )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir, )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it, and load it from here, using --tokenizer_name')

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).' )
    if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model
        data_args.block_size = tokenizer.max_len
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets

    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length, )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, prediction_loss_only=True, )
# Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output['eval_loss'])
        result = {'perplexity': perplexity}

        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_lm.txt')
        if trainer.is_world_master():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key in sorted(result.keys()):
                    logger.info('  %s = %s', key, str(result[key]))
                    writer.write('%s = %s\n' % (key, str(result[key])))

        results.update(result)
return results
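# Illustrative invocation (added, not part of the original file); the paths and
# model name below are placeholders:
#
#     python run_language_modeling.py \
#         --model_name_or_path gpt2 \
#         --train_data_file /path/to/train.txt \
#         --output_dir /tmp/lm-output \
#         --do_train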
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
| 91 |
"""simple docstring"""
import copy
import re
class TrialShortNamer:
    '''simple docstring'''

    PREFIX = 'hp'
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ''
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info['short_word']:
            return info['short_word'][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info['reverse_short_word']:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ''
                while integer != 0:
                    s = chr(ord('A') + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + '#' + int_to_alphabetic(i)
                if sword in info['reverse_short_word']:
                    continue
                else:
                    short_word = sword
                    break

        info['short_word'][word] = short_word
        info['reverse_short_word'][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split('_')

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ['', '_']

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info['reverse_short_param']:
                info['short_param'][param_name] = shortname
                info['reverse_short_param'][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info['short_param'][param_name] = short_name
        info['reverse_short_param'][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            'short_word': {},
            'reverse_short_word': {},
            'short_param': {},
            'reverse_short_param': {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f'You should provide a default value for the param name {k} with value {v}')
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO['short_param'][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = '' if isinstance(v, (int, float)) else '-'
            e = f'{key}{sep}{v}'
            name.append(e)

        return '_'.join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == '':
            values = []
        else:
            values = repr.split('_')

        parameters = {}

        for value in values:
            if '-' in value:
                p_k, p_v = value.split('-')
            else:
                p_k = re.sub('[0-9.]', '', value)
                p_v = float(re.sub('[^0-9.]', '', value))

            key = cls.NAMING_INFO['reverse_short_param'][p_k]

            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
| 91 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4_0_9_6,
"""allenai/longformer-large-4096""": 4_0_9_6,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4_0_9_6,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4_0_9_6,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """simple docstring"""
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, **kwargs, ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8'))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
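# Illustrative usage sketch (added, not part of the original file):
#
#     tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#     tokens = tokenizer.tokenize("Hello world")  # byte-level BPE pieces
#     ids = tokenizer.build_inputs_with_special_tokens(
#         tokenizer.convert_tokens_to_ids(tokens)
#     )  # wrapped in <s> ... </s>, RoBERTa-style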
| 317 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ['image_processor', 'feature_extractor']
    image_processor_class = 'TvltImageProcessor'
    feature_extractor_class = 'TvltFeatureExtractor'

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs, ):
        if images is None and audio is None:
            raise ValueError('You need to specify either an `images` or `audio` input to process.')

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
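# Illustrative usage sketch (added, not part of the original file); the inputs
# are placeholders for real video frames and an audio waveform:
#
#     processor = TvltProcessor(image_processor, feature_extractor)
#     batch = processor(images=video_frames, audio=waveform, sampling_rate=44_100)
#     batch.keys()  # union of the image-processor and feature-extractor outputs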
| 317 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value])


def _convert_yes_no_to_bool(value):
    return {'yes': True, 'no': False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    '''simple docstring'''

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace('<command> [<args>] ', '')
        return usage
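# Illustrative usage sketch (added, not part of the original file):
#
#     mixed_precision = _ask_options(
#         "Do you wish to use FP16 or BF16 (mixed precision)?",
#         ["no", "fp16", "bf16", "fp8"],
#         _convert_mixed_precision,
#     )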
| 50 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'num_waveforms_per_prompt',
            'generator',
            'latents',
            'output_type',
            'return_dict',
            'callback',
            'callback_steps',
        ])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=(32, 64), class_embed_type='simple_projection', projection_class_embeddings_input_dim=32, class_embeddings_concat=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32, )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta', model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=1_6000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'vocoder': vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A hammer hitting a wooden surface',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033])

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs['prompt'] = 3 * [inputs['prompt']]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop('prompt')]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding='max_length', max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors='pt', )
        text_inputs = text_inputs['input_ids'].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs, )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs['prompt_embeds'] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop('prompt')]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding='max_length', max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors='pt', )
            text_inputs = text_inputs['input_ids'].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs, )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs['prompt_embeds'], inputs['negative_prompt_embeds'] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = PNDMScheduler(skip_prk_steps=A_ )
A = AudioLDMPipeline(**A_ )
A = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
inputs = self.get_dummy_inputs(A_ )
negative_prompt = 'egg cracking'
output = audioldm_pipe(**inputs ,negative_prompt=negative_prompt )
audio = output.audios[0]
assert audio.ndim == 1
assert len(audio ) == 256
audio_slice = audio[:10]
expected_slice = np.array(
[-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = PNDMScheduler(skip_prk_steps=A_ )
A = AudioLDMPipeline(**A_ )
A = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
prompt = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
audios = audioldm_pipe(prompt ,num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
batch_size = 2
audios = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
num_waveforms_per_prompt = 2
audios = audioldm_pipe(prompt ,num_inference_steps=2 ,num_waveforms_per_prompt=num_waveforms_per_prompt ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
batch_size = 2
audios = audioldm_pipe(
[prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=num_waveforms_per_prompt ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = AudioLDMPipeline(**A_ )
A = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
inputs = self.get_dummy_inputs(A_ )
output = audioldm_pipe(audio_length_in_s=0.0_16 ,**inputs )
audio = output.audios[0]
assert audio.ndim == 1
assert len(audio ) / vocoder_sampling_rate == 0.0_16
output = audioldm_pipe(audio_length_in_s=0.0_32 ,**inputs )
audio = output.audios[0]
assert audio.ndim == 1
assert len(audio ) / vocoder_sampling_rate == 0.0_32
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
A = self.get_dummy_components()
A = AudioLDMPipeline(**A_ )
A = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
prompt = ['hey']
output = audioldm_pipe(prompt ,num_inference_steps=1 )
audio_shape = output.audios.shape
assert audio_shape == (1, 256)
config = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
audioldm_pipe.vocoder = SpeechTaHifiGan(config ).to(A_ )
output = audioldm_pipe(prompt ,num_inference_steps=1 )
audio_shape = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
self._test_inference_batch_single_identical(test_mean_pixel_difference=A_ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ )
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[Any] ,A_ : str="cpu" ,A_ : List[str]=torch.floataa ,A_ : str=0 ) -> List[Any]:
generator = torch.Generator(device=A_ ).manual_seed(A_ )
latents = np.random.RandomState(A_ ).standard_normal((1, 8, 128, 16) )
latents = torch.from_numpy(latents ).to(device=A_ ,dtype=A_ )
A = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
A = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
inputs = self.get_inputs(A_ )
inputs['num_inference_steps'] = 25
audio = audioldm_pipe(**inputs ).audios[0]
assert audio.ndim == 1
assert len(audio ) == 8_1920
audio_slice = audio[7_7230:7_7240]
expected_slice = np.array(
[-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] )
max_diff = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
A = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
A = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
inputs = self.get_inputs(A_ )
audio = audioldm_pipe(**inputs ).audios[0]
assert audio.ndim == 1
assert len(audio ) == 8_1920
audio_slice = audio[2_7780:2_7790]
expected_slice = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] )
max_diff = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2 | 91 | 0 |
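The two prompt-embeds tests above hinge on one step: L2-normalizing the CLAP text embeddings before they are fed back into the pipeline. A minimal standalone sketch of that step (plain PyTorch; the tensor shapes are illustrative assumptions, not the pipeline's real dimensions):
import torch
import torch.nn.functional as F
# Hypothetical batch of pooled text embeddings: (batch, embed_dim).
prompt_embeds = torch.randn(3, 32)
# L2-normalize each embedding along the feature axis, as in the tests above.
prompt_embeds = F.normalize(prompt_embeds, dim=-1)
# Every row now has unit L2 norm.
assert torch.allclose(prompt_embeds.norm(dim=-1), torch.ones(3), atol=1e-6)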
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
_CITATION = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int32'),
'references': datasets.Value('int32'),
}) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
] , )
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : str=None) -> Optional[int]:
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(lowerCAmelCase , lowerCAmelCase , sample_weight=lowerCAmelCase)),
}
| 702 |
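For reference, in the binary case the sklearn call wrapped above reduces to the confusion-matrix formula MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)). A small sketch computing it by hand (pure Python; the counts are made up for illustration):
from math import sqrt
def mcc_binary(tp: int, tn: int, fp: int, fn: int) -> float:
    # Matthews correlation from a binary confusion matrix.
    denom = sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / denom if denom else 0.0
print(mcc_binary(tp=4, tn=3, fp=1, fn=2))  # ~0.41, always between -1 and +1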
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class UpperCAmelCase__( unittest.TestCase , lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
self.tool = load_tool('text-classification')
self.tool.setup()
self.remote_tool = load_tool('text-classification' , remote=True)
def UpperCAmelCase ( self : Any) -> Tuple:
"""simple docstring"""
lowercase__ = self.tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
lowercase__ = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
| 642 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Any ,A_ : Optional[int] ,A_ : str=7 ,A_ : Any=3 ,A_ : Any=18 ,A_ : Union[str, Any]=30 ,A_ : List[Any]=400 ,A_ : Union[str, Any]=True ,A_ : Dict=32 ,A_ : int=True ,) -> Union[str, Any]:
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = do_resize
A = size_divisor
A = do_rescale
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = GLPNImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
A = GLPNImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ ,'do_resize' ) )
self.assertTrue(hasattr(A_ ,'size_divisor' ) )
self.assertTrue(hasattr(A_ ,'resample' ) )
self.assertTrue(hasattr(A_ ,'do_rescale' ) )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ ,Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A_ ,numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ ,np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A_ ,torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ ,torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) | 91 |
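The three assertions above all check the same invariant: GLPN's resizing snaps both spatial dimensions to a multiple of size_divisor. A minimal sketch of that rounding rule (the helper name is my own; the processor floors to the nearest multiple):
def snap_to_multiple(size: int, divisor: int = 32) -> int:
    # Floor the dimension to the nearest multiple of `divisor`.
    return (size // divisor) * divisor
height, width = 481, 641
print(snap_to_multiple(height), snap_to_multiple(width))  # 480 640, both divisible by 32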
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ =logging.get_logger()
def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = True ) -> Union[str, Any]:
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
__SCREAMING_SNAKE_CASE = timm.create_model('''levit_128s''' , pretrained=UpperCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE = timm.create_model('''levit_128''' , pretrained=UpperCAmelCase__ )
if hidden_sizes == 1_92:
__SCREAMING_SNAKE_CASE = timm.create_model('''levit_192''' , pretrained=UpperCAmelCase__ )
if hidden_sizes == 2_56:
__SCREAMING_SNAKE_CASE = timm.create_model('''levit_256''' , pretrained=UpperCAmelCase__ )
if hidden_sizes == 3_84:
__SCREAMING_SNAKE_CASE = timm.create_model('''levit_384''' , pretrained=UpperCAmelCase__ )
from_model.eval()
__SCREAMING_SNAKE_CASE = LevitForImageClassificationWithTeacher(UpperCAmelCase__ ).eval()
__SCREAMING_SNAKE_CASE = OrderedDict()
__SCREAMING_SNAKE_CASE = from_model.state_dict()
__SCREAMING_SNAKE_CASE = list(from_model.state_dict().keys() )
__SCREAMING_SNAKE_CASE = list(our_model.state_dict().keys() )
print(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for i in range(len(UpperCAmelCase__ ) ):
__SCREAMING_SNAKE_CASE = weights[og_keys[i]]
our_model.load_state_dict(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = torch.randn((2, 3, 2_24, 2_24) )
__SCREAMING_SNAKE_CASE = from_model(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = our_model(UpperCAmelCase__ ).logits
assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ ), "The model logits don't match the original one."
__SCREAMING_SNAKE_CASE = name
print(UpperCAmelCase__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__SCREAMING_SNAKE_CASE = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def _a ( UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = True ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = '''imagenet-1k-id2label.json'''
__SCREAMING_SNAKE_CASE = 10_00
__SCREAMING_SNAKE_CASE = (1, num_labels)
__SCREAMING_SNAKE_CASE = '''huggingface/label-files'''
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ , repo_type='''dataset''' ) , '''r''' ) )
__SCREAMING_SNAKE_CASE = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = partial(UpperCAmelCase__ , num_labels=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , labelaid=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = {
'''levit-128S''': 1_28,
'''levit-128''': 1_28,
'''levit-192''': 1_92,
'''levit-256''': 2_56,
'''levit-384''': 3_84,
}
__SCREAMING_SNAKE_CASE = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , UpperCAmelCase__ , names_to_config[model_name] , UpperCAmelCase__ , UpperCAmelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
lowerCAmelCase__ =parser.parse_args()
lowerCAmelCase__ =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 482 | 0 |
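The core of the conversion above is copying weights positionally between two state dicts and then checking that the logits agree. A reduced sketch of that pattern with generic modules (illustrative only, not the LeViT-specific key mapping):
import torch
from torch import nn
src = nn.Linear(4, 2)
dst = nn.Linear(4, 2)
# Pair up the two key lists positionally, mirroring the og_keys/new_keys loop above.
dst.load_state_dict(dict(zip(dst.state_dict().keys(), src.state_dict().values())))
x = torch.randn(3, 4)
assert torch.allclose(src(x), dst(x)), "The model logits don't match the original one."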
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple ) ->Optional[Any]:
return EnvironmentCommand()
def _lowerCAmelCase ( UpperCAmelCase__ : List[str] ) ->int:
return EnvironmentCommand(args.accelerate_config_file )
class __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
@staticmethod
def _UpperCamelCase ( snake_case : Dict ):
'''simple docstring'''
download_parser = parser.add_parser("""env""" )
download_parser.set_defaults(func=snake_case )
download_parser.add_argument(
"""--accelerate-config_file""" , default=snake_case , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=snake_case )
def __init__( self : Union[str, Any] , snake_case : Union[str, Any] , *snake_case : Optional[Any] ):
'''simple docstring'''
A__ : str = accelerate_config_file
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : Union[str, Any] = """not installed"""
if is_safetensors_available():
import safetensors
A__ : str = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
A__ : Dict = F'{safetensors.__version__} but is ignored because of PyTorch version too old.'
A__ : Any = """not installed"""
A__ : List[str] = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
A__ : List[Any] = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(snake_case ):
A__ : List[Any] = load_config_from_file(self._accelerate_config_file ).to_dict()
A__ : Union[str, Any] = (
"""\n""".join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
if isinstance(snake_case , snake_case )
else F'\t{accelerate_config}'
)
A__ : List[str] = """not installed"""
A__ : Tuple = """NA"""
if is_torch_available():
import torch
A__ : int = torch.__version__
A__ : Tuple = torch.cuda.is_available()
A__ : str = """not installed"""
A__ : List[str] = """NA"""
if is_tf_available():
import tensorflow as tf
A__ : Union[str, Any] = tf.__version__
try:
# deprecated in v2.1
A__ : Optional[int] = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
A__ : Any = bool(tf.config.list_physical_devices("""GPU""" ) )
A__ : Union[str, Any] = """not installed"""
A__ : Optional[Any] = """not installed"""
A__ : str = """not installed"""
A__ : Dict = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
A__ : List[str] = flax.__version__
A__ : str = jax.__version__
A__ : List[Any] = jaxlib.__version__
A__ : List[Any] = jax.lib.xla_bridge.get_backend().platform
A__ : Any = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": F'{safetensors_version}',
"""Accelerate version""": F'{accelerate_version}',
"""Accelerate config""": F'{accelerate_config_str}',
"""PyTorch version (GPU?)""": F'{pt_version} ({pt_cuda_available})',
"""Tensorflow version (GPU?)""": F'{tf_version} ({tf_cuda_available})',
"""Flax version (CPU?/GPU?/TPU?)""": F'{flax_version} ({jax_backend})',
"""Jax version""": F'{jax_version}',
"""JaxLib version""": F'{jaxlib_version}',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(snake_case ) )
return info
@staticmethod
def _UpperCamelCase ( snake_case : Any ):
'''simple docstring'''
return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 717 |
"""simple docstring"""
def solution ( power : int = 1_0_0_0 ) ->int:
    n = 2**power
    r = 0
    while n:
        r , n = r + n % 1_0, n // 1_0
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 498 | 0 |
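The digit-sum loop above works purely with integer arithmetic; for comparison, a one-line equivalent that leans on Python's arbitrary-precision integers and string conversion:
def solution_str(power: int = 1000) -> int:
    # Sum the decimal digits of 2**power via string conversion.
    return sum(int(digit) for digit in str(2**power))
assert solution_str(15) == 26  # 2**15 = 32768 -> 3 + 2 + 7 + 6 + 8 = 26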
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = "mctct"
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int]=8_065 , SCREAMING_SNAKE_CASE__ : Tuple=1_536 , SCREAMING_SNAKE_CASE__ : int=36 , SCREAMING_SNAKE_CASE__ : List[Any]=6_144 , SCREAMING_SNAKE_CASE__ : Any=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=384 , SCREAMING_SNAKE_CASE__ : Optional[Any]=920 , SCREAMING_SNAKE_CASE__ : Optional[int]=1e-5 , SCREAMING_SNAKE_CASE__ : str=0.3 , SCREAMING_SNAKE_CASE__ : Optional[Any]="relu" , SCREAMING_SNAKE_CASE__ : Dict=0.02 , SCREAMING_SNAKE_CASE__ : str=0.3 , SCREAMING_SNAKE_CASE__ : List[Any]=0.3 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Dict=1 , SCREAMING_SNAKE_CASE__ : Any=0.3 , SCREAMING_SNAKE_CASE__ : int=1 , SCREAMING_SNAKE_CASE__ : Tuple=(7,) , SCREAMING_SNAKE_CASE__ : List[str]=(3,) , SCREAMING_SNAKE_CASE__ : Union[str, Any]=80 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1 , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : str="sum" , SCREAMING_SNAKE_CASE__ : Dict=False , **SCREAMING_SNAKE_CASE__ : str , ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = attention_head_dim
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = layerdrop
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = pad_token_id
lowerCAmelCase__ = bos_token_id
lowerCAmelCase__ = eos_token_id
lowerCAmelCase__ = conv_glu_dim
lowerCAmelCase__ = conv_dropout
lowerCAmelCase__ = num_conv_layers
lowerCAmelCase__ = input_feat_per_channel
lowerCAmelCase__ = input_channels
lowerCAmelCase__ = conv_channels
lowerCAmelCase__ = ctc_loss_reduction
lowerCAmelCase__ = ctc_zero_infinity
# prevents config testing fail with exporting to json
lowerCAmelCase__ = list(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = list(SCREAMING_SNAKE_CASE__ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
f'but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '
f'`config.num_conv_layers = {self.num_conv_layers}`.' )
| 61 |
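The ValueError at the end of __init__ above enforces one kernel size per convolutional layer. A tiny sketch of that consistency rule in isolation (the function name is my own):
def check_conv_config(conv_kernel: list, num_conv_layers: int) -> None:
    # One kernel size must be declared per convolutional layer.
    if len(conv_kernel) != num_conv_layers:
        raise ValueError(
            f"`len(config.conv_kernel) = {len(conv_kernel)}` must equal "
            f"`config.num_conv_layers = {num_conv_layers}`."
        )
check_conv_config([7], 1)        # passes silently
# check_conv_config([7, 3], 1)   # would raise ValueError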
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
__SCREAMING_SNAKE_CASE : Any =6_378_137.0
__SCREAMING_SNAKE_CASE : Optional[int] =6_356_752.314_245
__SCREAMING_SNAKE_CASE : Any =637_8137
def _SCREAMING_SNAKE_CASE ( lat_a : float , lon_a : float , lat_b : float , lon_b : float ):
    '''simple docstring'''
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_a = atan((1 - flattening) * tan(radians(lat_a ) ) )
    phi_b = atan((1 - flattening) * tan(radians(lat_b ) ) )
    lambda_a = radians(lon_a )
    lambda_b = radians(lon_b )
    # Equation
    sin_sq_phi = sin((phi_b - phi_a) / 2 )
    sin_sq_lambda = sin((lambda_b - lambda_a) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_a ) * cos(phi_b ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(h_value )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 135 | 0 |
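A quick sanity check of the haversine function above (the function name comes from the snippet; the coordinates are San Francisco and New York, and the exact figure depends on the WGS84 constants, so treat the expected value as approximate):
SAN_FRANCISCO = (37.774856, -122.424227)
NEW_YORK = (40.713019, -74.012647)
# Great-circle distance between the two cities, roughly 4.1e6 metres.
distance = _SCREAMING_SNAKE_CASE(*SAN_FRANCISCO, *NEW_YORK)
print(f"{distance / 1000:.0f} km")  # on the order of 4100 km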
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_lowerCamelCase : Any = data_utils.TransfoXLTokenizer
_lowerCamelCase : Any = data_utils.TransfoXLCorpus
_lowerCamelCase : str = data_utils
_lowerCamelCase : Optional[Any] = data_utils
def A__ ( __A : Optional[int] , __A : Dict , __A : str , __A : Any ) ->Optional[int]:
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(__A , '''rb''' ) as fp:
__A =pickle.load(__A , encoding='''latin1''' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
__A =pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' )
__A =corpus.vocab.__dict__
torch.save(__A , __A )
__A =corpus.__dict__
corpus_dict_no_vocab.pop('''vocab''' , __A )
__A =pytorch_dump_folder_path + '''/''' + CORPUS_NAME
print(F'''Save dataset to {pytorch_dataset_dump_path}''' )
torch.save(__A , __A )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
__A =os.path.abspath(__A )
__A =os.path.abspath(__A )
print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
__A =TransfoXLConfig()
else:
__A =TransfoXLConfig.from_json_file(__A )
print(F'''Building PyTorch model from configuration: {config}''' )
__A =TransfoXLLMHeadModel(__A )
__A =load_tf_weights_in_transfo_xl(__A , __A , __A )
# Save pytorch-model
__A =os.path.join(__A , __A )
__A =os.path.join(__A , __A )
print(F'''Save PyTorch model to {os.path.abspath(__A )}''' )
torch.save(model.state_dict() , __A )
print(F'''Save configuration file to {os.path.abspath(__A )}''' )
with open(__A , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
_lowerCamelCase : Tuple = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 709 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=3_0 , lowercase__=2 , lowercase__=3 , lowercase__=True , lowercase__=True , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=1_0 , lowercase__=0.02 , lowercase__=3 , lowercase__=None , lowercase__=2 , ):
'''simple docstring'''
__A =parent
__A =batch_size
__A =image_size
__A =patch_size
__A =num_channels
__A =is_training
__A =use_labels
__A =hidden_size
__A =num_hidden_layers
__A =num_attention_heads
__A =intermediate_size
__A =hidden_act
__A =hidden_dropout_prob
__A =attention_probs_dropout_prob
__A =type_sequence_label_size
__A =initializer_range
__A =scope
__A =encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
__A =(image_size // patch_size) ** 2
__A =num_patches + 2
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A =None
if self.use_labels:
__A =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A =self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
__A =DeiTModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__A =model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
__A =DeiTForMaskedImageModeling(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__A =model(lowercase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__A =1
__A =DeiTForMaskedImageModeling(lowercase__ )
model.to(lowercase__ )
model.eval()
__A =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A =model(lowercase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
__A =self.type_sequence_label_size
__A =DeiTForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__A =model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__A =1
__A =DeiTForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__A =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A =model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self ):
'''simple docstring'''
config_and_inputs =self.prepare_config_and_inputs()
config , pixel_values , labels =config_and_inputs
inputs_dict ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowercase_ = (
{
"""feature-extraction""": DeiTModel,
"""image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =DeiTModelTester(self )
__A =ConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=3_7 )
def __UpperCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def __UpperCamelCase ( self ):
'''simple docstring'''
pass
def __UpperCamelCase ( self ):
'''simple docstring'''
__A , __A =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A =model_class(lowercase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__A =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A , __A =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A =model_class(lowercase__ )
__A =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A =[*signature.parameters.keys()]
__A =['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__ )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase__ )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__ )
def __UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__=False ):
'''simple docstring'''
__A =super()._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __UpperCamelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__A , __A =self.model_tester.prepare_config_and_inputs_for_common()
__A =True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowercase__ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
__A =model_class(lowercase__ )
model.to(lowercase__ )
model.train()
__A =self._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ )
__A =model(**lowercase__ ).loss
loss.backward()
def __UpperCamelCase ( self ):
'''simple docstring'''
__A , __A =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__A =False
__A =True
for model_class in self.all_model_classes:
if model_class in get_values(lowercase__ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
__A =model_class(lowercase__ )
model.gradient_checkpointing_enable()
model.to(lowercase__ )
model.train()
__A =self._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ )
__A =model(**lowercase__ ).loss
loss.backward()
def __UpperCamelCase ( self ):
'''simple docstring'''
__A , __A =self.model_tester.prepare_config_and_inputs_for_common()
__A =[
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowercase__ ),
*get_values(lowercase__ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'''Testing {model_class} with {problem_type['title']}''' ):
__A =problem_type['''title''']
__A =problem_type['''num_labels''']
__A =model_class(lowercase__ )
model.to(lowercase__ )
model.train()
__A =self._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ )
if problem_type["num_labels"] > 1:
__A =inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
__A =inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowercase__ ) as warning_list:
__A =model(**lowercase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def __UpperCamelCase ( self ):
'''simple docstring'''
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A =DeiTModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def A__ ( ) ->str:
__A =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to(
lowercase__ )
__A =self.default_image_processor
__A =prepare_img()
__A =image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
__A =model(**lowercase__ )
# verify the logits
__A =torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowercase__ )
__A =torch.tensor([-1.0266, 0.1912, -1.2861] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =DeiTModel.from_pretrained(
'''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.float16 , device_map='''auto''' )
__A =self.default_image_processor
__A =prepare_img()
__A =image_processor(images=lowercase__ , return_tensors='''pt''' )
__A =inputs.pixel_values.to(lowercase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__A =model(lowercase__ )
| 516 | 0 |
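The problem_types loop above relies on the convention that a sequence-classification head picks its loss from config.problem_type. A reduced sketch of that dispatch in plain PyTorch (this mirrors the convention the test exercises, not the transformers implementation itself):
import torch
from torch import nn
def classification_loss(problem_type: str, logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    if problem_type == "regression":
        return nn.MSELoss()(logits.squeeze(), labels.squeeze().float())
    if problem_type == "single_label_classification":
        return nn.CrossEntropyLoss()(logits, labels)
    if problem_type == "multi_label_classification":
        return nn.BCEWithLogitsLoss()(logits, labels)
    raise ValueError(f"unknown problem_type: {problem_type}")
logits = torch.randn(4, 2)
print(classification_loss("multi_label_classification", logits, torch.rand(4, 2)))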
"""simple docstring"""
def generate_large_matrix ():
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid (grid : list[list[int]] ):
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index (array : list[int] ):
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
def count_negatives_binary_search (grid : list[list[int]] ):
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force (grid : list[list[int]] ):
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break (grid : list[list[int]] ):
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
def benchmark ():
    from timeit import timeit
    print('Running benchmarks' )
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search", # took 0.7727 seconds
        "count_negatives_brute_force_with_break", # took 4.6505 seconds
        "count_negatives_brute_force", # took 12.8160 seconds
    ):
        time = timeit(F'{func}(grid=grid)' , setup=setup , number=500 )
        print(F'{func}() took {time:0.4f} seconds' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 4 |
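Besides the three strategies benchmarked above, the classic O(m + n) "staircase" walk deserves a mention: because rows and columns are sorted in decreasing order, the boundary between non-negative and negative values can be traced once. A sketch:
def count_negatives_staircase(grid: list[list[int]]) -> int:
    rows, cols = len(grid), len(grid[0])
    total = 0
    row = rows - 1
    for col in range(cols):
        # Move the boundary up while the current cell is negative.
        while row >= 0 and grid[row][col] < 0:
            row -= 1
        # Everything below `row` in this column is negative.
        total += rows - 1 - row
    return total
assert count_negatives_staircase([[3, 2], [1, 0]]) == 0
assert count_negatives_staircase([[7, 7, 6], [-1, -2, -3]]) == 3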
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : int = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase : List[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
_lowerCAmelCase : List[str] = {
"""allenai/led-base-16384""": 16_384,
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ =LEDTokenizer
SCREAMING_SNAKE_CASE_ =['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , snake_case__ : str=None , snake_case__ : List[Any]=None , snake_case__ : Dict=None , snake_case__ : List[str]="replace" , snake_case__ : Optional[int]="<s>" , snake_case__ : List[str]="</s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : Any="<pad>" , snake_case__ : Dict="<mask>" , snake_case__ : int=False , snake_case__ : Optional[int]=True , **snake_case__ : List[Any] , ):
'''simple docstring'''
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
UpperCAmelCase__ : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case__ ) != add_prefix_space:
UpperCAmelCase__ : Dict = getattr(snake_case__ , pre_tok_state.pop("type" ) )
UpperCAmelCase__ : str = add_prefix_space
UpperCAmelCase__ : Any = pre_tok_class(**snake_case__ )
UpperCAmelCase__ : Dict = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase__ : List[str] = "post_processor"
UpperCAmelCase__ : List[Any] = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
UpperCAmelCase__ : int = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase__ : Optional[Any] = tuple(state["sep"] )
if "cls" in state:
UpperCAmelCase__ : Any = tuple(state["cls"] )
UpperCAmelCase__ : Any = False
if state.get("add_prefix_space" , snake_case__ ) != add_prefix_space:
UpperCAmelCase__ : Union[str, Any] = add_prefix_space
UpperCAmelCase__ : List[Any] = True
if state.get("trim_offsets" , snake_case__ ) != trim_offsets:
UpperCAmelCase__ : Optional[int] = trim_offsets
UpperCAmelCase__ : List[Any] = True
if changes_to_apply:
UpperCAmelCase__ : List[str] = getattr(snake_case__ , state.pop("type" ) )
UpperCAmelCase__ : Optional[int] = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __a ( self : Any ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __a ( self : Any , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
UpperCAmelCase__ : Dict = value
def __a ( self : str , *snake_case__ : Any , **snake_case__ : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = kwargs.get("is_split_into_words" , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def __a ( self : List[str] , *snake_case__ : Union[str, Any] , **snake_case__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = kwargs.get("is_split_into_words" , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def __a ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def __a ( self : str , snake_case__ : List[Any] , snake_case__ : str=None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __a ( self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
UpperCAmelCase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __a ( self : Any , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
'''simple docstring'''
UpperCAmelCase__ : str = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase__ : Optional[int] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase__ : List[Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
UpperCAmelCase__ : Any = len(encoded_inputs["global_attention_mask"] ) != len(snake_case__ )
if needs_to_be_padded:
UpperCAmelCase__ : List[str] = len(snake_case__ ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase__ : Dict = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase__ : Dict = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 438 | 0 |
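The _pad override above keeps global_attention_mask aligned with the padded input_ids by extending it with -1 (which marks ordinary local attention, not masking). A standalone sketch of that rule outside the tokenizer (names are my own):
def pad_global_attention_mask(mask: list, target_length: int, padding_side: str = "right") -> list:
    # -1 means "local attention" for padded positions, as in the override above.
    difference = target_length - len(mask)
    if padding_side == "right":
        return mask + [-1] * difference
    return [-1] * difference + mask
print(pad_global_attention_mask([1, 0, 0], target_length=6))  # [1, 0, 0, -1, -1, -1]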
def odd_even_sort ( input_list : list ) -> list:
    is_sorted = False
    while is_sorted is False: # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ): # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i] , input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ): # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i] , input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
print("Enter list to be sorted")
input_list = [int(x) for x in input().split()]
# inputting elements of the list in one line
sorted_list = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list) | 704 |
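A quick non-interactive check of the sorter above (odd-even transposition sort only swaps adjacent out-of-order pairs, so it is stable and O(n^2) in the worst case):
data = [5, 3, 8, 1, 9, 2]
assert odd_even_sort(data) == [1, 2, 3, 5, 8, 9]
print("odd_even_sort ok")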
'''simple docstring'''
def lowerCAmelCase__ ( discount_rate : float , cash_flows : list[float] ) -> float:
    if discount_rate < 0:
        raise ValueError('''Discount rate cannot be negative''' )
    if not cash_flows:
        raise ValueError('''Cash flows list cannot be empty''' )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 599 | 0 |
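A worked example of the present-value function above (the function name comes from the snippet): with a 10% discount rate and cash flows of -100, 50, 60, the sum is -100 + 50/1.1 + 60/1.1**2 ≈ -4.96, since enumerate starts discounting at period 0:
npv = lowerCAmelCase__(0.1, [-100.0, 50.0, 60.0])
print(npv)  # -4.96, rounded to 2 digits by the function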
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( __a , unittest.TestCase ):
snake_case : Optional[int] = CanineTokenizer
snake_case : str = False
def snake_case_ (self ):
super().setUp()
_UpperCAmelCase : int = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case_ (self ):
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def snake_case_ (self , **lowerCAmelCase__ ):
_UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = 1_0_2_4
return tokenizer
@require_torch
def snake_case_ (self ):
_UpperCAmelCase : Optional[int] = self.canine_tokenizer
_UpperCAmelCase : int = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
_UpperCAmelCase : str = [5_7_3_4_4, 7_6, 1_0_5, 1_0_2, 1_0_1, 3_2, 1_0_5, 1_1_5, 3_2, 1_0_8, 1_0_5, 1_0_7, 1_0_1, 3_2, 9_7, 3_2, 9_8, 1_1_1, 1_2_0, 3_2, 1_1_1, 1_0_2, 3_2, 9_9, 1_0_4, 1_1_1, 9_9, 1_1_1, 1_0_8, 9_7, 1_1_6, 1_0_1, 1_1_5, 4_6, 5_7_3_4_5, 0, 0, 0, 0]
# fmt: on
_UpperCAmelCase : Any = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="""pt""" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : str = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 3_9) , batch.input_ids.shape )
self.assertEqual((2, 3_9) , batch.attention_mask.shape )
@require_torch
def snake_case_ (self ):
_UpperCAmelCase : int = self.canine_tokenizer
_UpperCAmelCase : int = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
_UpperCAmelCase : Optional[int] = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , lowerCAmelCase__ )
self.assertIn("""attention_mask""" , lowerCAmelCase__ )
self.assertIn("""token_type_ids""" , lowerCAmelCase__ )
    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            """What's the weater?""",
            """It's about 25 degrees.""",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=3_2, padding="""max_length""", truncation=True, return_tensors="""pt""")
        self.assertEqual(3_2, targets["""input_ids"""].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 4_2)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = """ He is very happy, UNwant\u00E9d,running"""
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=4_2)
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = """ He is very happy, UNwant\u00E9d,running"""
                additional_special_tokens = tokenizer.additional_special_tokens

                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE_0_0_7)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 4_2)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=4_3)
                self.assertEqual(tokenizer.model_max_length, 4_3)

                shutil.rmtree(tmpdirname)
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE_0_0_5
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"""cls_token""": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE_0_0_5)
                SPECIAL_TOKEN_2 = chr(0xE_0_0_6)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE_0_0_6
                new_token = chr(NEW_TOKEN)

                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, """special_tokens_map.json"""), encoding="""utf-8""") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, """tokenizer_config.json"""), encoding="""utf-8""") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE_0_0_6
                new_token_1 = chr(NEW_TOKEN)

                special_tokens_map["""additional_special_tokens"""] = [new_token_1]
                tokenizer_config["""additional_special_tokens"""] = [new_token_1]

                with open(os.path.join(tmp_dir, """special_tokens_map.json"""), """w""", encoding="""utf-8""") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, """tokenizer_config.json"""), """w""", encoding="""utf-8""") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])))

                NEW_TOKEN = 0xE_0_0_7
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0)

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2])))
    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}"):
                input = """hello world"""
                if self.space_between_special_tokens:
                    output = """[CLS] hello world [SEP]"""
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    """bos_token""",
                    """eos_token""",
                    """unk_token""",
                    """sep_token""",
                    """pad_token""",
                    """cls_token""",
                    """mask_token""",
                ]

                token_to_test_setters = """a"""
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + """_id""", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + """_id"""), None)

                    setattr(tokenizer, attr + """_id""", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + """_id"""), token_id_to_test_setters)

                setattr(tokenizer, """additional_special_tokens_ids""", [])
                self.assertListEqual(getattr(tokenizer, """additional_special_tokens"""), [])
                self.assertListEqual(getattr(tokenizer, """additional_special_tokens_ids"""), [])

                additional_special_token_id = 0xE_0_0_6
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, """additional_special_tokens_ids""", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, """additional_special_tokens"""), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, """additional_special_tokens_ids"""), [additional_special_token_id])
    # tokenizer has a fixed vocab_size (namely all possible unicode code points)
    def test_add_tokens_tokenizer(self):
        pass

    # CanineTokenizer does not support do_lower_case = True, as each character has its own Unicode code point
    # ("b" and "B" for example have different Unicode code points)
    def test_added_tokens_do_lower_case(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_np_encode_plus_sent_to_model(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_torch_encode_plus_sent_to_model(self):
        pass

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
| 414 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=1_3,
        image_size=3_0,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=1_0,
        initializer_range=0.0_2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values
    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("""JIT Enabled"""):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("""JIT Disabled"""):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""google/vit-base-patch16-224""")
            outputs = model(np.ones((1, 3, 2_2_4, 2_2_4)))
            self.assertIsNotNone(outputs)
| 414 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 709 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peaking (bell) filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
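
# Editor's demo (added for illustration; not part of the original module). It assumes
# IIRFilter from the companion audio_filters.iir_filter module exposes process(sample),
# which the imports above suggest but this file does not itself confirm.
if __name__ == "__main__":
    demo_filter = make_lowpass(frequency=1_000, samplerate=48_000)
    # Feed a short impulse through the filter and print the first few response samples.
    print([round(demo_filter.process(1.0 if n == 0 else 0.0), 6) for n in range(5)])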
| 120 | 0 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """Builds a random array of ten ints and a random target sum."""
    arr = [randint(-10_00, 10_00) for _ in range(10)]
    target = randint(-50_00, 50_00)
    return (arr, target)
dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Returns a triplet in the array with sum equal to target, else (0, 0, 0). Brute force over permutations."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Returns a triplet in the array with sum equal to target, else (0, 0, 0). Two-pointer scan over the sorted array."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
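
# Editor's worked example (illustrative, not from the original file): for
# arr = [1, 2, 3, 4] and target = 9, the scan fixes arr[i] = 1 and narrows
# left/right until it fails, then fixes arr[i] = 2 and returns (2, 3, 4).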
def solution_times() -> tuple[float, float]:
    """Times both solutions against the shared dataset and returns their best runs."""
    setup_code = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n'
    test_code1 = '\ntriplet_sum1(*dataset)\n'
    test_code2 = '\ntriplet_sum2(*dataset)\n'
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=1_00_00)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=1_00_00)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F"""The time for naive implementation is {times[0]}.""")
print(F"""The time for optimized implementation is {times[1]}.""")
| 85 | from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='RegNet does not use inputs_embeds' )
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason='RegNet does not support input and output embeddings' )
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)) , msg=(
                            'Tuple and dict output are not equal. Difference:'
                            F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
                        ) , )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf' )

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836] )

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4 )
| 85 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """simple docstring"""
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["""model_config"""])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="""cpu""")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("""<ent>""", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("""<ent2>""", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f'Saving tokenizer to {pytorch_dump_folder_path}')
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["""entity_vocab_file"""]), """w""") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["""embeddings.word_embeddings.weight"""]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["""@"""])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["""#"""])[0]].unsqueeze(0)
    state_dict["""embeddings.word_embeddings.weight"""] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f'encoder.layer.{layer_index}.attention.self.'
            state_dict[prefix + """w2e_""" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + """e2w_""" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + """e2e_""" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["""entity_embeddings.entity_embeddings.weight"""]
    entity_emb[entity_vocab["""[MASK2]"""]] = entity_emb[entity_vocab["""[MASK]"""]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f'Missing keys {", ".join(missing_keys)}. Expected only missing embeddings.position_ids')
    if not (all(key.startswith("""entity_predictions""") or key.startswith("""lm_head""") for key in unexpected_keys)):
        raise ValueError(
            """Unexpected keys"""
            f' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}')

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="""entity_classification""")

    text = (
        """Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
        """ new world number one avoid a humiliating second- round exit at Wimbledon ."""
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="""pt""")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1_024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]])
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}')
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1_024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            f' {expected_shape}')
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1E-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("""Saving PyTorch model to {}""".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    """simple docstring"""
    entity_vocab = {}
    with open(entity_vocab_path, """r""", encoding="""utf-8""") as f:
        for index, line in enumerate(f):
            entity_name, _ = line.rstrip().split("""\t""")
            entity_vocab[entity_name] = index

    return entity_vocab
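
# Editor's note (assumption, not confirmed by this file): the vocab file appears to be
# a TSV with one "<entity_name>\t<count>" pair per line; only line order matters here,
# since the zero-based line index becomes the entity id.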
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
) | 344 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.0_2,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def __snake_case ( self , A_ , A_ , A_ ) -> Tuple:
lowerCAmelCase = ConvNextVaModel(config=A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __snake_case ( self , A_ , A_ , A_ ) -> str:
lowerCAmelCase = ConvNextVaForImageClassification(A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self , A_ , A_ , A_ ) -> List[Any]:
lowerCAmelCase = ConvNextVaBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCAmelCase = None
lowerCAmelCase = ConvNextVaBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
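
# Illustrative sketch (not from the original test file): the shape checks above
# follow ConvNeXt's layout -- a 4x patchify stem, then 2x downsampling at each
# later stage -- so with image_size=32 the stages yield 8x8, 4x4, 2x2 and 1x1
# feature maps (hence `image_size // 32` for the last hidden state).
def _expected_stage_sizes(image_size=32, num_stages=4):
    sizes = []
    current = image_size // 4  # the patchify stem divides the resolution by 4
    for _ in range(num_stages):
        sizes.append(current)
        current //= 2  # every subsequent stage halves the resolution
    return sizes  # [8, 4, 2, 1] for the defaults used in the tester above
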
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
    def test_model_common_attributes(self):
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
    def test_feed_forward_chunking(self):
pass
    def test_training(self):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) | 344 | 1 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}
    def add_pair(self, u, v, w=1):
        # add the edge u -> v with weight w
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []
    def all_nodes(self):
        return list(self.graph)
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has at most 102 randomly generated edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)
    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count
    def out_degree(self, u):
        return len(self.graph[u])
    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = list(self.graph )[0]
stack.append(lowerCamelCase )
visited.append(lowerCamelCase )
__SCREAMING_SNAKE_CASE = -2
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = s
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__SCREAMING_SNAKE_CASE = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
                        len_stack = len(stack) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__SCREAMING_SNAKE_CASE = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__SCREAMING_SNAKE_CASE = True
if len(lowerCamelCase ) != 0:
__SCREAMING_SNAKE_CASE = stack[len(lowerCamelCase ) - 1]
else:
__SCREAMING_SNAKE_CASE = False
indirect_parents.append(lowerCamelCase )
__SCREAMING_SNAKE_CASE = s
__SCREAMING_SNAKE_CASE = ss
            # check if we have reached the starting point
if len(lowerCamelCase ) == 0:
return list(lowerCamelCase )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = list(self.graph )[0]
stack.append(lowerCamelCase )
visited.append(lowerCamelCase )
__SCREAMING_SNAKE_CASE = -2
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = s
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__SCREAMING_SNAKE_CASE = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
                        len_stack_minus_one = len(stack) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__SCREAMING_SNAKE_CASE = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__SCREAMING_SNAKE_CASE = True
if len(lowerCamelCase ) != 0:
__SCREAMING_SNAKE_CASE = stack[len(lowerCamelCase ) - 1]
else:
__SCREAMING_SNAKE_CASE = False
indirect_parents.append(lowerCamelCase )
__SCREAMING_SNAKE_CASE = s
__SCREAMING_SNAKE_CASE = ss
            # check if we have reached the starting point
if len(lowerCamelCase ) == 0:
return False
    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin
    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
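
# Self-contained sketch of the iterative-DFS pattern used above (an explicit
# stack instead of recursion); hypothetical adjacency data, independent of the
# class itself:
def _dfs_iterative(adj, start):
    visited, stack = [start], [start]
    while stack:
        node = stack[-1]
        unvisited = [n for n in adj.get(node, []) if n not in visited]
        if unvisited:
            stack.append(unvisited[0])
            visited.append(unvisited[0])
        else:
            stack.pop()  # every child seen: backtrack, mirroring the s == ss branch
    return visited


# _dfs_iterative({0: [1, 2], 1: [2], 2: []}, 0) == [0, 1, 2]
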
class Graph:
    def __init__(self):
        self.graph = {}
    def add_pair(self, u, v, w=1):
        # add the edge u -> v
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has at most 102 randomly generated edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)
    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
    def degree(self, u):
        return len(self.graph[u])
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = list(self.graph )[0]
stack.append(lowerCamelCase )
visited.append(lowerCamelCase )
__SCREAMING_SNAKE_CASE = -2
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = s
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__SCREAMING_SNAKE_CASE = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
                        len_stack = len(stack) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__SCREAMING_SNAKE_CASE = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__SCREAMING_SNAKE_CASE = True
if len(lowerCamelCase ) != 0:
__SCREAMING_SNAKE_CASE = stack[len(lowerCamelCase ) - 1]
else:
__SCREAMING_SNAKE_CASE = False
indirect_parents.append(lowerCamelCase )
__SCREAMING_SNAKE_CASE = s
__SCREAMING_SNAKE_CASE = ss
            # check if we have reached the starting point
if len(lowerCamelCase ) == 0:
return list(lowerCamelCase )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = list(self.graph )[0]
stack.append(lowerCamelCase )
visited.append(lowerCamelCase )
__SCREAMING_SNAKE_CASE = -2
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = s
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__SCREAMING_SNAKE_CASE = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
                        len_stack_minus_one = len(stack) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__SCREAMING_SNAKE_CASE = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__SCREAMING_SNAKE_CASE = True
if len(lowerCamelCase ) != 0:
__SCREAMING_SNAKE_CASE = stack[len(lowerCamelCase ) - 1]
else:
__SCREAMING_SNAKE_CASE = False
indirect_parents.append(lowerCamelCase )
__SCREAMING_SNAKE_CASE = s
__SCREAMING_SNAKE_CASE = ss
            # check if we have reached the starting point
if len(lowerCamelCase ) == 0:
return False
    def all_nodes(self):
        return list(self.graph)
    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin
    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
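
# Generic form of the dfs_time/bfs_time pattern above (an illustrative helper,
# not part of the original classes): wrap any traversal in two time() calls.
def _timed(fn, *args, **kwargs):
    begin = time()
    result = fn(*args, **kwargs)
    end = time()
    return result, end - begin
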
| 109 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 48000,
"sample_size": 65536,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 48000,
"sample_size": 65536,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 48000,
"sample_size": 131072,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 16000,
"sample_size": 65536,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 16000,
"sample_size": 65536,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 16000,
"sample_size": 65536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Convert an (alpha, sigma) pair to a continuous timestep in [0, 1]."""
    return torch.atan2(sigma, alpha) / math.pi * 2
def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
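
# Illustrative endpoint check (not part of the conversion script): at t=0 the
# crash schedule is 0 (pure signal) and at t=1 it is 1 (pure noise), because
# atan2(0, 1) == 0 and atan2(1, 0) == pi / 2.
def _check_crash_schedule_endpoints():
    t = torch.tensor([0.0, 1.0])
    assert torch.allclose(get_crash_schedule(t), t, atol=1e-6)
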
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
UP_NUM_TO_LAYER = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
MID_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
RES_CONV_MAP = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
ATTN_MAP = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
if name.startswith("""skip""" ):
return name.replace("""skip""" , RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(f"""ResConvBlock error with {name}""" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]
    while string.startswith("main.7."):
        depth += 1
        string = string[7:]
    if string.startswith("main."):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"
    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left
    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
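
# Example derivable from the code above: the timestep embedding maps one-to-one,
# while net/main prefixes resolve into down_blocks/mid_block/up_blocks paths.
def _demo_rename():
    assert rename("timestep_embed.weight") == "time_proj.weight"
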
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight: drop the trailing Conv1d kernel dimension
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices: split the fused projection into three equal chunks
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)
    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)
    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios
    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)
    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
main(args)
| 109 | 1 |
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def get_module_path(test_file):
    """Turn a test file path into its importable dotted module path."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
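
# Illustrative call (hypothetical test path): an OS path under tests/models/ is
# mapped to the dotted module path used by importlib.
def _demo_module_path():
    path = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
    assert get_module_path(path) == "tests.models.bert.test_modeling_bert"
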
def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
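
# Quick demo of to_json (illustrative): class objects collapse to their names
# and containers are converted recursively, which keeps the mappings above
# JSON-friendly.
def _demo_to_json():
    assert to_json({int: [str, "x"]}) == {"int": ["str", "x"]}
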
| 598 |
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer
@slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
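
# Background sketch (an assumption about the schedulers' shared forward process,
# not asserted by the test itself): DDPM and DDIM use the same closed-form
# add_noise, x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps, which
# is why the per-step training losses can agree exactly.
def _add_noise_reference(x0, eps, alpha_bar_t):
    return alpha_bar_t**0.5 * x0 + (1 - alpha_bar_t) ** 0.5 * eps
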
| 598 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : Optional[int] = ViTImageProcessor if is_vision_available() else None
@property
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : Dict = (3, 32, 1_28)
_snake_case : Tuple = tempfile.mkdtemp()
# fmt: off
_snake_case : List[str] = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
_snake_case : int = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
_snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + '\n' )
_snake_case : Union[str, Any] = {
'do_normalize': False,
'do_resize': True,
'image_processor_type': 'ViTImageProcessor',
'resample': 3,
'size': {'height': 32, 'width': 1_28},
}
_snake_case : Tuple = os.path.join(self.tmpdirname , lowerCamelCase_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] , **lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def __UpperCAmelCase ( self : Any , **lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
_snake_case : Dict = Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1 ) )
return image_input
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.get_tokenizer()
_snake_case : List[str] = self.get_image_processor()
_snake_case : str = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
_snake_case : Tuple = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase_ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowerCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase_ )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.get_tokenizer()
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : List[str] = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
_snake_case : Union[str, Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_snake_case : Optional[Any] = self.get_image_processor(do_normalize=lowerCamelCase_ , padding_value=1.0 )
_snake_case : Optional[Any] = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowerCamelCase_ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowerCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = self.get_image_processor()
_snake_case : str = self.get_tokenizer()
_snake_case : Tuple = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
_snake_case : int = self.prepare_image_inputs()
_snake_case : Optional[int] = image_processor(lowerCamelCase_ , return_tensors='np' )
_snake_case : Optional[int] = processor(images=lowerCamelCase_ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : Any = self.get_tokenizer()
_snake_case : Union[str, Any] = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
_snake_case : List[str] = 'test'
_snake_case : Tuple = processor(text=lowerCamelCase_ )
_snake_case : Union[str, Any] = tokenizer(lowerCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Dict = self.get_image_processor()
_snake_case : Optional[int] = self.get_tokenizer()
_snake_case : List[Any] = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
_snake_case : Optional[int] = 'test'
_snake_case : Union[str, Any] = self.prepare_image_inputs()
_snake_case : List[Any] = processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'labels'] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase_ ):
processor()
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Dict = self.get_image_processor()
_snake_case : List[Any] = self.get_tokenizer()
_snake_case : Any = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
_snake_case : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_snake_case : List[Any] = processor.char_decode(lowerCamelCase_ )
_snake_case : List[str] = tokenizer.batch_decode(lowerCamelCase_ )
_snake_case : Union[str, Any] = [seq.replace(' ' , '' ) for seq in decoded_tok]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : Tuple = self.get_image_processor()
_snake_case : str = self.get_tokenizer()
_snake_case : Dict = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
_snake_case : Any = None
_snake_case : List[Any] = self.prepare_image_inputs()
_snake_case : List[Any] = processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : Tuple = self.get_tokenizer()
_snake_case : Dict = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
_snake_case : Any = torch.randn(1 , 27 , 38 )
_snake_case : Union[str, Any] = torch.randn(1 , 27 , 5_02_57 )
_snake_case : Dict = torch.randn(1 , 27 , 3_05_22 )
_snake_case : str = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'] )
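
# Sketch of the greedy decode implied by batch_decode above (hypothetical
# shapes; the real processor additionally fuses the three recognition heads and
# returns confidence scores alongside the text):
def _greedy_char_decode(logits, id_to_char):
    # logits: (batch, seq_len, vocab_size) tensor -> one string per sequence
    ids = logits.argmax(-1)
    return ["".join(id_to_char[int(i)] for i in seq) for seq in ids]
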
| 304 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
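
# Usage sketch (illustrative; downloads the checkpoint on first use):
#
#     tool = SpeechToTextTool()
#     text = tool(waveform)  # waveform: a 1-D float array sampled at 16 kHz
#
# PipelineTool.__call__ chains encode -> forward -> decode as defined above.
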
| 304 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
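
# Background sketch of the expected do_reduce_labels semantics (an assumption
# documented for the range checks in the tests below, not code taken from the
# processor): background pixels (0) map to the ignore index 255 and every other
# class id shifts down by one. For a numpy label map:
def _reduce_labels_reference(label_map):
    reduced = label_map.astype("int64") - 1
    reduced[label_map == 0] = 255
    return reduced
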
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
self.assertTrue(hasattr(__A , """do_center_crop""" ) )
self.assertTrue(hasattr(__A , """center_crop""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
self.assertTrue(hasattr(__A , """image_mean""" ) )
self.assertTrue(hasattr(__A , """image_std""" ) )
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
self.assertEqual(image_processor.do_reduce_labels , __A )
lowerCAmelCase__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__A )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
self.assertEqual(image_processor.do_reduce_labels , __A )
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCAmelCase__ = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowercase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCAmelCase__ = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCAmelCase__ = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        # With label reduction, the background label is mapped to 255
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
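
# A minimal, standalone sketch (not part of this test suite) of the contract the assertions
# above pin down: `pixel_values` comes back as (batch, channels, crop_h, crop_w) and `labels`
# as (batch, crop_h, crop_w) with dtype long. The BeitImageProcessor class and the sizes used
# here are illustrative assumptions, not values taken from these tests.
def _demo_segmentation_encoding_contract():
    import numpy as np
    from transformers import BeitImageProcessor

    processor = BeitImageProcessor(crop_size={"height": 18, "width": 18})
    image = np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8)
    segmentation_map = np.zeros((30, 40), dtype=np.int64)
    encoding = processor(image, segmentation_map, return_tensors="pt")
    # One image in, a batch of one out; the map is resized/cropped alongside the image.
    assert encoding["pixel_values"].shape == (1, 3, 18, 18)
    assert encoding["labels"].shape == (1, 18, 18)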
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate


@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features: Features) -> "AudioClassification":
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
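
# A minimal sketch (not part of the datasets library itself) of how `align_with_features`
# specializes the label schema to a dataset's own `ClassLabel`. The class names below are
# illustrative assumptions.
def _demo_align_with_features():
    features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
    task = AudioClassification(audio_column="audio", label_column="labels")
    aligned = task.align_with_features(features)
    # The aligned template now carries the dataset's two-class label schema.
    return aligned.label_schema["labels"].names  # ["cat", "dog"]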
"""Convert PyTorch checkpoints to TensorFlow."""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
    GPT2Config,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
    T5Config,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
    TFGPT2LMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
    TFT5ForConditionalGeneration,
    TFTransfoXLLMHeadModel,
    TFWav2Vec2Model,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
    Wav2Vec2Config,
    Wav2Vec2Model,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
    load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
        GPT2LMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
        T5ForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
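
# Example invocation (illustrative script name, shortcut names and paths -- not values taken
# from this file):
#
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path bert-base-cased \
#       --config_file bert-base-cased \
#       --tf_dump_path ./tf_dumps \
#       --compare_with_pt_model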
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing: an iterable dataset of random length that yields increasing integers.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop


class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # The last batch is not always the same size when batches are not split.
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])

    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
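
# A standalone sketch (not one of the tests above) of the sharding behavior verified here:
# two BatchSamplerShard views of one sampler interleave its batches, and with the default
# even_batches=True the first samples wrap around so both shards stay the same length.
def _demo_batch_sampler_shard():
    batch_sampler = BatchSampler(range(10), batch_size=2, drop_last=False)
    shards = [BatchSamplerShard(batch_sampler, 2, i) for i in range(2)]
    # Five batches split over two shards; the odd one out is completed by wrapping around.
    assert list(shards[0]) == [[0, 1], [4, 5], [8, 9]]
    assert list(shards[1]) == [[2, 3], [6, 7], [0, 1]]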
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__A =logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk"
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney"
        )
    def to_dict(self):
        # Serialize this instance to a dict, dropping the (large) precomputed mel filter banks.
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.ndarray, mel_filters: Optional[np.ndarray] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB"
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.ndarray, max_length, truncation, padding):
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(
        self,
        raw_speech,
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
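
# A minimal usage sketch (illustrative values, not part of this module): extracting features
# from three seconds of silence at the extractor's default 48 kHz rate. With the default
# "fusion" truncation, four mel views are stacked, giving (batch, 4, frames, mel_bins).
def _demo_clap_feature_extraction():
    audio = np.zeros(3 * 48_000, dtype=np.float64)
    extractor = ClapFeatureExtractor()
    features = extractor(audio, sampling_rate=48_000, return_tensors="np")
    return features["input_features"].shape  # e.g. (1, 4, num_frames, 64)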
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
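
# A minimal sketch of what the lazy module buys you (illustrative, from outside this file):
# importing the package is cheap, and a heavy submodule such as `modeling_layoutlmv3` is only
# imported when one of its exported names is first accessed.
#
#   from transformers.models import layoutlmv3
#   model_cls = layoutlmv3.LayoutLMv3Model  # triggers the real submodule import here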
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )
# Get the metric function
lowerCamelCase_ = evaluate.load('xnli' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_lowerCamelCase : EvalPrediction ):
lowerCamelCase_ = p.predictions[0] if isinstance(p.predictions , _lowerCamelCase ) else p.predictions
lowerCamelCase_ = np.argmax(_lowerCamelCase , axis=1 )
return metric.compute(predictions=_lowerCamelCase , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCamelCase_ = default_data_collator
elif training_args.fpaa:
lowerCamelCase_ = DataCollatorWithPadding(_lowerCamelCase , pad_to_multiple_of=8 )
else:
lowerCamelCase_ = None
# Initialize our Trainer
lowerCamelCase_ = Trainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_lowerCamelCase , tokenizer=_lowerCamelCase , data_collator=_lowerCamelCase , )
# Training
if training_args.do_train:
lowerCamelCase_ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ = last_checkpoint
lowerCamelCase_ = trainer.train(resume_from_checkpoint=_lowerCamelCase )
lowerCamelCase_ = train_result.metrics
lowerCamelCase_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCamelCase )
)
lowerCamelCase_ = min(_lowerCamelCase , len(_lowerCamelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , _lowerCamelCase )
trainer.save_metrics('train' , _lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCamelCase_ = trainer.evaluate(eval_dataset=_lowerCamelCase )
lowerCamelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowerCamelCase )
lowerCamelCase_ = min(_lowerCamelCase , len(_lowerCamelCase ) )
trainer.log_metrics('eval' , _lowerCamelCase )
trainer.save_metrics('eval' , _lowerCamelCase )
# Prediction
if training_args.do_predict:
logger.info('*** Predict ***' )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = trainer.predict(_lowerCamelCase , metric_key_prefix='predict' )
lowerCamelCase_ = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(_lowerCamelCase )
)
lowerCamelCase_ = min(_lowerCamelCase , len(_lowerCamelCase ) )
trainer.log_metrics('predict' , _lowerCamelCase )
trainer.save_metrics('predict' , _lowerCamelCase )
lowerCamelCase_ = np.argmax(_lowerCamelCase , axis=1 )
lowerCamelCase_ = os.path.join(training_args.output_dir , 'predictions.txt' )
if trainer.is_world_process_zero():
with open(_lowerCamelCase , 'w' ) as writer:
writer.write('index\tprediction\n' )
for index, item in enumerate(_lowerCamelCase ):
lowerCamelCase_ = label_list[item]
writer.write(F'''{index}\t{item}\n''' )
if __name__ == "__main__":
main()
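The collator branch above is worth a second look: with dynamic padding, pad_to_multiple_of=8 rounds every batch's sequence length up to a multiple of 8 so fp16 matmuls can hit Tensor Core tile sizes. A minimal, self-contained sketch of that path (the multilingual BERT checkpoint and the sentence pairs are illustrative, not taken from the script):

from transformers import AutoTokenizer, DataCollatorWithPadding

tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")
collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)

# Two XNLI-style premise/hypothesis pairs of different lengths; the collator
# pads them to a common length that is a multiple of 8 at batch-creation time.
features = [
    tokenizer("The cat sleeps.", "An animal is resting."),
    tokenizer("He bought a very old and rather rusty bicycle.", "He made a purchase."),
]
batch = collator(features)
print(batch["input_ids"].shape)  # second dimension is divisible by 8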
| 549 | 1 |
def join(separator: str, separated: list[str]) -> str:
    """
    Joins a list of strings with a separator string.

    >>> join("", ["a", "b", "c", "d"])
    'abcd'
    >>> join("#", ["a", "b", "c", "d"])
    'a#b#c#d'
    >>> join(" ", ["You", "are", "amazing!"])
    'You are amazing!'
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
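For reference, the standard-library equivalent is str.join, which also avoids having to strip a trailing separator:

print("#".join(["a", "b", "c", "d"]))  # a#b#c#d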
| 700 |
def kth_permutation(k: int, n: int) -> list[int]:
    """
    Finds the k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.

    >>> kth_permutation(0, 5)
    [0, 1, 2, 3, 4]
    >>> kth_permutation(1, 4)
    [0, 1, 3, 2]
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find the permutation digit by digit in the factorial number system
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
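A quick sanity check of the factorial-number-system indexing (k is 0-indexed):

print(kth_permutation(0, 4))   # [0, 1, 2, 3] -- the first permutation
print(kth_permutation(23, 4))  # [3, 2, 1, 0] -- the last of 4! = 24 permutations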
| 604 | 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
snake_case_ = XCLIPTextConfig()
# derive patch size from model name
snake_case_ = model_name.find("patch" )
snake_case_ = int(model_name[start_idx + len("patch" ) : start_idx + len("patch" ) + 2] )
snake_case_ = XCLIPVisionConfig(patch_size=__snake_case , num_frames=__snake_case )
if "large" in model_name:
snake_case_ = 768
snake_case_ = 3_072
snake_case_ = 12
snake_case_ = 1_024
snake_case_ = 4_096
snake_case_ = 16
snake_case_ = 24
snake_case_ = 768
snake_case_ = 3_072
if model_name == "xclip-large-patch14-16-frames":
snake_case_ = 336
snake_case_ = XCLIPConfig.from_text_vision_configs(__snake_case , __snake_case )
if "large" in model_name:
snake_case_ = 768
return config
def rename_key(name):
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
if key.startswith("visual" ):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
snake_case_ = val[
:dim, :
]
snake_case_ = val[
dim : dim * 2, :
]
snake_case_ = val[
-dim:, :
]
else:
snake_case_ = val[
:dim
]
snake_case_ = val[
dim : dim * 2
]
snake_case_ = val[
-dim:
]
else:
if "weight" in key:
snake_case_ = val[
:dim, :
]
snake_case_ = val[
dim : dim * 2, :
]
snake_case_ = val[
-dim:, :
]
else:
snake_case_ = val[:dim]
snake_case_ = val[
dim : dim * 2
]
snake_case_ = val[-dim:]
elif key.startswith("mit" ):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[dim : dim * 2, :]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[
dim : dim * 2, :
]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[
dim : dim * 2
]
snake_case_ = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset", )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
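Once converted (or using the published hub checkpoints), the model runs as a zero-shot video classifier. A minimal sketch; microsoft/xclip-base-patch32 is assumed to be the published hub id for the base model, and the random frames stand in for a real decoded video:

import numpy as np
from transformers import XCLIPModel, XCLIPProcessor

processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")
model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32")

video = list(np.random.randint(0, 255, (8, 224, 224, 3), dtype=np.uint8))  # 8 dummy RGB frames

inputs = processor(text=["playing sports", "eating spaghetti"], videos=video, return_tensors="pt", padding=True)
probs = model(**inputs).logits_per_video.softmax(dim=1)
print(probs)  # one probability per candidate label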
| 508 |
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)

    >>> resistor_parallel([3.21389, 2, 3])
    0.8737571620498019
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn

    >>> resistor_series([3.21389, 2, 3])
    8.21389
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
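The two helpers compose for mixed networks; for example, a 2 ohm resistor in series with a parallel pair of 4 ohm resistors:

total = resistor_series([2, resistor_parallel([4, 4])])
print(total)  # 4.0 -- the parallel pair contributes 2 ohms, plus 2 ohms in series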
| 215 | 0 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
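Outside the test harness the scheduler can be driven by hand; a minimal sketch in which random tensors stand in for a real denoiser's noise predictions:

import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
scheduler.set_timesteps(25)

sample = torch.randn(1, 3, 64, 64)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    model_output = torch.randn(1, 3, 64, 64)  # a real pipeline uses the UNet's epsilon prediction here
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample
print(sample.shape)  # torch.Size([1, 3, 64, 64])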
| 715 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
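The payoff of the _LazyModule registration is that importing the package is cheap: heavy submodules are only imported on first attribute access. An illustrative check (torch must be installed for the modeling import to succeed):

import sys

import transformers.models.bert as bert

print("transformers.models.bert.modeling_bert" in sys.modules)  # False: nothing heavy imported yet
_ = bert.BertModel  # first attribute access triggers the real import
print("transformers.models.bert.modeling_bert" in sys.modules)  # True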
| 672 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" Bloom tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
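Typical usage just loads the published tokenizer files; bigscience/bloom-560m is one of the checkpoints listed in the map above:

from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
enc = tokenizer("Hello world", return_tensors="pt")
print(enc.input_ids)                       # token ids (exact values depend on the vocab)
print(tokenizer.decode(enc.input_ids[0]))  # Hello world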
| 372 |
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """CLIPTokenizer in which one placeholder token can expand to several vectors (multi-vector textual inversion)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
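In a multi-vector textual-inversion setup this tokenizer is paired with a resized text-encoder embedding table; a sketch of the tokenizer side alone (the placeholder string is illustrative):

tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)

# "<cat-toy>" now expands to "<cat-toy>_0 ... <cat-toy>_3" before encoding
ids = tokenizer("a photo of <cat-toy>", return_tensors="pt").input_ids
print(ids.shape)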
| 681 | 0 |
import numpy as np
import torch
import tqdm

from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(self, value_function, unet, scheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        # overwrite the conditioned timesteps of the trajectory with the known states
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
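A hedged sketch of the planning loop this pipeline is designed for; the D4RL environment and hub checkpoint names are illustrative and require the experimental RL extras to be installed:

import d4rl  # noqa: F401  (registers the offline-RL gym environments)
import gym
from diffusers.experimental import ValueGuidedRLPipeline

env = gym.make("hopper-medium-v2")
pipeline = ValueGuidedRLPipeline.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32", env=env)

obs = env.reset()
for _ in range(100):
    action = pipeline(obs, planning_horizon=32)  # plan trajectories, return the best first action
    obs, reward, done, _ = env.step(action)
    if done:
        break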
| 156 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention_forwardGenerator_pass = False
@property
    def text_embedder_hidden_size(self):
return 3_2
@property
    def time_input_dim(self):
return 3_2
@property
    def block_out_channels_0(self):
return self.time_input_dim
@property
    def time_embed_dim(self):
return self.time_input_dim * 4
@property
    def cross_attention_dim(self):
return 1_0_0
@property
    def dummy_unet(self):
torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNet2DConditionModel(**model_kwargs)
return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs)
return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt="", ).to_tuple()

        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=100, height=512, width=512, strength=0.5, output_type="np", )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
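Outside the test, the same two-stage flow looks like this in user code (GPU assumed; the input photo path and the random depth hint are placeholders for real data):

import torch
from PIL import Image
from diffusers import KandinskyV22ControlnetImg2ImgPipeline, KandinskyV22PriorEmb2EmbPipeline

pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
).to("cuda")

init_image = Image.open("cat.png").resize((512, 512))  # photo to edit (placeholder path)
hint = torch.rand(1, 3, 512, 512)                      # stands in for a real depth map in [0, 1]

image_emb, zero_emb = pipe_prior("A robot, 4k photo", image=init_image, strength=0.85, negative_prompt="").to_tuple()
out = pipe(
    image=init_image, image_embeds=image_emb, negative_image_embeds=zero_emb, hint=hint,
    num_inference_steps=50, height=512, width=512, strength=0.5,
).images[0]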
| 156 | 1 |
"""simple docstring"""
from __future__ import annotations
def a ( __snake_case : list[int] ):
'''simple docstring'''
return len(set(__snake_case ) ) == len(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
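Building the whole set up front is O(n) but always touches every element; an early-exit variant (an added sketch, not part of the original module) stops at the first duplicate:

def all_unique_early_exit(data: list[int]) -> bool:
    seen = set()
    for item in data:
        if item in seen:
            return False
        seen.add(item)
    return True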
| 608 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _snake_case ( A__ , A__ ):
'''simple docstring'''
@register_to_config
def __init__( self : Tuple , snake_case : int = 128 , snake_case : int = 256 , snake_case : float = 2_000.0 , snake_case : int = 768 , snake_case : int = 12 , snake_case : int = 12 , snake_case : int = 64 , snake_case : int = 2_048 , snake_case : float = 0.1 , ):
super().__init__()
UpperCAmelCase_ :Optional[Any] = nn.Sequential(
nn.Linear(snake_case , d_model * 4 , bias=snake_case ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=snake_case ) , nn.SiLU() , )
UpperCAmelCase_ :Optional[int] = nn.Embedding(snake_case , snake_case )
UpperCAmelCase_ :Any = False
UpperCAmelCase_ :Union[str, Any] = nn.Linear(snake_case , snake_case , bias=snake_case )
UpperCAmelCase_ :Any = nn.Dropout(p=snake_case )
UpperCAmelCase_ :Any = nn.ModuleList()
for lyr_num in range(snake_case ):
# FiLM conditional T5 decoder
UpperCAmelCase_ :List[str] = DecoderLayer(d_model=snake_case , d_kv=snake_case , num_heads=snake_case , d_ff=snake_case , dropout_rate=snake_case )
self.decoders.append(snake_case )
UpperCAmelCase_ :List[Any] = TaLayerNorm(snake_case )
UpperCAmelCase_ :str = nn.Dropout(p=snake_case )
UpperCAmelCase_ :str = nn.Linear(snake_case , snake_case , bias=snake_case )
def snake_case_ ( self : str , snake_case : Tuple , snake_case : Optional[int] ):
UpperCAmelCase_ :Optional[Any] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def snake_case_ ( self : Optional[Any] , snake_case : str , snake_case : str , snake_case : Optional[Any] ):
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ :int = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
UpperCAmelCase_ :Union[str, Any] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
UpperCAmelCase_ :Tuple = self.conditioning_emb(snake_case ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
UpperCAmelCase_ :Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
UpperCAmelCase_ :List[str] = torch.broadcast_to(
torch.arange(snake_case , device=decoder_input_tokens.device ) , (batch, seq_length) , )
UpperCAmelCase_ :int = self.position_encoding(snake_case )
UpperCAmelCase_ :int = self.continuous_inputs_projection(snake_case )
inputs += position_encodings
UpperCAmelCase_ :Dict = self.dropout(snake_case )
# decoder: No padding present.
UpperCAmelCase_ :int = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
UpperCAmelCase_ :int = [(x, self.encoder_decoder_mask(snake_case , snake_case )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
UpperCAmelCase_ :Tuple = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
UpperCAmelCase_ :List[Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
UpperCAmelCase_ :Optional[Any] = lyr(
snake_case , conditioning_emb=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )[0]
UpperCAmelCase_ :List[str] = self.decoder_norm(snake_case )
UpperCAmelCase_ :Dict = self.post_dropout(snake_case )
UpperCAmelCase_ :Optional[int] = self.spec_out(snake_case )
return spec_out
class _snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : str , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : str , snake_case : Any , snake_case : Optional[Any] , snake_case : int=1e-6 ):
super().__init__()
UpperCAmelCase_ :Tuple = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=snake_case , d_kv=snake_case , num_heads=snake_case , dropout_rate=snake_case ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=snake_case , d_kv=snake_case , num_heads=snake_case , dropout_rate=snake_case , layer_norm_epsilon=snake_case , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=snake_case , d_ff=snake_case , dropout_rate=snake_case , layer_norm_epsilon=snake_case ) )
def snake_case_ ( self : List[str] , snake_case : Dict , snake_case : Tuple=None , snake_case : int=None , snake_case : Dict=None , snake_case : Dict=None , snake_case : Optional[int]=None , ):
UpperCAmelCase_ :str = self.layer[0](
snake_case , conditioning_emb=snake_case , attention_mask=snake_case , )
if encoder_hidden_states is not None:
UpperCAmelCase_ :Dict = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
UpperCAmelCase_ :str = self.layer[1](
snake_case , key_value_states=snake_case , attention_mask=snake_case , )
# Apply Film Conditional Feed Forward layer
UpperCAmelCase_ :Union[str, Any] = self.layer[-1](snake_case , snake_case )
return (hidden_states,)
class _snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , snake_case : Dict , snake_case : str , snake_case : int , snake_case : Union[str, Any] ):
super().__init__()
UpperCAmelCase_ :List[Any] = TaLayerNorm(snake_case )
UpperCAmelCase_ :str = TaFiLMLayer(in_features=d_model * 4 , out_features=snake_case )
UpperCAmelCase_ :Optional[int] = Attention(query_dim=snake_case , heads=snake_case , dim_head=snake_case , out_bias=snake_case , scale_qk=snake_case )
UpperCAmelCase_ :Dict = nn.Dropout(snake_case )
def snake_case_ ( self : Any , snake_case : Tuple , snake_case : Tuple=None , snake_case : List[Any]=None , ):
# pre_self_attention_layer_norm
UpperCAmelCase_ :List[Any] = self.layer_norm(snake_case )
if conditioning_emb is not None:
UpperCAmelCase_ :Optional[int] = self.FiLMLayer(snake_case , snake_case )
# Self-attention block
UpperCAmelCase_ :Any = self.attention(snake_case )
UpperCAmelCase_ :str = hidden_states + self.dropout(snake_case )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , snake_case : List[Any] , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Tuple , snake_case : Optional[int] ):
super().__init__()
UpperCAmelCase_ :List[Any] = Attention(query_dim=snake_case , heads=snake_case , dim_head=snake_case , out_bias=snake_case , scale_qk=snake_case )
UpperCAmelCase_ :Optional[int] = TaLayerNorm(snake_case , eps=snake_case )
UpperCAmelCase_ :int = nn.Dropout(snake_case )
def snake_case_ ( self : str , snake_case : List[str] , snake_case : List[str]=None , snake_case : str=None , ):
UpperCAmelCase_ :List[Any] = self.layer_norm(snake_case )
UpperCAmelCase_ :Any = self.attention(
snake_case , encoder_hidden_states=snake_case , attention_mask=attention_mask.squeeze(1 ) , )
UpperCAmelCase_ :str = hidden_states + self.dropout(snake_case )
return layer_output
class _snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : int , snake_case : int , snake_case : Optional[Any] , snake_case : Dict , snake_case : Tuple ):
super().__init__()
UpperCAmelCase_ :Dict = TaDenseGatedActDense(d_model=snake_case , d_ff=snake_case , dropout_rate=snake_case )
UpperCAmelCase_ :List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=snake_case )
UpperCAmelCase_ :Union[str, Any] = TaLayerNorm(snake_case , eps=snake_case )
UpperCAmelCase_ :Optional[int] = nn.Dropout(snake_case )
def snake_case_ ( self : Union[str, Any] , snake_case : Union[str, Any] , snake_case : Optional[int]=None ):
UpperCAmelCase_ :List[Any] = self.layer_norm(snake_case )
if conditioning_emb is not None:
UpperCAmelCase_ :Tuple = self.film(snake_case , snake_case )
UpperCAmelCase_ :Any = self.DenseReluDense(snake_case )
UpperCAmelCase_ :List[Any] = hidden_states + self.dropout(snake_case )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : int , snake_case : Optional[int] , snake_case : int , snake_case : str ):
super().__init__()
UpperCAmelCase_ :Any = nn.Linear(snake_case , snake_case , bias=snake_case )
UpperCAmelCase_ :Optional[int] = nn.Linear(snake_case , snake_case , bias=snake_case )
UpperCAmelCase_ :Dict = nn.Linear(snake_case , snake_case , bias=snake_case )
UpperCAmelCase_ :Optional[int] = nn.Dropout(snake_case )
UpperCAmelCase_ :str = NewGELUActivation()
def snake_case_ ( self : int , snake_case : Any ):
UpperCAmelCase_ :List[str] = self.act(self.wi_a(snake_case ) )
UpperCAmelCase_ :str = self.wi_a(snake_case )
UpperCAmelCase_ :str = hidden_gelu * hidden_linear
UpperCAmelCase_ :Any = self.dropout(snake_case )
UpperCAmelCase_ :int = self.wo(snake_case )
return hidden_states
class TaLayerNorm(nn.Module):
    """T5-style layer norm: scale-only RMS normalization, no bias, no mean subtraction."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
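# Numerical sketch (not part of the original module) of the RMS normalization
# above: after scaling by the inverse root-mean-square, every feature vector
# has approximately unit RMS. Toy tensor sizes are arbitrary.
def _demo_rms_norm():
    torch.manual_seed(0)
    h = torch.randn(2, 4, 8)
    variance = h.pow(2).mean(-1, keepdim=True)
    normed = h * torch.rsqrt(variance + 1e-6)
    assert torch.allclose(normed.pow(2).mean(-1), torch.ones(2, 4), atol=1e-3)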
class NewGELUActivation(nn.Module):
    """Gaussian Error Linear Unit using the tanh approximation (as in GPT-2/BERT)."""

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
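# Quick sketch (added for illustration): the tanh formula above should agree
# with PyTorch's built-in tanh-approximate GELU, available since torch 1.12 as
# F.gelu(..., approximate="tanh").
def _demo_new_gelu_matches_builtin():
    import torch.nn.functional as F

    x = torch.linspace(-3.0, 3.0, steps=101)
    assert torch.allclose(NewGELUActivation()(x), F.gelu(x, approximate="tanh"), atol=1e-6)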
class TaFiLMLayer(nn.Module):
    """Feature-wise Linear Modulation (FiLM): predicts a per-feature scale and shift from a conditioning embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
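# Shape sketch for the FiLM layer above (d_model=8 is an arbitrary toy size):
# the conditioning embedding is projected to a per-feature scale and shift that
# broadcast over the sequence, so modulation preserves the input shape.
def _demo_film_layer():
    film = TaFiLMLayer(in_features=8 * 4, out_features=8)
    x = torch.randn(2, 16, 8)                    # (batch, seq, d_model)
    conditioning_emb = torch.randn(2, 1, 8 * 4)  # (batch, 1, d_model * 4)
    assert film(x, conditioning_emb).shape == x.shape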
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    """Attach whole-word-masking reference indices (one JSON list per line) to a Dataset."""
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
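# A tiny self-contained sketch (made-up token ids and refs, not real Chinese
# WWM data) of what `add_chinese_references` produces: a new `chinese_ref`
# column aligned row-for-row with the input dataset.
def _demo_add_chinese_references():
    import tempfile

    ds = Dataset.from_dict({"input_ids": [[101, 102], [103, 104]]})
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
        f.write(json.dumps([1]) + "\n" + json.dumps([]) + "\n")
    ds = add_chinese_references(ds, f.name)
    assert ds["chinese_ref"] == [[1], []]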
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
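# Sketch of the evaluation metric used above: perplexity is the exponential of
# the mean eval cross-entropy loss (a hypothetical loss value is used here).
def _demo_perplexity():
    eval_loss = 2.0
    assert math.isclose(math.exp(eval_loss), 7.38905609893065)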
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
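# Example invocation (hypothetical paths; the flags map onto the dataclasses above):
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file /path/to/train.txt \
#       --train_ref_file /path/to/train_ref.json \
#       --do_train --do_eval \
#       --output_dir /tmp/mlm_wwm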
if __name__ == "__main__":
    main()


import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)

BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
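# Minimal sketch of the pattern used throughout the tests below: configure an
# FSDP plugin purely from (mocked) environment variables. The
# FSDP_SHARDING_STRATEGY key is an assumption about the accelerate version
# under test and may differ across releases.
def _demo_fsdp_plugin_from_env():
    env = dict(
        ACCELERATE_USE_FSDP="true",
        MASTER_ADDR="localhost",
        MASTER_PORT="10999",
        RANK="0",
        LOCAL_RANK="0",
        WORLD_SIZE="1",
        FSDP_SHARDING_STRATEGY="1",  # 1 == FULL_SHARD
    )
    with mockenv_context(**env):
        return FullyShardedDataParallelPlugin().sharding_strategy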
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )
    def test_sharding_strategy(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
_snake_case : Optional[Any] = self.dist_env.copy()
_snake_case : List[str] = f"""{i + 1}"""
_snake_case : int = strategy
with mockenv_context(**lowercase_ ):
_snake_case : Any = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
    def test_backward_prefetch(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
_snake_case : List[str] = self.dist_env.copy()
_snake_case : List[Any] = prefetch_policy
with mockenv_context(**lowercase_ ):
_snake_case : List[str] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
    def test_state_dict_type(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
_snake_case : str = self.dist_env.copy()
_snake_case : List[str] = state_dict_type
with mockenv_context(**lowercase_ ):
_snake_case : List[Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
for policy in FSDP_AUTO_WRAP_POLICY:
_snake_case : Optional[Any] = self.dist_env.copy()
_snake_case : List[str] = policy
if policy == "TRANSFORMER_BASED_WRAP":
_snake_case : List[str] = "BertLayer"
elif policy == "SIZE_BASED_WRAP":
_snake_case : str = "2000"
with mockenv_context(**lowercase_ ):
_snake_case : List[str] = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
_snake_case : str = self.dist_env.copy()
_snake_case : Tuple = "TRANSFORMER_BASED_WRAP"
_snake_case : Union[str, Any] = "T5Layer"
with mockenv_context(**lowercase_ ):
_snake_case : Optional[int] = FullyShardedDataParallelPlugin()
with self.assertRaises(lowercase_ ) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception ) )
_snake_case : str = self.dist_env.copy()
_snake_case : Any = "SIZE_BASED_WRAP"
_snake_case : str = "0"
with mockenv_context(**lowercase_ ):
_snake_case : Optional[int] = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
    def test_mixed_precision(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
_snake_case : Union[str, Any] = self.dist_env.copy()
_snake_case : int = mp_dtype
with mockenv_context(**lowercase_ ):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
AcceleratorState._reset_state(lowercase_ )
    def test_cpu_offload(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
        for flag in [True, False]:
            env = self.dist_env.copy()
            # env key assumed from accelerate's FSDP plugin configuration
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
if strategy.lower() in config:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append("--mixed_precision=no" )
else:
cmd_config.append("--mixed_precision=fp16" )
if "cpu_offload" in config:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
"--use_fsdp",
"--mixed_precision=fp16",
"--fsdp_transformer_layer_cls_to_wrap=BertLayer",
]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
            state_dict_config_index = len(cmd_config)
for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
cmd_config.append(f"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
"--partial_train_epoch=1",
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
cmd_config.extend(
[
f"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["--mixed_precision=fp16"] )
else:
cmd_config.extend(["--mixed_precision=no"] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["--use_fsdp"] )
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
if strategy.lower() in spec:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
f"""--n_train={self.n_train}""",
f"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
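# For reference, the command assembled in test_peak_memory_usage above expands
# to something like the following (illustrative values, made-up output dir):
#
#   accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 \
#       --mixed_precision=fp16 --use_fsdp --fsdp_sharding_strategy=1 \
#       --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#       --fsdp_transformer_layer_cls_to_wrap=BertLayer \
#       test_peak_memory_usage.py --output_dir=/tmp/fsdp \
#       --peak_memory_upper_bound=2000 --n_train=160 --n_val=160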
def get_set_bits_count(number: int) -> int:
    """Count the set bits (1s) in the binary representation of a non-negative integer.

    >>> get_set_bits_count(25)
    3
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    """Configuration class for the time-series Transformer model."""

    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
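# Sketch (illustrative values) of the feature bookkeeping above: feature_size
# is input_size * len(lags_sequence) plus the scaler/static/time features
# counted by _number_of_features.
def _demo_feature_size():
    cfg = TimeSeriesTransformerConfig(prediction_length=24, num_time_features=2)
    assert cfg.feature_size == cfg.input_size * len(cfg.lags_sequence) + cfg._number_of_features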
"""simple docstring"""
def a ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
while b:
__magic_name__, __magic_name__: str = b, a % b
return a
def a ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
return a if b == 0 else euclidean_gcd_recursive(__UpperCAmelCase , a % b )
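# Sketch: the iterative and recursive forms agree, and the gcd satisfies
# gcd(a, b) * lcm(a, b) == a * b (lcm computed inline just for the check).
def _demo_gcd_identity() -> None:
    a, b = 252, 105
    g = euclidean_gcd(a, b)
    assert g == euclidean_gcd_recursive(a, b) == 21
    assert g * (a * b // g) == a * b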
def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
if __name__ == "__main__":
main()
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __A ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
UpperCAmelCase__ = IFInpaintingSuperResolutionPipeline
UpperCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
UpperCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
UpperCAmelCase__ = PipelineTesterMixin.required_optional_params - {"latents"}
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
return self._get_superresolution_dummy_components()
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Any=0 ) -> Dict:
if str(__snake_case ).startswith("""mps""" ):
__magic_name__: int = torch.manual_seed(__snake_case )
else:
__magic_name__: List[Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__magic_name__: Tuple = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__: List[str] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__: Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__: Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCamelCase__ ( self : Dict ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCamelCase__ ( self : int ) -> str:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCamelCase__ ( self : Any ) -> List[Any]:
self._test_save_load_local()
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 213 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }
    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
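# Quick sketch: for the default task the ONNX export declares dynamic batch and
# sequence axes for both inputs. The OnnxConfig constructor signature
# (config, task="default") is assumed from the transformers version this file
# targets.
def _demo_onnx_inputs():
    onnx_config = DistilBertOnnxConfig(DistilBertConfig())
    assert list(onnx_config.inputs.keys()) == ["input_ids", "attention_mask"]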
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
SCREAMING_SNAKE_CASE__ = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def num_embed(self):
        return 12
    @property
    def num_embeds_ada_norm(self):
        return 12
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3,
        )
        return model
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4,
            num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModel(config)
    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
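# Sketch (hypothetical sizes): with learnable=True, the classifier-free
# sampling embedding used in the tests above is a trainable
# (length, hidden_size) parameter. The `embeddings` attribute name is assumed
# from the diffusers pipeline implementation.
def _demo_learned_cf_embeddings():
    emb = LearnedClassifierFreeSamplingEmbeddings(learnable=True, hidden_size=32, length=77)
    assert emb.embeddings.shape == (77, 32)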
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
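# Illustrative invocation (file name and paths are assumptions; adjust to your checkout):
#
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50
#
# `--model_name` is passed straight to torch.hub.load, so it must match one of the
# entry points exposed by the DeppMeng/ConditionalDETR hub repo.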
| 141 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
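# Illustrative companion test module (hypothetical file tests/test_example.py), sketching how
# the collection hook above interacts with marker selection: unmarked tests receive the "unit"
# marker automatically, so `pytest -m unit` runs them and `pytest -m "not unit"` skips them.
#
#   import pytest
#
#   def test_fast_path():          # gets pytest.mark.unit added by pytest_collection_modifyitems
#       assert 1 + 1 == 2
#
#   @pytest.mark.integration
#   def test_hub_roundtrip():      # keeps its explicit "integration" marker
#       ...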
| 220 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
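
    # Illustrative run on an assumed sample sentence: ordinary English prose should
    # rank common letters near the front of the frequency order and score close to
    # the maximum of 12 on the ETAOIN match test.
    sample = "Beware the Jabberwock, my son! The jaws that bite, the claws that catch!"
    print(get_frequency_order(sample))
    print(english_freq_match_score(sample))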
| 220 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 579 |
"""simple docstring"""
import datasets
_CITATION = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 579 | 1 |
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
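
# Note on duplicates (illustrative): equal keys overwrite the existing node's value in
# `insert` instead of creating a new node, so repeated elements are collapsed,
# e.g. tree_sort([3, 1, 3, 2]) returns [1, 2, 3].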
if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 686 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio"""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
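# Quick sanity sketch (assumed numbers): with the 16 kHz default, a 20-second `max_length`
# keeps at most 16_000 * 20 = 320_000 samples, so a 30-second clip is cut to a random
# 20-second window while a 10-second clip is returned unchanged.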
@dataclass
class UpperCamelCase_ :
_a : Optional[str] = field(default=snake_case__ , metadata={'help': 'Name of a dataset from the datasets package'} )
_a : Optional[str] = field(
default=snake_case__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
_a : Optional[str] = field(
default=snake_case__ , metadata={'help': 'A file containing the training audio paths and labels.'} )
_a : Optional[str] = field(
default=snake_case__ , metadata={'help': 'A file containing the validation audio paths and labels.'} )
_a : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_a : str = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_a : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
_a : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''} )
_a : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_a : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_a : float = field(
default=2_0 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCamelCase_ :
_a : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_a : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_a : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'} )
_a : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_a : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Name or path of preprocessor config.'} )
_a : bool = field(
default=snake_case__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'} )
_a : bool = field(
default=snake_case__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'} )
_a : bool = field(
default=snake_case__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_a : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
_a : bool = field(
default=snake_case__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'The argument `--freeze_feature_extractor` is deprecated and '
'should not be used in combination with `--freeze_feature_encoder`.'
'Only make use of `--freeze_feature_encoder`.' )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,datefmt='%m/%d/%Y %H:%M:%S' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
'Make sure to set `--audio_column_name` to the correct audio column - one of '
F"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
'Make sure to set `--label_column_name` to the correct text column - one of '
F"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions"""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics('train' ,train_result.metrics )
trainer.save_metrics('train' ,train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 364 | 0 |
"""simple docstring"""
def stooge_sort(arr):
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return
    # If first element is smaller than the last then swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = int((h - i + 1) / 3)
        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))
        # Recursively sort last 2/3 elements
        stooge(arr, i + t, (h))
        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))
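
# Complexity note (illustrative): each call recurses three times on 2/3 of the range,
# giving T(n) = 3 * T(2n/3) + O(1), i.e. roughly O(n ** (log 3 / log 1.5)) ≈ O(n ** 2.71).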
if __name__ == "__main__":
lowercase__ = input('Enter numbers separated by a comma:\n').strip()
lowercase__ = [int(item) for item in user_input.split(',')]
print(stooge_sort(unsorted))
| 217 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
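# Illustrative instantiation (the asserted values are just the defaults above):
#
#   config = Speech2TextConfig()
#   assert config.d_model == 256 and config.conv_kernel_sizes == [5, 5]
#
# Overriding `conv_kernel_sizes` without also updating `num_conv_layers`, e.g.
# Speech2TextConfig(conv_kernel_sizes=(5, 5, 5)), raises the ValueError above.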
| 217 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    # Incremental sieve: factor_map maps an upcoming composite to one of its prime factors.
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # current number is composite: push its factor forward to the next multiple
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # current number is prime: schedule its square and yield it
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2
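
# How the incremental sieve works (illustrative trace): yielding 2 schedules {4: 2};
# visiting 4 pops factor 2 and reschedules 6, then visiting 6 reschedules 8, and so on,
# so each composite is crossed off exactly when reached, with memory proportional to
# the number of primes seen so far rather than to `limit`.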
if __name__ == "__main__":
print(solution())
| 607 |
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase: Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
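# Illustrative invocation (file name and paths are assumptions; the config JSON must match
# the checkpoint, e.g. a Blenderbot-3B config for a 3B ParlAI checkpoint):
#
#   python convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py \
#       --src_path ./blenderbot-model.bin \
#       --save_dir ./hf_blenderbot \
#       --hf_config_json ./blenderbot-3b-config.json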
| 607 | 1 |
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowercase_ = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
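# Illustrative call (the package name must be a key of `deps`; the hint string is an assumption):
#
#   dep_version_check("tokenizers", hint="pip install -U tokenizers")
#
# This raises if the installed version does not satisfy the pin recorded in
# dependency_versions_table.py, and returns silently otherwise.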
| 704 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6], )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
        # fmt: off
        expected_encoding = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="microsoft/speecht5_asr", revision="c5ef64c71905caeccde0e4462ef3f9077224c524", sequences=sequences)
| 65 | 0 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(self, parent, batch_size=13, image_size=[30, 30], patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, n_targets=8, num_detection_tokens=10):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
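        # with the defaults above: (30 // 2) * (30 // 2) = 225 patches, so
        # expected_seq_len = 225 + 1 + 10 = 236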
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return YolosConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels)
    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )
    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None
    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
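    # post_process_object_detection does the heavy lifting above: it keeps only
    # detections whose score clears `threshold` and rescales the normalized
    # pred_boxes to the (height, width) pairs passed via `target_sizes`.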
| 61 |
from math import sqrt
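# Sketch of the counting idea (assuming the classic "cuboid route" problem):
# for a cuboid a x b x c with a >= b >= c, the shortest surface path has length
# sqrt(a**2 + (b + c)**2). Below, a = max_cuboid_size and
# sum_shortest_sides = b + c; the increment counts the (b, c) splits of that
# sum satisfying 1 <= c <= b <= a.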
def solution(limit: int = 1_000_000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
| 81 | 0 |
from string import ascii_uppercase
CHAR_TO_INDEX = {char: i for i, char in enumerate(ascii_uppercase)}
INDEX_TO_CHAR = dict(enumerate(ascii_uppercase))
def generate_key(message: str, key: str) -> str:
    """Repeat the key cyclically until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
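# For the demo in main() below, generate_key("THE GERMAN ATTACK", "SECRET")
# repeats the key up to the 17-character message length: "SECRETSECRETSECRE".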
def cipher_text(message: str, key_new: str) -> str:
    """Encrypt `message` with the generated key, leaving spaces untouched."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (CHAR_TO_INDEX[letter] - CHAR_TO_INDEX[key_new[i]]) % 26
            i += 1
            cipher_text += INDEX_TO_CHAR[x]
    return cipher_text
def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt `cipher_text` back to the original message."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (CHAR_TO_INDEX[letter] + CHAR_TO_INDEX[key_new[i]] + 26) % 26
            i += 1
            or_txt += INDEX_TO_CHAR[x]
    return or_txt
def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 703 |
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1
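        # e.g. with M = 3 persons, final_mask = 0b111 = 7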
    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask, all persons have been assigned a task; return 1
        if mask == self.final_mask:
            return 1
        # if not everyone got a task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue

                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]
    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
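    # expected output: 10, the number of ways to give all 3 persons distinct
    # tasks from their allowed lists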
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 140 | 0 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
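# The stubs above only exist so that this module can be imported without
# Pillow installed; tests that really need vision support are gated by the
# @require_vision / @require_pytesseract decorators below.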
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 313 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
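# Note: the loaders above sample from y = a * x + b + noise, a task that
# DummyModel below (one learnable scale and one learnable shift) can fit, which
# keeps the checkpoint-resume comparisons deterministic under a fixed seed.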
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
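# The random draws returned by `train` act as a fingerprint of the RNG state:
# a resumed run only reproduces the baseline `ground_truth_rands` in the tests
# below if model, optimizer, and RNG state were all restored correctly.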
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; with total_limit=2 only the last two survive:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = "/tmp/accelerate/state_checkpointing"
UpperCAmelCase__ : List[Any] = DummyModel()
UpperCAmelCase__ : Optional[Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3)
UpperCAmelCase__ : Optional[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
UpperCAmelCase__ , UpperCAmelCase__ : Dict = dummy_dataloaders()
UpperCAmelCase__ : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
UpperCAmelCase__ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
UpperCAmelCase__ , UpperCAmelCase__ : Any = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
UpperCAmelCase__ : str = group["params"][0].device
break
assert param_device.type == accelerator.device.type
UpperCAmelCase__ : Optional[int] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
UpperCAmelCase__ : Optional[Any] = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
UpperCAmelCase__ : Any = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 313 | 1 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24_000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024, codebook_dim=None, use_conv_shortcut=True, **kwargs):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}"""
            )

        super().__init__(**kwargs)
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
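    # with the defaults: hop_length = 8 * 5 * 4 * 2 = 320,
    # frame_rate = ceil(24000 / 320) = 75, and
    # num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32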
| 50 |
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
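# e.g. "A" -> 1, "Z" -> 26, "AB" -> 1 * 26 + 2 = 28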
if __name__ == "__main__":
from doctest import testmod
testmod()
| 50 | 1 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    # Build a fully padded output array, then copy each (possibly truncated)
    # sequence into it. A tuple padding value yields a (batch, length, 2) array.
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        length = len(tensor[:sequence_length])
        if padding_side == "right":
            out_tensor[i, :length] = tensor[:sequence_length]
        else:
            # left padding (reconstructed indexing): write values into the trailing slots
            out_tensor[i, sequence_length - length :] = tensor[:sequence_length]

    return out_tensor.tolist()
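# For instance, padding_tensor([[1, 2], [3]], 0, "right", 3) returns
# [[1, 2, 0], [3, 0, 0]].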
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
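# e.g. is_punctuation("!") -> True, is_punctuation("a") -> False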
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    # NOTE: the class name is a reasonable reconstruction; the fields below
    # follow the standard DataCollatorMixin contract.
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
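# Note: the tuple padding value (-1, -1) used for "original_entity_spans" above
# routes padding_tensor into its (batch, sequence_length, 2) branch, so each
# padded entry is a two-element span rather than a scalar.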
| 682 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
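# Nothing heavy is imported eagerly here: the `_import_structure` map above just
# records which submodule provides each public name, and `_LazyModule` at the
# bottom resolves a name to a real import the first time it is accessed.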
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 660 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
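    # e.g. self.get_scheduler_config(beta_schedule="scaled_linear") yields the
    # defaults above with only `beta_schedule` overridden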
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self , **_lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.scheduler_classes[0]
lowerCAmelCase__ :List[str] = self.get_scheduler_config(**_lowerCAmelCase )
lowerCAmelCase__ :str = scheduler_class(**_lowerCAmelCase )
lowerCAmelCase__ :Optional[Any] = 10
lowerCAmelCase__ :int = self.dummy_model()
lowerCAmelCase__ :Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCAmelCase )
for i, t in enumerate(scheduler.prk_timesteps ):
lowerCAmelCase__ :Dict = model(_lowerCAmelCase , _lowerCAmelCase )
lowerCAmelCase__ :Tuple = scheduler.step_prk(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
lowerCAmelCase__ :Optional[Any] = model(_lowerCAmelCase , _lowerCAmelCase )
lowerCAmelCase__ :List[Any] = scheduler.step_plms(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
return sample
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = dict(self.forward_default_kwargs )
lowerCAmelCase__ :List[Any] = kwargs.pop("num_inference_steps" , _lowerCAmelCase )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase__ :Optional[int] = self.get_scheduler_config()
lowerCAmelCase__ :Union[str, Any] = scheduler_class(**_lowerCAmelCase )
lowerCAmelCase__ :Optional[Any] = self.dummy_sample
lowerCAmelCase__ :List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowerCAmelCase , "set_timesteps" ):
scheduler.set_timesteps(_lowerCAmelCase )
elif num_inference_steps is not None and not hasattr(_lowerCAmelCase , "set_timesteps" ):
lowerCAmelCase__ :Optional[int] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase__ :Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowerCAmelCase__ :List[Any] = dummy_past_residuals[:]
lowerCAmelCase__ :List[str] = scheduler.step_prk(_lowerCAmelCase , 0 , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
lowerCAmelCase__ :List[Any] = scheduler.step_prk(_lowerCAmelCase , 1 , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowerCAmelCase__ :Any = scheduler.step_plms(_lowerCAmelCase , 0 , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
lowerCAmelCase__ :Any = scheduler.step_plms(_lowerCAmelCase , 1 , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self ):
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowerCAmelCase )
lowerCAmelCase__ :int = self.scheduler_classes[0]
lowerCAmelCase__ :Any = self.get_scheduler_config(steps_offset=1 )
lowerCAmelCase__ :Any = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def snake_case_ ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowerCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=_lowerCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_lowerCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = 27
for scheduler_class in self.scheduler_classes:
lowerCAmelCase__ :Optional[int] = self.dummy_sample
lowerCAmelCase__ :Optional[Any] = 0.1 * sample
lowerCAmelCase__ :str = self.get_scheduler_config()
lowerCAmelCase__ :int = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(_lowerCAmelCase )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
lowerCAmelCase__ :Union[str, Any] = scheduler.step_prk(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
def snake_case_ ( self ):
'''simple docstring'''
with self.assertRaises(_lowerCAmelCase ):
lowerCAmelCase__ :Any = self.scheduler_classes[0]
lowerCAmelCase__ :Dict = self.get_scheduler_config()
lowerCAmelCase__ :Dict = scheduler_class(**_lowerCAmelCase )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.full_loop()
lowerCAmelCase__ :Union[str, Any] = torch.sum(torch.abs(_lowerCAmelCase ) )
lowerCAmelCase__ :Optional[int] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.2580 ) < 1e-3
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = self.full_loop(prediction_type="v_prediction" )
lowerCAmelCase__ :str = torch.sum(torch.abs(_lowerCAmelCase ) )
lowerCAmelCase__ :Dict = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 67.3986 ) < 1e-2
assert abs(result_mean.item() - 0.0878 ) < 1e-3
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = self.full_loop(set_alpha_to_one=_lowerCAmelCase , beta_start=0.01 )
lowerCAmelCase__ :List[Any] = torch.sum(torch.abs(_lowerCAmelCase ) )
lowerCAmelCase__ :Any = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 230.0399 ) < 1e-2
assert abs(result_mean.item() - 0.2995 ) < 1e-3
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = self.full_loop(set_alpha_to_one=_lowerCAmelCase , beta_start=0.01 )
lowerCAmelCase__ :Dict = torch.sum(torch.abs(_lowerCAmelCase ) )
lowerCAmelCase__ :List[Any] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 186.9482 ) < 1e-2
assert abs(result_mean.item() - 0.2434 ) < 1e-3
| 703 |
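# A compact sketch of the save/load round-trip the scheduler tests above
# verify, written against the public diffusers API (assumes diffusers and
# torch are installed).
import tempfile

import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(50)

with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)
    reloaded = PNDMScheduler.from_pretrained(tmpdir)

reloaded.set_timesteps(50)
# A faithful round-trip must reproduce the same timestep schedule.
assert torch.equal(scheduler.timesteps, reloaded.timesteps)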
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( _A , unittest.TestCase ):
"""simple docstring"""
A = KandinskyVaaPipeline
A = [
'''image_embeds''',
'''negative_image_embeds''',
]
A = ['''image_embeds''', '''negative_image_embeds''']
A = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
A = False
@property
def snake_case_ ( self ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ):
'''simple docstring'''
return 100
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :List[str] = {
"in_channels": 4,
            # Out channels is double the in channels because the model predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCAmelCase__ :List[str] = UNetaDConditionModel(**_lowerCAmelCase )
return model
@property
def snake_case_ ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = self.dummy_unet
lowerCAmelCase__ :List[str] = self.dummy_movq
lowerCAmelCase__ :List[Any] = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="linear" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=_lowerCAmelCase , )
lowerCAmelCase__ :Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
lowerCAmelCase__ :Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCAmelCase )
if str(_lowerCAmelCase ).startswith("mps" ):
lowerCAmelCase__ :List[Any] = torch.manual_seed(_lowerCAmelCase )
else:
lowerCAmelCase__ :Optional[int] = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
lowerCAmelCase__ :Optional[int] = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = "cpu"
lowerCAmelCase__ :Dict = self.get_dummy_components()
lowerCAmelCase__ :Tuple = self.pipeline_class(**_lowerCAmelCase )
lowerCAmelCase__ :List[Any] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowerCAmelCase__ :str = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
lowerCAmelCase__ :Dict = output.images
lowerCAmelCase__ :str = pipe(
**self.get_dummy_inputs(_lowerCAmelCase ) , return_dict=_lowerCAmelCase , )[0]
lowerCAmelCase__ :Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ :List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ :Tuple = np.array(
[0.623_7976, 1.0, 0.3644_1332, 1.0, 0.7063_9634, 0.2987_7186, 0.8565_2125, 0.521_6843, 0.5445_4046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
lowerCAmelCase__ :str = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCAmelCase )
lowerCAmelCase__ :str = KandinskyVaaPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
lowerCAmelCase__ :Union[str, Any] = pipeline.to(_lowerCAmelCase )
pipeline.set_progress_bar_config(disable=_lowerCAmelCase )
lowerCAmelCase__ :Optional[int] = "red cat, 4k photo"
lowerCAmelCase__ :Tuple = torch.Generator(device="cuda" ).manual_seed(0 )
lowerCAmelCase__ ,lowerCAmelCase__ :Dict = pipe_prior(
_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
lowerCAmelCase__ :List[Any] = torch.Generator(device="cuda" ).manual_seed(0 )
lowerCAmelCase__ :List[str] = pipeline(
image_embeds=_lowerCAmelCase , negative_image_embeds=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=100 , output_type="np" , )
lowerCAmelCase__ :int = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
| 111 | 0 |
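# A sketch of the two-stage Kandinsky 2.2 flow the integration test above
# exercises (assumes diffusers is installed, a CUDA device is available, and
# the two checkpoints can be downloaded).
import torch
from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline

prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
decoder = KandinskyV22Pipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
).to("cuda")

# Stage 1: the prior maps the text prompt to CLIP image embeddings.
generator = torch.Generator(device="cuda").manual_seed(0)
image_embeds, negative_image_embeds = prior(
    "red cat, 4k photo", generator=generator, num_inference_steps=5, negative_prompt=""
).to_tuple()

# Stage 2: the decoder denoises from those embeddings into pixels.
image = decoder(
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    generator=generator,
    num_inference_steps=50,
    output_type="pil",
).images[0]
image.save("red_cat.png")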
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a_ : Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __UpperCamelCase ( _UpperCamelCase ):
lowercase : int =["pixel_values"]
def __init__( self, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = PILImageResampling.BICUBIC, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = 1 / 255, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(**a_ )
lowerCamelCase_ =size if size is not None else {"shortest_edge": 224}
lowerCamelCase_ =get_size_dict(a_, default_to_square=a_ )
lowerCamelCase_ =crop_size if crop_size is not None else {"height": 224, "width": 224}
lowerCamelCase_ =get_size_dict(a_, default_to_square=a_, param_name='''crop_size''' )
lowerCamelCase_ =do_resize
lowerCamelCase_ =size
lowerCamelCase_ =resample
lowerCamelCase_ =do_center_crop
lowerCamelCase_ =crop_size
lowerCamelCase_ =do_rescale
lowerCamelCase_ =rescale_factor
lowerCamelCase_ =do_normalize
lowerCamelCase_ =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCamelCase_ =image_std if image_std is not None else OPENAI_CLIP_STD
lowerCamelCase_ =do_convert_rgb
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = PILImageResampling.BICUBIC, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =get_size_dict(a_, default_to_square=a_ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowerCamelCase_ =get_resize_output_image_size(a_, size=size['''shortest_edge'''], default_to_square=a_ )
return resize(a_, size=a_, resample=a_, data_format=a_, **a_ )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(a_, size=(size['''height'''], size['''width''']), data_format=a_, **a_ )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
return rescale(a_, scale=a_, data_format=a_, **a_ )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
return normalize(a_, mean=a_, std=a_, data_format=a_, **a_ )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = ChannelDimension.FIRST, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ =size if size is not None else self.size
lowerCamelCase_ =get_size_dict(a_, param_name='''size''', default_to_square=a_ )
lowerCamelCase_ =resample if resample is not None else self.resample
lowerCamelCase_ =do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ =crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ =get_size_dict(a_, param_name='''crop_size''', default_to_square=a_ )
lowerCamelCase_ =do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ =do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ =image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ =image_std if image_std is not None else self.image_std
lowerCamelCase_ =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCamelCase_ =make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCamelCase_ =[convert_to_rgb(a_ ) for image in images]
# All transformations expect numpy arrays.
lowerCamelCase_ =[to_numpy_array(a_ ) for image in images]
if do_resize:
lowerCamelCase_ =[self.resize(image=a_, size=a_, resample=a_ ) for image in images]
if do_center_crop:
lowerCamelCase_ =[self.center_crop(image=a_, size=a_ ) for image in images]
if do_rescale:
lowerCamelCase_ =[self.rescale(image=a_, scale=a_ ) for image in images]
if do_normalize:
lowerCamelCase_ =[self.normalize(image=a_, mean=a_, std=a_ ) for image in images]
lowerCamelCase_ =[to_channel_dimension_format(a_, a_ ) for image in images]
lowerCamelCase_ ={"pixel_values": images}
return BatchFeature(data=a_, tensor_type=a_ )
| 676 |
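# The processor above applies the standard CLIP chain: RGB conversion,
# shortest-edge resize, center crop, rescale to [0, 1], and normalization with
# the OpenAI CLIP statistics. The same chain expressed with torchvision
# (a sketch, not the processor's actual implementation):
from torchvision import transforms

OPENAI_CLIP_MEAN = (0.48145466, 0.4578275, 0.40821073)
OPENAI_CLIP_STD = (0.26862954, 0.26130258, 0.27577711)

clip_preprocess = transforms.Compose(
    [
        transforms.Lambda(lambda image: image.convert("RGB")),
        transforms.Resize(224, interpolation=transforms.InterpolationMode.BICUBIC),
        transforms.CenterCrop(224),
        transforms.ToTensor(),  # also rescales pixel values to [0, 1]
        transforms.Normalize(OPENAI_CLIP_MEAN, OPENAI_CLIP_STD),
    ]
)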
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 642 | 0 |
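# A minimal sketch of the TYPE_CHECKING guard used above: the guarded import
# is visible to static type checkers but never executes at runtime, so a
# heavy optional backend is only loaded on demand. `decimal` stands in here
# for such a backend module.
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from decimal import Decimal


def load_value() -> "Decimal":
    from decimal import Decimal  # deferred runtime import

    return Decimal("1.0")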
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class snake_case__ :
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int]=99 , __lowerCamelCase : Any=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Union[str, Any]=9 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Dict=False , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Any=37 , __lowerCamelCase : Any=8 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=0.002 , __lowerCamelCase : List[str]=1 , __lowerCamelCase : Any=0 , __lowerCamelCase : Union[str, Any]=0 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[str]=None , ) -> Union[str, Any]:
a = parent
a = batch_size
a = encoder_seq_length
a = decoder_seq_length
# For common tests
a = self.decoder_seq_length
a = is_training
a = use_attention_mask
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = d_ff
a = relative_attention_num_buckets
a = dropout_rate
a = initializer_factor
a = eos_token_id
a = pad_token_id
a = decoder_start_token_id
a = None
a = decoder_layers
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
return TaConfig.from_pretrained("google/umt5-base" )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : List[str]=None , __lowerCamelCase : Dict=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=None , ) -> Optional[Any]:
if attention_mask is None:
a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__lowerCamelCase )
if decoder_head_mask is None:
a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__lowerCamelCase )
if cross_attn_head_mask is None:
a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
a = input_ids.clamp(self.pad_token_id + 1 )
a = decoder_input_ids.clamp(self.pad_token_id + 1 )
a = self.get_config()
a = config.num_attention_heads
a = self.prepare_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, input_dict
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
a , a = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCAmelCase ( self : str ) -> int:
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , ) -> str:
a = UMTaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(
input_ids=__lowerCamelCase , decoder_input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase , decoder_attention_mask=__lowerCamelCase , )
a = model(input_ids=__lowerCamelCase , decoder_input_ids=__lowerCamelCase )
a = result.last_hidden_state
a = result.past_key_values
a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__lowerCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Tuple , ) -> List[Any]:
a = UMTaModel(config=__lowerCamelCase ).get_decoder().to(__lowerCamelCase ).eval()
# first forward pass
a = model(__lowerCamelCase , use_cache=__lowerCamelCase )
a = model(__lowerCamelCase )
a = model(__lowerCamelCase , use_cache=__lowerCamelCase )
self.parent.assertTrue(len(__lowerCamelCase ) == len(__lowerCamelCase ) )
self.parent.assertTrue(len(__lowerCamelCase ) == len(__lowerCamelCase ) + 1 )
a , a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
a = torch.cat([input_ids, next_tokens] , dim=-1 )
a = model(__lowerCamelCase )["last_hidden_state"]
a = model(__lowerCamelCase , past_key_values=__lowerCamelCase )["last_hidden_state"]
# select random slice
a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a = output_from_no_past[:, -1, random_slice_idx].detach()
a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
def __UpperCAmelCase ( self : int , __lowerCamelCase : str , __lowerCamelCase : int , ) -> List[str]:
a = UMTaModel(config=__lowerCamelCase ).to(__lowerCamelCase ).half().eval()
a = model(**__lowerCamelCase )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(__lowerCamelCase ).any().item() )
@require_torch
class snake_case__ (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE_ : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : str = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
SCREAMING_SNAKE_CASE_ : Tuple = [0.8, 0.9]
def __UpperCAmelCase ( self : Dict ) -> int:
a = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def __UpperCAmelCase ( self : int ) -> int:
a = self.model_tester.prepare_config_and_inputs()
a = UMTaModel(config_and_inputs[0] ).to(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__lowerCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=__lowerCamelCase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def __UpperCAmelCase ( self : int ) -> Optional[int]:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__lowerCamelCase )
def __UpperCAmelCase ( self : int ) -> Optional[int]:
a = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
a = self.model_tester.prepare_config_and_inputs()
a = config_and_inputs[0]
a = UMTaForConditionalGeneration(__lowerCamelCase ).eval()
model.to(__lowerCamelCase )
a = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=__lowerCamelCase ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=__lowerCamelCase ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=__lowerCamelCase ),
}
for attn_name, (name, mask) in zip(__lowerCamelCase , head_masking.items() ):
a = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
a = torch.ones(
config.num_decoder_layers , config.num_heads , device=__lowerCamelCase )
a = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=__lowerCamelCase , return_dict_in_generate=__lowerCamelCase , **__lowerCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def __UpperCAmelCase ( self : str ) -> str:
a = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=__lowerCamelCase ).to(__lowerCamelCase )
a = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=__lowerCamelCase , legacy=__lowerCamelCase )
a = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
a = tokenizer(__lowerCamelCase , return_tensors="pt" , padding=__lowerCamelCase ).input_ids
# fmt: off
a = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(__lowerCamelCase , __lowerCamelCase )
a = model.generate(input_ids.to(__lowerCamelCase ) )
a = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
a = tokenizer.batch_decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
| 721 |
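# A sketch of the sentinel-filling generation flow the integration test above
# exercises, using the public checkpoint (assumes transformers, torch and
# sentencepiece are installed and the checkpoint can be downloaded).
from transformers import AutoTokenizer, UMT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")

prompts = ["A <extra_id_0> walks into a bar and orders a <extra_id_1>."]
inputs = tokenizer(prompts, return_tensors="pt", padding=True)

# Decode without skipping special tokens so the <extra_id_n> markers that the
# model fills in remain visible in the output.
output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=False))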
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = fname.split(os.path.sep )[-1]
return re.search(R"^(.*)_\d+\.jpg$", A ).groups()[0]
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None ) -> Tuple:
a = file_names
a = image_transform
a = label_to_id
def __len__( self : Any ) -> Tuple:
return len(self.file_names )
def __getitem__( self : List[Any] , __lowerCamelCase : List[Any] ) -> int:
a = self.file_names[idx]
a = PIL.Image.open(__lowerCamelCase )
a = raw_image.convert("RGB" )
if self.image_transform is not None:
a = self.image_transform(__lowerCamelCase )
a = extract_label(__lowerCamelCase )
if self.label_to_id is not None:
a = self.label_to_id[label]
return {"image": image, "label": label}
def __magic_name__ ( A : str, A : int ):
'''simple docstring'''
if args.with_tracking:
a = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir )
else:
a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config["lr"]
a = int(config["num_epochs"] )
a = int(config["seed"] )
a = int(config["batch_size"] )
a = config["image_size"]
if not isinstance(A, (list, tuple) ):
a = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps, "isdigit" ):
if args.checkpointing_steps == "epoch":
a = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
a = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
a = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
a = os.path.split(A )[-1].split("." )[0]
accelerator.init_trackers(A, A )
# Grab all the image filenames
a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
a = [extract_label(A ) for fname in file_names]
a = list(set(A ) )
id_to_label.sort()
a = {lbl: i for i, lbl in enumerate(A )}
# Set the seed before splitting the data.
np.random.seed(A )
torch.manual_seed(A )
torch.cuda.manual_seed_all(A )
# Split our filenames between train and validation
a = np.random.permutation(len(A ) )
a = int(0.8 * len(A ) )
a = random_perm[:cut]
a = random_perm[cut:]
# For training we use a simple RandomResizedCrop
a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] )
a = PetsDataset(
[file_names[i] for i in train_split], image_transform=A, label_to_id=A )
# For evaluation, we use a deterministic Resize
a = Compose([Resize(A ), ToTensor()] )
a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A )
# Instantiate dataloaders.
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = create_model("resnet50d", pretrained=A, num_classes=len(A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
a = False
for param in model.get_classifier().parameters():
a = True
# We normalize the batches of images to be a bit faster.
a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 )
# Instantiate learning rate scheduler
a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
A, A, A, A, A )
# We need to keep track of how many total steps we have iterated over
a = 0
# We also need to keep track of the starting epoch so files are named properly
a = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
a = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
a = os.path.splitext(A )[0]
if "epoch" in training_difference:
a = int(training_difference.replace("epoch_", "" ) ) + 1
a = None
else:
a = int(training_difference.replace("step_", "" ) )
a = resume_step // len(A )
resume_step -= starting_epoch * len(A )
# Now we train the model
for epoch in range(A, A ):
model.train()
if args.with_tracking:
a = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
a = accelerator.skip_first_batches(A, A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
a = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
a = model(A )
a = torch.nn.functional.cross_entropy(A, batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(A, A ):
a = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
model.eval()
a = 0
a = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
with torch.no_grad():
a = model(A )
a = outputs.argmax(dim=-1 )
a , a = accelerator.gather_for_metrics((predictions, batch["label"]) )
a = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
a = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(A ),
"epoch": epoch,
}, step=A, )
if checkpointing_steps == "epoch":
a = F"""epoch_{epoch}"""
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
if args.with_tracking:
accelerator.end_training()
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir", required=A, help="The data folder on disk." )
parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.", )
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
parser.add_argument(
"--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
parser.add_argument(
"--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", )
parser.add_argument(
"--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
parser.add_argument(
"--project_dir", type=A, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", )
a = parser.parse_args()
a = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(A, A )
if __name__ == "__main__":
main()
| 662 | 0 |
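# The script above is the accelerate computer-vision example. Its core pattern
# in isolation, with a toy model and synthetic data (a sketch; assumes
# accelerate and torch are installed):
import torch
from accelerate import Accelerator

accelerator = Accelerator()

model = torch.nn.Linear(16, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
dataset = torch.utils.data.TensorDataset(torch.randn(64, 16), torch.randint(0, 2, (64,)))
loader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True)

# prepare() wraps everything for the current device / distributed setup, so
# the training loop itself stays free of .to(device) calls.
model, optimizer, loader = accelerator.prepare(model, optimizer, loader)

model.train()
for inputs, labels in loader:
    outputs = model(inputs)
    loss = torch.nn.functional.cross_entropy(outputs, labels)
    accelerator.backward(loss)  # replaces loss.backward()
    optimizer.step()
    optimizer.zero_grad()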
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : List[Any] = args.pruning_method
lowerCAmelCase_ : str = args.threshold
lowerCAmelCase_ : Tuple = args.model_name_or_path.rstrip("/")
lowerCAmelCase_ : Dict = args.target_model_path
print(F'''Load fine-pruned model from {model_name_or_path}''')
lowerCAmelCase_ : int = torch.load(os.path.join(a_ , "pytorch_model.bin"))
lowerCAmelCase_ : List[str] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowerCAmelCase_ : str = tensor
print(F'''Copied layer {name}''')
elif "classifier" in name or "qa_output" in name:
lowerCAmelCase_ : Optional[int] = tensor
print(F'''Copied layer {name}''')
elif "bias" in name:
lowerCAmelCase_ : Union[str, Any] = tensor
print(F'''Copied layer {name}''')
else:
if pruning_method == "magnitude":
lowerCAmelCase_ : Tuple = MagnitudeBinarizer.apply(inputs=a_ , threshold=a_)
lowerCAmelCase_ : Tuple = tensor * mask
print(F'''Pruned layer {name}''')
elif pruning_method == "topK":
if "mask_scores" in name:
continue
lowerCAmelCase_ : Optional[Any] = name[:-6]
lowerCAmelCase_ : Tuple = model[F'''{prefix_}mask_scores''']
lowerCAmelCase_ : Optional[Any] = TopKBinarizer.apply(a_ , a_)
lowerCAmelCase_ : Optional[Any] = tensor * mask
print(F'''Pruned layer {name}''')
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
lowerCAmelCase_ : Union[str, Any] = name[:-6]
lowerCAmelCase_ : Dict = model[F'''{prefix_}mask_scores''']
lowerCAmelCase_ : Union[str, Any] = ThresholdBinarizer.apply(a_ , a_ , a_)
lowerCAmelCase_ : List[str] = tensor * mask
print(F'''Pruned layer {name}''')
elif pruning_method == "l0":
if "mask_scores" in name:
continue
lowerCAmelCase_ : List[Any] = name[:-6]
lowerCAmelCase_ : Optional[int] = model[F'''{prefix_}mask_scores''']
                lowerCAmelCase_ ,lowerCAmelCase_ = -0.1, 1.1  # stretch interval (l, r) for the hard-concrete mask
lowerCAmelCase_ : Any = torch.sigmoid(a_)
lowerCAmelCase_ : List[Any] = s * (r - l) + l
lowerCAmelCase_ : List[str] = s_bar.clamp(min=0.0 , max=1.0)
lowerCAmelCase_ : Optional[Any] = tensor * mask
print(F'''Pruned layer {name}''')
else:
raise ValueError("Unknown pruning method")
if target_model_path is None:
lowerCAmelCase_ : Optional[Any] = os.path.join(
os.path.dirname(a_) , F'''bertarized_{os.path.basename(a_)}''')
if not os.path.isdir(a_):
shutil.copytree(a_ , a_)
print(F'''\nCreated folder {target_model_path}''')
torch.save(a_ , os.path.join(a_ , "pytorch_model.bin"))
print("\nPruned model saved! See you later!")
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
_lowercase = parser.parse_args()
main(args)
| 659 |
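# The conversion script above multiplies each weight by a hard binary mask
# derived from its mask_scores. A self-contained sketch of the "topK" masking
# step in plain PyTorch (no emmental dependency; the helper name is
# illustrative):
import torch


def topk_binarize(scores: torch.Tensor, keep_ratio: float) -> torch.Tensor:
    """Return a {0, 1} mask keeping at least the `keep_ratio` highest scores."""
    k = max(1, int(keep_ratio * scores.numel()))
    threshold = torch.topk(scores.flatten(), k).values.min()
    return (scores >= threshold).to(scores.dtype)


weights = torch.randn(4, 4)
scores = torch.randn(4, 4)
mask = topk_binarize(scores, keep_ratio=0.25)
pruned = weights * mask  # same elementwise product as in the script above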
"""simple docstring"""
from __future__ import annotations
lowerCamelCase_ = 8.9_88e9 # units = N * m^2 * C^-2
def __lowerCamelCase ( a_ : float , a_ : float , a_ : float , a_ : float ) -> dict[str, float]:
__SCREAMING_SNAKE_CASE :int = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if distance < 0:
raise ValueError('''Distance cannot be negative''' )
if force == 0:
__SCREAMING_SNAKE_CASE :int = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
__SCREAMING_SNAKE_CASE :Optional[Any] = abs(a_ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
__SCREAMING_SNAKE_CASE :List[Any] = abs(a_ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
__SCREAMING_SNAKE_CASE :Tuple = (COULOMBS_CONSTANT * charge_product / abs(a_ )) ** 0.5
return {"distance": distance}
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod() | 498 | 0 |
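# A worked check of Coulomb's law, F = k * |q1 * q2| / d**2, independent of
# the function above: two 1 C charges 1 m apart, then inverting the same
# relation for the distance.
COULOMB_CONSTANT = 8.988e9  # units = N * m^2 * C^-2

force = COULOMB_CONSTANT * abs(1.0 * 1.0) / (1.0**2)
assert abs(force - 8.988e9) < 1e-3

distance = (COULOMB_CONSTANT * abs(1.0 * 1.0) / force) ** 0.5
assert abs(distance - 1.0) < 1e-12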
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase = {
'''gpt-neox-20b''': 2048,
}
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Any = VOCAB_FILES_NAMES
_UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self , snake_case=None , snake_case=None , snake_case=None , snake_case="<|endoftext|>" , snake_case="<|endoftext|>" , snake_case="<|endoftext|>" , snake_case=False , **snake_case , ):
super().__init__(
snake_case , snake_case , tokenizer_file=snake_case , unk_token=snake_case , bos_token=snake_case , eos_token=snake_case , add_prefix_space=snake_case , **snake_case , )
lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case ) != add_prefix_space:
lowercase = getattr(snake_case , pre_tok_state.pop('type' ) )
lowercase = add_prefix_space
lowercase = pre_tok_class(**snake_case )
lowercase = add_prefix_space
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
lowercase = self._tokenizer.model.save(snake_case , name=snake_case )
return tuple(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case , add_special_tokens=snake_case ) + [self.eos_token_id] )
if len(snake_case ) > self.model_max_length:
lowercase = input_ids[-self.model_max_length :]
return input_ids
| 565 |
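# A usage sketch for the fast GPT-NeoX tokenizer defined above (assumes
# transformers is installed and the EleutherAI/gpt-neox-20b checkpoint is
# reachable).
from transformers import GPTNeoXTokenizerFast

tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
print(tokenizer("hello world").input_ids)

# add_prefix_space is pushed down into the backend pre-tokenizer, so a word at
# the start of a string tokenizes like the same word mid-string.
prefixed = GPTNeoXTokenizerFast.from_pretrained(
    "EleutherAI/gpt-neox-20b", add_prefix_space=True
)
print(prefixed("hello world").input_ids)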
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCAmelCase = logging.getLogger(__name__)
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : int = """token-classification"""
def __init__( self , snake_case ):
if type(snake_case ) == dict:
lowercase = Namespace(**snake_case )
lowercase = import_module('tasks' )
try:
lowercase = getattr(snake_case , hparams.task_type )
lowercase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
lowercase = self.token_classification_task.get_labels(hparams.labels )
lowercase = CrossEntropyLoss().ignore_index
super().__init__(snake_case , len(self.labels ) , self.mode )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case ):
return self.model(**snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type != "distilbert":
lowercase = (
batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
            ) # XLM and RoBERTa don't use token_type_ids
lowercase = self(**snake_case )
lowercase = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.hparams
for mode in ["train", "dev", "test"]:
lowercase = self._feature_file(snake_case )
if os.path.exists(snake_case ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , snake_case )
lowercase = torch.load(snake_case )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
lowercase = self.token_classification_task.read_examples_from_file(args.data_dir , snake_case )
lowercase = self.token_classification_task.convert_examples_to_features(
snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['xlnet'] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['xlnet'] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=snake_case , pad_on_left=bool(self.config.model_type in ['xlnet'] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('Saving features into cached file %s' , snake_case )
torch.save(snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = False ):
lowercase = self._feature_file(snake_case )
logger.info('Loading features from cached file %s' , snake_case )
lowercase = torch.load(snake_case )
lowercase = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowercase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowercase = torch.tensor([0 for f in features] , dtype=torch.long )
        # HACK (we will not use this much longer)
lowercase = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(snake_case , snake_case , snake_case , snake_case ) , batch_size=snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
"""Compute validation""" ""
lowercase = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type != "distilbert":
lowercase = (
batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase = self(**snake_case )
lowercase , lowercase = outputs[:2]
lowercase = logits.detach().cpu().numpy()
lowercase = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = torch.stack([x['val_loss'] for x in outputs] ).mean()
lowercase = np.concatenate([x['pred'] for x in outputs] , axis=0 )
lowercase = np.argmax(snake_case , axis=2 )
lowercase = np.concatenate([x['target'] for x in outputs] , axis=0 )
lowercase = dict(enumerate(self.labels ) )
lowercase = [[] for _ in range(out_label_ids.shape[0] )]
lowercase = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowercase = {
'val_loss': val_loss_mean,
'accuracy_score': accuracy_score(snake_case , snake_case ),
'precision': precision_score(snake_case , snake_case ),
'recall': recall_score(snake_case , snake_case ),
'f1': fa_score(snake_case , snake_case ),
}
lowercase = dict(results.items() )
lowercase = results
return ret, preds_list, out_label_list
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
# when stable
lowercase , lowercase , lowercase = self._eval_end(snake_case )
lowercase = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
# updating to test_epoch_end instead of deprecated test_end
lowercase , lowercase , lowercase = self._eval_end(snake_case )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowercase = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case , snake_case ):
# Add NER specific options
BaseTransformer.add_model_specific_args(snake_case , snake_case )
parser.add_argument(
'--task_type' , default='NER' , type=snake_case , help='Task type to fine tune in training (e.g. NER, POS, etc)' )
parser.add_argument(
'--max_seq_length' , default=128 , type=snake_case , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--labels' , default='' , type=snake_case , help='Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.' , )
parser.add_argument(
'--gpus' , default=0 , type=snake_case , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCAmelCase = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = NERTransformer(args)
UpperCAmelCase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCAmelCase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
UpperCAmelCase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
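        # Example invocation (a sketch, assuming this file is saved as run_ner.py and
        # that the generic flags --data_dir, --output_dir and --do_predict are defined
        # by add_generic_args in lightning_base, which is not shown here):
        #
        #   python run_ner.py --data_dir ./conll2003 --model_name_or_path bert-base-cased \
        #       --output_dir ./ner-out --labels ./conll2003/labels.txt \
        #       --max_seq_length 128 --do_predict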
| 565 | 1 |
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search.

    :param graph: directed graph as an adjacency dictionary
    :param start: starting vertex
    :returns: the set of explored vertices
    """
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
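    # Illustrative check (uses the graph G above): starting from "G" still reaches
    # every vertex, because "F" connects back into the rest of the graph.
    #
    #   >>> sorted(depth_first_search(G, "G"))
    #   ['A', 'B', 'C', 'D', 'E', 'F', 'G']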
| 385 |
def is_isogram(string: str) -> bool:
    """Return True if no letter occurs more than once in `string` (case-insensitive)."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 175 | 0 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
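# Illustrative usage (a sketch, not part of the original module): any value not
# passed explicitly keeps the default defined in __init__ above.
#
#   >>> config = BigBirdConfig(attention_type="block_sparse", block_size=16)
#   >>> config.num_random_blocks
#   3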
| 714 |
"""Convert OpenAI Jukebox checkpoints into the Hugging Face JukeboxModel format."""

import argparse
import json
import os
from pathlib import Path

import requests
import torch

from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}
def replace_key(key):
    """Rename a single OpenAI checkpoint key to its Hugging Face equivalent."""
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
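# For instance, the trailing-"k" rule above maps codebook tensors:
#
#   >>> replace_key("vqvae.encoders.0.k")
#   'vqvae.encoders.0.codebook'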
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Rewrite every key in `state_dict` to match the JukeboxModel layout.

    `mapping` is filled in-place with new_key -> original_key pairs.
    """
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the OpenAI checkpoints and convert them to a JukeboxModel."""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        # Expand the abbreviated parameter names used by the OpenAI checkpoints.
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
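    # Example invocation (a sketch, assuming this file is saved as convert_jukebox.py;
    # it downloads the checkpoints listed in MODEL_MAPPING, which total several GB):
    #
    #   python convert_jukebox.py --model_name jukebox-1b-lyrics \
    #       --pytorch_dump_folder_path jukebox-1b-lyrics-converted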
| 460 | 0 |