| code (string, lengths 81–54k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a : List[Any] = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
a : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 715 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def lowercase__(A ) ->bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase__() ->Iterator[int]:
"""simple docstring"""
lowercase__ : Union[str, Any]= 2
while True:
if is_prime(A ):
yield num
num += 1
def lowercase__(A = 2_000_000 ) ->int:
"""simple docstring"""
return sum(takewhile(lambda A : x < n , prime_generator() ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 0 |
"""simple docstring"""
def lowercase__(A , A ) ->bool:
"""simple docstring"""
lowercase__ : Optional[int]= len(a__ )
lowercase__ : Dict= [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
lowercase__ : Optional[Any]= True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
lowercase__ : Optional[Any]= False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
lowercase__ : Any= subset[i - 1][j]
if arr[i - 1] <= j:
lowercase__ : Union[str, Any]= subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
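    # A small usage sketch (assuming the is_sum_subset name above): 9 can be built
    # from [3, 34, 4, 12, 5, 2] (4 + 5), while 30 exceeds the total of [1, 2, 3].
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)
    assert not is_sum_subset([1, 2, 3], 30)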
| 716 |
"""simple docstring"""
def lowercase__(A ) ->bool:
"""simple docstring"""
lowercase__ : Tuple= (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowercase__(A = 5_000 ) ->int:
"""simple docstring"""
lowercase__ : str= [(i * (3 * i - 1)) // 2 for i in range(1 , A )]
for i, pentagonal_i in enumerate(A ):
for j in range(A , len(A ) ):
lowercase__ : List[Any]= pentagonal_nums[j]
lowercase__ : int= pentagonal_i + pentagonal_j
lowercase__ : Optional[int]= pentagonal_j - pentagonal_i
if is_pentagonal(A ) and is_pentagonal(A ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 0 |
"""simple docstring"""
a : Tuple = 0 # The first color of the flag.
a : int = 1 # The second color of the flag.
a : Tuple = 2 # The third color of the flag.
a : str = (red, white, blue)
def lowercase__(A ) ->list:
"""simple docstring"""
if not sequence:
return []
if len(snake_case__ ) == 1:
return list(snake_case__ )
lowercase__ : Union[str, Any]= 0
lowercase__ : Optional[int]= len(snake_case__ ) - 1
lowercase__ : Any= 0
while mid <= high:
if sequence[mid] == colors[0]:
lowercase__, lowercase__ : Dict= sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
lowercase__, lowercase__ : Dict= sequence[high], sequence[mid]
high -= 1
else:
lowercase__ : Optional[Any]= f'''The elements inside the sequence must contains only {colors} values'''
raise ValueError(snake_case__ )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
a : int = input("""Enter numbers separated by commas:\n""").strip()
a : List[str] = [int(item.strip()) for item in user_input.split(""",""")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 717 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Union[str, Any] = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_text_model"
__lowerCamelCase = ["past_key_values"]
__lowerCamelCase = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=50244 , snake_case__=768 , snake_case__=64 , snake_case__=2048 , snake_case__=12 , snake_case__=12 , snake_case__=32 , snake_case__=128 , snake_case__=0.1 , snake_case__=1e-6 , snake_case__=1.0 , snake_case__="gelu_new" , snake_case__=0 , snake_case__=False , snake_case__=0 , snake_case__=1 , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
lowercase__ : int= vocab_size
lowercase__ : Optional[Any]= hidden_size
lowercase__ : Tuple= d_kv
lowercase__ : Optional[int]= d_ff
lowercase__ : Any= num_layers
lowercase__ : Dict= num_heads
lowercase__ : List[Any]= relative_attention_num_buckets
lowercase__ : Optional[Any]= relative_attention_max_distance
lowercase__ : Dict= dropout_rate
lowercase__ : Tuple= layer_norm_epsilon
lowercase__ : str= initializer_factor
lowercase__ : Any= use_cache
lowercase__ : Optional[int]= eos_token_id
lowercase__ : str= decoder_start_token_id
# for backwards compatibility
lowercase__ : Optional[Any]= dense_act_fn
super().__init__(
pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , )
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : str= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : str= config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_vision_model"
def __init__( self , snake_case__=768 , snake_case__=768 , snake_case__=2048 , snake_case__=64 , snake_case__=12 , snake_case__=12 , snake_case__="gelu_new" , snake_case__=1e-6 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1e-10 , snake_case__=1.0 , snake_case__=4096 , snake_case__=32 , snake_case__=128 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase__ : Tuple= hidden_size
lowercase__ : Tuple= patch_embed_hidden_size
lowercase__ : Optional[Any]= d_ff
lowercase__ : Dict= dropout_rate
lowercase__ : Any= num_hidden_layers
lowercase__ : Optional[int]= num_attention_heads
lowercase__ : Dict= initializer_range
lowercase__ : Tuple= initializer_factor
lowercase__ : Tuple= attention_dropout
lowercase__ : Optional[Any]= layer_norm_eps
lowercase__ : List[Any]= dense_act_fn
lowercase__ : str= seq_len
lowercase__ : List[str]= relative_attention_num_buckets
lowercase__ : Union[str, Any]= relative_attention_max_distance
lowercase__ : Dict= d_kv
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : int= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : Union[str, Any]= config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct"
__lowerCamelCase = True
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=1.0 , snake_case__=0.02 , snake_case__=False , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ )
if text_config is None:
lowercase__ : List[Any]= {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
lowercase__ : str= {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
lowercase__ : str= PixaStructTextConfig(**snake_case__ )
lowercase__ : Dict= PixaStructVisionConfig(**snake_case__ )
lowercase__ : int= self.text_config.decoder_start_token_id
lowercase__ : List[Any]= self.text_config.pad_token_id
lowercase__ : Any= self.text_config.eos_token_id
lowercase__ : Any= initializer_factor
lowercase__ : int= initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : Dict= is_vqa
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= copy.deepcopy(self.__dict__ )
lowercase__ : str= self.text_config.to_dict()
lowercase__ : str= self.vision_config.to_dict()
lowercase__ : List[str]= self.__class__.model_type
return output
| 85 | 0 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Optional[int]= str(id_ )
lowercase__ : Optional[Any]= None
lowercase__ : List[Any]= None
lowercase__ : Dict= []
lowercase__ : Dict= {} # {vertex:distance}
def __lt__( self , snake_case__ ):
'''simple docstring'''
return self.key < other.key
def __repr__( self ):
'''simple docstring'''
return self.id
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
self.neighbors.append(UpperCAmelCase__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Union[str, Any]= weight
def lowercase__(A , A , A , A ) ->Any:
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , A )
graph[b - 1].add_edge(graph[a - 1] , A )
def lowercase__(A , A ) ->list:
"""simple docstring"""
lowercase__ : Tuple= []
for u in graph:
lowercase__ : Dict= math.inf
lowercase__ : Dict= None
lowercase__ : List[Any]= 0
lowercase__ : List[str]= graph[:]
while q:
lowercase__ : Dict= min(A )
q.remove(A )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
lowercase__ : Union[str, Any]= u
lowercase__ : Tuple= u.edges[v.id]
for i in range(1 , len(A ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def lowercase__(A , A ) ->Iterator[tuple]:
"""simple docstring"""
for u in graph:
lowercase__ : Optional[int]= math.inf
lowercase__ : List[str]= None
lowercase__ : Optional[Any]= 0
lowercase__ : Dict= list(A )
hq.heapify(A )
while h:
lowercase__ : List[Any]= hq.heappop(A )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
lowercase__ : List[str]= u
lowercase__ : int= u.edges[v.id]
hq.heapify(A )
for i in range(1 , len(A ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def lowercase__() ->None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
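    # A small usage sketch (assuming the renamed Vertex/connect/prim helpers
    # above): a triangle graph with edges 1-2 (weight 1), 2-3 (weight 2) and
    # 1-3 (weight 3); the minimum spanning tree keeps the two cheapest edges.
    graph = [Vertex(i) for i in range(3)]
    connect(graph, 1, 2, 1)
    connect(graph, 2, 3, 2)
    connect(graph, 1, 3, 3)
    assert prim(graph, graph[0]) == [(2, 1), (3, 2)]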
| 718 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
lowercase__ : str= AutoTokenizer.from_pretrained("google/mt5-small" )
lowercase__ : Tuple= tokenizer("Hello there" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= tokenizer("Hi I am" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ).loss
lowercase__ : int= -tf.math.reduce_mean(snake_case__ ).numpy()
lowercase__ : int= -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 85 | 0 |
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    """simple docstring"""

    def test_accelerated_optimizer_pickling(self):
        '''simple docstring'''
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 719 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "BridgeTowerImageProcessor"
__lowerCamelCase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
def __call__( self , snake_case__ , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[int]= self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
# add pixel_values + pixel_mask
lowercase__ : Optional[int]= self.image_processor(
snake_case__ , return_tensors=snake_case__ , do_normalize=snake_case__ , do_center_crop=snake_case__ , **snake_case__ )
encoding.update(snake_case__ )
return encoding
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.tokenizer.model_input_names
lowercase__ : List[Any]= self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 85 | 0 |
"""simple docstring"""
from __future__ import annotations
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= text, pattern
lowercase__ : int= len(snake_case_ ), len(snake_case_ )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# searches pattern in text and returns index positions
lowercase__ : List[str]= []
for i in range(self.textLen - self.patLen + 1 ):
lowercase__ : Union[str, Any]= self.mismatch_in_text(snake_case_ )
if mismatch_index == -1:
positions.append(snake_case_ )
else:
lowercase__ : Tuple= self.match_in_pattern(self.text[mismatch_index] )
lowercase__ : Tuple= (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
a : List[Any] = """ABAABA"""
a : str = """AB"""
a : List[Any] = BoyerMooreSearch(text, pattern)
a : Any = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
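# A quick check of the example above (assuming the names as fixed): the pattern
# "AB" occurs in "ABAABA" at indices 0 and 3.
assert positions == [0, 3]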
| 720 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= tempfile.mkdtemp()
lowercase__ : Optional[Any]= 8
# DPR tok
lowercase__ : Tuple= [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowercase__ : Any= os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : Any= os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
lowercase__ : List[Any]= [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase__ : Tuple= dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowercase__ : Any= ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : Tuple= {"unk_token": "<unk>"}
lowercase__ : int= os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : List[str]= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : str= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.get_dummy_dataset()
lowercase__ : Optional[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= dataset
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= self.get_dummy_dataset()
lowercase__ : Tuple= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
lowercase__ : Tuple= os.path.join(self.tmpdirname , "dataset" )
lowercase__ : Optional[Any]= os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
lowercase__ : List[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case__ ) , )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
lowercase__ : Optional[int]= os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
lowercase__ : int= os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
lowercase__ : str= {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(snake_case__ , open(snake_case__ , "wb" ) )
lowercase__ : List[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
lowercase__ : Optional[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= self.get_dummy_dataset()
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Any= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Tuple= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : str= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= 1
lowercase__ : str= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : Optional[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : int= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Union[str, Any]= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : int= self.get_dummy_legacy_index_retriever()
lowercase__ : Optional[Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[Any]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : List[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : str= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
import torch
lowercase__ : str= 1
lowercase__ : Union[str, Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : str= [[5, 7], [10, 11]]
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Dict= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
lowercase__, lowercase__, lowercase__ : Optional[int]= (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , np.ndarray )
lowercase__ : Any= retriever(
snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ , return_tensors="pt" , )
lowercase__, lowercase__, lowercase__, lowercase__ : Tuple= ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.get_dpr_ctx_encoder_tokenizer()
lowercase__ : Dict= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
retriever.set_ctx_encoder_tokenizer(snake_case__ )
lowercase__ : List[str]= [[5, 7], [10, 11]]
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : List[Any]= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
self.assertEqual(
len(snake_case__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case__ ) # check for doc token related keys in dictionary.
| 85 | 0 |
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
a : str = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
a : Union[str, Any] = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
a : Optional[int] = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy(preds, labels):
    """simple docstring"""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    """simple docstring"""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    """simple docstring"""
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
"""simple docstring"""
def _info(self):
'''simple docstring'''
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
def _compute(self, predictions, references):
'''simple docstring'''
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_10(predictions, references)}
elif self.config_name in ["wiki-ner"]:
return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
| 721 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "AutoImageProcessor"
__lowerCamelCase = "AutoTokenizer"
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
lowercase__ : List[Any]= self.image_processor
def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowercase__ : Tuple= self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
lowercase__ : str= self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
lowercase__ : Any= image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 85 | 0 |
"""simple docstring"""
def lowercase__(A ) ->Dict:
"""simple docstring"""
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def lowercase__(A ) ->Union[str, Any]:
"""simple docstring"""
lowercase__ : str= 0
lowercase__ : List[Any]= number
while duplicate > 0:
lowercase__, lowercase__ : Optional[int]= divmod(_lowerCAmelCase , 10 )
fact_sum += factorial(_lowerCAmelCase )
return fact_sum == number
if __name__ == "__main__":
print("""Program to check whether a number is a Krisnamurthy Number or not.""")
a : str = int(input("""Enter number: """).strip())
print(
F"""{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."""
)
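    # A known example (assuming the krishnamurthy name above): 145 = 1! + 4! + 5!,
    # so it is a Krishnamurthy number, while 144 is not.
    assert krishnamurthy(145)
    assert not krishnamurthy(144)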
| 700 |
"""simple docstring"""
a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ):
lowercase__ : Union[str, Any]= f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(A )
lowercase__ : str= "".join(bin(A )[2:].zfill(8 ) for byte in data )
lowercase__ : Tuple= len(A ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ : Union[str, Any]= b"=" * ((6 - len(A ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(A ) % 6)
else:
lowercase__ : str= b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A ) , 6 ) ).encode()
+ padding
)
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ) and not isinstance(A , A ):
lowercase__ : str= (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(A )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(A , A ):
try:
lowercase__ : Optional[Any]= encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ : List[Any]= encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ : str= encoded_data[:-padding]
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ : Any= [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A ) , 8 )
]
return bytes(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
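    # A quick round-trip check (assuming the base64_encode/base64_decode names
    # above): "Hello" corresponds to the standard base64 value b"SGVsbG8=".
    assert base64_encode(b"Hello") == b"SGVsbG8="
    assert base64_decode("SGVsbG8=") == b"Hello"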
| 85 | 0 |
"""simple docstring"""
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Tuple= set_counts
lowercase__ : Any= max(__lowerCamelCase )
lowercase__ : Tuple= len(__lowerCamelCase )
lowercase__ : Dict= [1] * num_sets
lowercase__ : Dict= list(range(__lowerCamelCase ) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : int= self.get_parent(__lowerCamelCase )
lowercase__ : str= self.get_parent(__lowerCamelCase )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
lowercase__ : List[str]= 0
lowercase__ : Dict= dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
lowercase__ : Dict= self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
lowercase__ : Any= 0
lowercase__ : Union[str, Any]= src_parent
lowercase__ : Dict= self.set_counts[src_parent]
lowercase__ : Tuple= max(self.max_set , __lowerCamelCase )
return True
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
if self.parents[disj_set] == disj_set:
return disj_set
lowercase__ : Union[str, Any]= self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
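# A small usage sketch (the UnionFind name is an editorial choice for the class
# above): three singleton sets; merging 0 and 1 grows the largest set to size 2.
if __name__ == "__main__":
    union_find = UnionFind([1, 1, 1])
    assert union_find.merge(0, 1) is True
    assert union_find.merge(0, 1) is False  # already in the same set
    assert union_find.max_set == 2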
| 701 |
"""simple docstring"""
from __future__ import annotations
def lowercase__(A ) ->list[int]: # This function is recursive
"""simple docstring"""
lowercase__ : int= len(A )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowercase__ : str= array[0]
lowercase__ : Optional[Any]= False
lowercase__ : Any= 1
lowercase__ : list[int]= []
while not is_found and i < array_length:
if array[i] < pivot:
lowercase__ : Union[str, Any]= True
lowercase__ : List[str]= [element for element in array[i:] if element >= array[i]]
lowercase__ : Union[str, Any]= longest_subsequence(A )
if len(A ) > len(A ):
lowercase__ : List[str]= temp_array
else:
i += 1
lowercase__ : List[str]= [element for element in array[1:] if element >= pivot]
lowercase__ : List[str]= [pivot, *longest_subsequence(A )]
if len(A ) > len(A ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
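    # A quick check (assuming the longest_subsequence name above): for
    # [5, 1, 2, 3] the longest non-decreasing subsequence is [1, 2, 3].
    assert longest_subsequence([5, 1, 2, 3]) == [1, 2, 3]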
| 85 | 0 |
"""simple docstring"""
import os
def lowercase__() ->Optional[Any]:
"""simple docstring"""
lowercase__ : List[str]= os.path.dirname(os.path.realpath(UpperCamelCase__ ) )
lowercase__ : str= os.path.join(UpperCamelCase__ , "triangle.txt" )
with open(UpperCamelCase__ ) as f:
lowercase__ : str= f.readlines()
lowercase__ : List[Any]= []
for line in triangle:
lowercase__ : Optional[int]= []
for number in line.strip().split(" " ):
numbers_from_line.append(int(UpperCamelCase__ ) )
a.append(UpperCamelCase__ )
for i in range(1 , len(UpperCamelCase__ ) ):
for j in range(len(a[i] ) ):
lowercase__ : List[Any]= a[i - 1][j] if j != len(a[i - 1] ) else 0
lowercase__ : List[Any]= a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(UpperCamelCase__ , UpperCamelCase__ )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
| 702 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
a : List[str] = parser.parse_args()
a : List[str] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
a : Optional[Any] = CLIPImageProcessor()
a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
a : Tuple = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 85 | 0 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def lowercase__(A ) ->Any:
"""simple docstring"""
return (data["data"], data["target"])
def lowercase__(A , A , A ) ->int:
"""simple docstring"""
lowercase__ : int= XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Predict target for test data
lowercase__ : int= xgb.predict(__SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any]= predictions.reshape(len(__SCREAMING_SNAKE_CASE ) , 1 )
return predictions
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : List[str]= fetch_california_housing()
lowercase__ : Optional[Any]= data_handling(__SCREAMING_SNAKE_CASE )
lowercase__ : int= train_test_split(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , test_size=0.25 , random_state=1 )
lowercase__ : Optional[int]= xgboost(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Error printing
print(f'''Mean Absolute Error : {mean_absolute_error(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}''' )
print(f'''Mean Square Error : {mean_squared_error(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 703 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]:
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models )
lowercase__ : List[Any]= config_class.from_json_file(A )
lowercase__ : Any= True
lowercase__ : List[str]= True
print(f'''Building TensorFlow model from configuration: {config}''' )
lowercase__ : Optional[int]= model_class(A )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
lowercase__ : List[str]= cached_file(
A , A , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A )
if compare_with_pt_model:
lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network
lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" )
lowercase__ : Union[str, Any]= pt_model_class.from_pretrained(
pretrained_model_name_or_path=A , config=A , state_dict=A )
with torch.no_grad():
lowercase__ : str= pt_model(**pt_model.dummy_inputs )
lowercase__ : Tuple= pto[0].numpy()
lowercase__ : List[Any]= tfo[0].numpy()
lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) )
print(f'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}'''
# Save pytorch-model
print(f'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(A , save_format="h5" )
def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]:
"""simple docstring"""
if args_model_type is None:
lowercase__ : Tuple= list(MODEL_CLASSES.keys() )
else:
lowercase__ : Optional[int]= [args_model_type]
for j, model_type in enumerate(A , start=1 ):
print("=" * 100 )
print(f''' Converting model type {j}/{len(A )}: {model_type}''' )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowercase__ : int= list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowercase__ : Any= model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(A , A ) , start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
lowercase__ : Any= model_shortcut_name
elif only_convert_finetuned_models:
print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models )
else:
lowercase__ : Union[str, Any]= config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowercase__ : str= cached_file(A , A , force_download=not use_cached_models )
else:
lowercase__ : Any= model_shortcut_name
if os.path.isfile(A ):
lowercase__ : Dict= "converted_model"
convert_pt_checkpoint_to_tf(
model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , )
if remove_cached_files:
os.remove(A )
os.remove(A )
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
a : List[str] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
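# Hedged usage sketch for the conversion CLI defined above (script name and paths are hypothetical):
#   python convert_pytorch_checkpoint_to_tf2.py --model_type bert \
#       --pytorch_checkpoint_path ./pt_model.bin --config_file ./config.json \
#       --tf_dump_path ./tf_dump --compare_with_pt_model
# The registry-dispatch step it relies on reduces to a lookup with an early, explicit error:
def lookup_model_classes(registry: dict, model_type: str) -> tuple:
    if model_type not in registry:
        raise ValueError(f"Unrecognized model type, should be one of {list(registry.keys())}.")
    return registry[model_type]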
| 85 | 0 |
"""simple docstring"""
import os
def lowercase__() ->List[Any]:
"""simple docstring"""
with open(os.path.dirname(A ) + "/p022_names.txt" ) as file:
lowercase__ : List[Any]= str(file.readlines()[0] )
lowercase__ : Union[str, Any]= names.replace("\"" , "" ).split("," )
names.sort()
lowercase__ : Dict= 0
lowercase__ : Union[str, Any]= 0
for i, name in enumerate(A ):
for letter in name:
name_score += ord(A ) - 64
total_score += (i + 1) * name_score
lowercase__ : Dict= 0
return total_score
if __name__ == "__main__":
print(solution())
| 704 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
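# A minimal, self-contained sketch of the lazy-import idea behind _LazyModule above (illustrative
# only, not the actual implementation): attribute access triggers the real submodule import the
# first time a name is requested, so heavy optional dependencies are only paid for when used.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, item: str):
        for submodule, names in self._import_structure.items():
            if item in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, item)
        raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")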
| 85 | 0 |
import argparse
from collections import defaultdict
def lowercase__(A , A , A , A , A ) ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Union[str, Any]= f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(_lowerCamelCase , "r" ) as f:
lowercase__ : Union[str, Any]= f.readlines()
lowercase__ : List[Any]= f'''class {class_name}('''
lowercase__ : Union[str, Any]= f'''{4 * ' '}def {test_name}('''
lowercase__ : Optional[int]= f'''{8 * ' '}{correct_line.split()[0]}'''
lowercase__ : str= f'''{16 * ' '}{correct_line.split()[0]}'''
lowercase__ : int= False
lowercase__ : str= False
lowercase__ : List[Any]= False
lowercase__ : List[str]= False
lowercase__ : Tuple= 0
lowercase__ : str= 0
lowercase__ : List[Any]= []
for line in lines:
if line.startswith(_lowerCamelCase ):
lowercase__ : Optional[Any]= True
elif in_class and line.startswith(_lowerCamelCase ):
lowercase__ : List[Any]= True
elif in_class and in_func and (line.startswith(_lowerCamelCase ) or line.startswith(_lowerCamelCase )):
lowercase__ : Any= len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
lowercase__ : Any= True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
lowercase__ : Optional[int]= True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * ' '}{correct_line}''' )
lowercase__ : int= False
else:
new_lines.append(_lowerCamelCase )
with open(_lowerCamelCase , "w" ) as f:
for line in new_lines:
f.write(_lowerCamelCase )
def lowercase__(A , A=None ) ->Tuple:
"""simple docstring"""
if fail is not None:
with open(_lowerCamelCase , "r" ) as f:
lowercase__ : Dict= {l.strip() for l in f.readlines()}
else:
lowercase__ : List[Any]= None
with open(_lowerCamelCase , "r" ) as f:
lowercase__ : int= f.readlines()
lowercase__ : List[str]= defaultdict(_lowerCamelCase )
for line in correct_lines:
lowercase__ : str= line.split(";" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
a : Any = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
a : Optional[Any] = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 705 |
"""simple docstring"""
def lowercase__(A ) ->list:
"""simple docstring"""
if n_term == "":
return []
lowercase__ : list= []
for temp in range(int(A ) ):
series.append(f'''1/{temp + 1}''' if series else "1" )
return series
if __name__ == "__main__":
a : Dict = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
| 85 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCAmelCase( DiffusionPipeline ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ):
'''simple docstring'''
super().__init__()
self.register_modules(transformer=__UpperCamelCase , vae=__UpperCamelCase , scheduler=__UpperCamelCase )
# create a imagenet -> id dictionary for easier use
lowercase__ : Dict= {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split("," ):
lowercase__ : Optional[Any]= int(__UpperCamelCase )
lowercase__ : Tuple= dict(sorted(self.labels.items() ) )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
lowercase__ : Optional[int]= list(__UpperCamelCase )
for l in label:
if l not in self.labels:
raise ValueError(
F'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , snake_case__ , snake_case__ = 4.0 , snake_case__ = None , snake_case__ = 50 , snake_case__ = "pil" , snake_case__ = True , ):
'''simple docstring'''
lowercase__ : Dict= len(__UpperCamelCase )
lowercase__ : Any= self.transformer.config.sample_size
lowercase__ : Optional[Any]= self.transformer.config.in_channels
lowercase__ : Union[str, Any]= randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=__UpperCamelCase , device=self.device , dtype=self.transformer.dtype , )
lowercase__ : str= torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowercase__ : List[str]= torch.tensor(__UpperCamelCase , device=self.device ).reshape(-1 )
lowercase__ : List[str]= torch.tensor([1000] * batch_size , device=self.device )
lowercase__ : Dict= torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(__UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowercase__ : Union[str, Any]= latent_model_input[: len(__UpperCamelCase ) // 2]
lowercase__ : str= torch.cat([half, half] , dim=0 )
lowercase__ : int= self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
lowercase__ : Union[str, Any]= t
if not torch.is_tensor(__UpperCamelCase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
lowercase__ : List[Any]= latent_model_input.device.type == "mps"
if isinstance(__UpperCamelCase , __UpperCamelCase ):
lowercase__ : Optional[Any]= torch.floataa if is_mps else torch.floataa
else:
lowercase__ : Optional[Any]= torch.intaa if is_mps else torch.intaa
lowercase__ : str= torch.tensor([timesteps] , dtype=__UpperCamelCase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
lowercase__ : Optional[Any]= timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__ : Tuple= timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
lowercase__ : Dict= self.transformer(
__UpperCamelCase , timestep=__UpperCamelCase , class_labels=__UpperCamelCase ).sample
# perform guidance
if guidance_scale > 1:
lowercase__, lowercase__ : int= noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowercase__, lowercase__ : Optional[int]= torch.split(__UpperCamelCase , len(__UpperCamelCase ) // 2 , dim=0 )
lowercase__ : Any= uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowercase__ : List[str]= torch.cat([half_eps, half_eps] , dim=0 )
lowercase__ : Dict= torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowercase__, lowercase__ : Any= torch.split(__UpperCamelCase , __UpperCamelCase , dim=1 )
else:
lowercase__ : Optional[Any]= noise_pred
# compute previous image: x_t -> x_t-1
lowercase__ : Optional[Any]= self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
if guidance_scale > 1:
lowercase__, lowercase__ : Optional[int]= latent_model_input.chunk(2 , dim=0 )
else:
lowercase__ : Union[str, Any]= latent_model_input
lowercase__ : Union[str, Any]= 1 / self.vae.config.scaling_factor * latents
lowercase__ : int= self.vae.decode(__UpperCamelCase ).sample
lowercase__ : List[str]= (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ : str= samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ : List[str]= self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=__UpperCamelCase )
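# Hedged usage sketch, assuming the pipeline above corresponds to diffusers' DiTPipeline and that
# the checkpoint name below is available; adjust both if that assumption does not hold. A GPU is
# assumed for reasonable speed.
import torch
from diffusers import DiTPipeline, DPMSolverMultistepScheduler

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
class_ids = pipe.get_label_ids(["white shark", "umbrella"])  # uses the label mapping built in __init__
images = pipe(class_labels=class_ids, guidance_scale=4.0, num_inference_steps=25).images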
| 706 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : str = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __UpperCAmelCase( PretrainedConfig ):
"""simple docstring"""
__lowerCamelCase = "big_bird"
def __init__( self , snake_case__=50358 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=4096 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=66 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=64 , snake_case__=3 , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , sep_token_id=snake_case__ , **snake_case__ , )
lowercase__ : Dict= vocab_size
lowercase__ : Optional[int]= max_position_embeddings
lowercase__ : List[Any]= hidden_size
lowercase__ : List[str]= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : Optional[int]= intermediate_size
lowercase__ : Optional[int]= hidden_act
lowercase__ : Tuple= hidden_dropout_prob
lowercase__ : int= attention_probs_dropout_prob
lowercase__ : int= initializer_range
lowercase__ : List[Any]= type_vocab_size
lowercase__ : Union[str, Any]= layer_norm_eps
lowercase__ : Optional[Any]= use_cache
lowercase__ : Union[str, Any]= rescale_embeddings
lowercase__ : Union[str, Any]= attention_type
lowercase__ : Any= use_bias
lowercase__ : List[Any]= block_size
lowercase__ : Optional[Any]= num_random_blocks
lowercase__ : Optional[int]= classifier_dropout
class __UpperCAmelCase( OnnxConfig ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ : List[Any]= {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ : Tuple= {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
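# Hedged usage sketch (assumes this file maps to transformers' BigBird; the public class names and
# import path below are an assumption, not taken from the obfuscated code above):
from transformers import BigBirdConfig
from transformers.models.big_bird.configuration_big_bird import BigBirdOnnxConfig

cfg = BigBirdConfig(attention_type="original_full")
onnx_cfg = BigBirdOnnxConfig(cfg)
print(onnx_cfg.inputs)  # dynamic batch/sequence axes for input_ids and attention_mask, as defined above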
| 85 | 0 |
"""simple docstring"""
def lowercase__() ->list[list[int]]:
"""simple docstring"""
return [list(range(1_000 - i , -1_000 - i , -1 ) ) for i in range(1_000 )]
a : Union[str, Any] = generate_large_matrix()
a : List[Any] = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def lowercase__(A ) ->None:
"""simple docstring"""
assert all(row == sorted(A , reverse=A ) for row in grid )
assert all(list(A ) == sorted(A , reverse=A ) for col in zip(*A ) )
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__ : Tuple= 0
lowercase__ : Dict= len(A ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
lowercase__ : str= (left + right) // 2
lowercase__ : int= array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
lowercase__ : Optional[Any]= mid + 1
else:
lowercase__ : List[Any]= mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(A )
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__ : Optional[Any]= 0
lowercase__ : Optional[int]= len(grid[0] )
for i in range(len(A ) ):
lowercase__ : Optional[int]= find_negative_index(grid[i][:bound] )
total += bound
return (len(A ) * len(grid[0] )) - total
def lowercase__(A ) ->int:
"""simple docstring"""
return len([number for row in grid for number in row if number < 0] )
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__ : List[Any]= 0
for row in grid:
for i, number in enumerate(A ):
if number < 0:
total += len(A ) - i
break
return total
def lowercase__() ->None:
"""simple docstring"""
from timeit import timeit
print("Running benchmarks" )
lowercase__ : List[Any]= (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
lowercase__ : Optional[Any]= timeit(f'''{func}(grid=grid)''' , setup=A , number=500 )
print(f'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 707 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 85 | 0 |
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= {}
def UpperCAmelCase_ ( self ):
'''simple docstring'''
print(self.vertex )
for i in self.vertex:
print(UpperCamelCase__ , " -> " , " -> ".join([str(UpperCamelCase__ ) for j in self.vertex[i]] ) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(UpperCamelCase__ )
else:
# else make a new vertex
lowercase__ : Union[str, Any]= [to_vertex]
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# visited array for storing already visited nodes
lowercase__ : List[Any]= [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
# mark start vertex as visited
lowercase__ : str= True
print(UpperCamelCase__ , end=" " )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
a : Optional[int] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
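# Complementary sketch: the same depth-first traversal done iteratively with an explicit stack,
# which sidesteps Python's recursion limit on deep graphs. `adjacency` maps vertex -> list of vertices.
def dfs_iterative(adjacency):
    visited, order = set(), []
    for start in adjacency:
        if start in visited:
            continue
        stack = [start]
        while stack:
            node = stack.pop()
            if node in visited:
                continue
            visited.add(node)
            order.append(node)
            # Push neighbours in reverse so they are visited in insertion order.
            stack.extend(reversed(adjacency.get(node, [])))
    return order


# dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}) -> [0, 1, 2, 3]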
| 708 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def lowercase__(A , A ) ->List[Any]:
"""simple docstring"""
lowercase__ : str= []
for part_id in partition_order:
lowercase__ : int= df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(A ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : Optional[Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple= spark.range(100 ).repartition(1 )
lowercase__ : Dict= Spark(A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Union[str, Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Dict= spark.range(10 ).repartition(2 )
lowercase__ : Optional[Any]= [1, 0]
lowercase__ : List[str]= _generate_iterable_examples(A , A ) # Reverse the partitions.
lowercase__ : int= _get_expected_row_ids_and_row_dicts_for_partition_order(A , A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
lowercase__, lowercase__ : Any= expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->int:
"""simple docstring"""
lowercase__ : int= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Dict= spark.range(10 ).repartition(1 )
lowercase__ : str= SparkExamplesIterable(A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(A ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : List[str]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : int= spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
lowercase__ : Optional[Any]= lambda A : x.reverse()
lowercase__ : Tuple= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] )
lowercase__ : List[str]= SparkExamplesIterable(A ).shuffle_data_sources(A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : str= expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Any:
"""simple docstring"""
lowercase__ : Dict= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Union[str, Any]= spark.range(20 ).repartition(4 )
# Partitions 0 and 2
lowercase__ : Optional[int]= SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : Union[str, Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] )
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : Tuple= expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
lowercase__ : Tuple= SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : List[Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] )
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : Dict= expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Any= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple= spark.range(100 ).repartition(1 )
lowercase__ : Optional[int]= Spark(A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
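# Small illustration (not the library's implementation) of the worker-to-partition assignment the
# sharding test above expects: worker i reads partitions i, i + num_workers, i + 2 * num_workers, ...
def shards_for_worker(worker_id, num_workers, num_partitions):
    return list(range(worker_id, num_partitions, num_workers))


assert shards_for_worker(0, 2, 4) == [0, 2]
assert shards_for_worker(1, 2, 4) == [1, 3]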
| 85 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def lowercase__() ->List[str]:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(A ):
requests.request("GET" , "https://huggingface.co" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("GET" , "https://huggingface.co" , timeout=1.0 )
@pytest.mark.integration
def lowercase__() ->Optional[int]:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("GET" , "https://huggingface.co" )
def lowercase__() ->int:
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(A ):
http_head("https://huggingface.co" )
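# Minimal sketch of how an offline simulation like the modes exercised above can be built
# (illustrative only, not the datasets implementation): patch requests.Session.request so that
# every network call fails immediately.
from contextlib import contextmanager
from unittest.mock import patch

import requests


@contextmanager
def simulated_offline():
    def _refuse(self, method, url, *args, **kwargs):
        raise requests.exceptions.ConnectionError(f"Offline mode is enabled, refusing {method} {url}.")

    with patch("requests.Session.request", _refuse):
        yield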
| 709 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ):
'''simple docstring'''
lowercase__ : Optional[int]= parent
lowercase__ : Tuple= batch_size
lowercase__ : Tuple= seq_length
lowercase__ : str= is_training
lowercase__ : str= use_input_lengths
lowercase__ : Any= use_token_type_ids
lowercase__ : List[Any]= use_labels
lowercase__ : Optional[int]= gelu_activation
lowercase__ : str= sinusoidal_embeddings
lowercase__ : List[str]= causal
lowercase__ : Any= asm
lowercase__ : Optional[int]= n_langs
lowercase__ : Union[str, Any]= vocab_size
lowercase__ : int= n_special
lowercase__ : Any= hidden_size
lowercase__ : int= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : List[str]= hidden_dropout_prob
lowercase__ : str= attention_probs_dropout_prob
lowercase__ : Any= max_position_embeddings
lowercase__ : List[Any]= type_vocab_size
lowercase__ : int= type_sequence_label_size
lowercase__ : Any= initializer_range
lowercase__ : Optional[int]= num_labels
lowercase__ : Union[str, Any]= num_choices
lowercase__ : List[Any]= summary_type
lowercase__ : Optional[int]= use_proj
lowercase__ : int= scope
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Tuple= None
if self.use_input_lengths:
lowercase__ : List[Any]= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ : Tuple= None
if self.use_token_type_ids:
lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ : str= None
lowercase__ : Tuple= None
lowercase__ : Dict= None
if self.use_labels:
lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float()
lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any]= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : Any= FlaubertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ )
lowercase__ : str= model(snake_case__ , langs=snake_case__ )
lowercase__ : Any= model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : List[str]= model(snake_case__ )
lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= model(snake_case__ )
lowercase__ : Any= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , )
lowercase__ : List[str]= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , )
((lowercase__), ) : Optional[Any]= result_with_labels.to_tuple()
lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
((lowercase__), ) : List[Any]= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ )
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= self.num_labels
lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : int= self.num_choices
lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Any= model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.prepare_config_and_inputs()
(
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__,
lowercase__, lowercase__, lowercase__, lowercase__,
) : Any= config_and_inputs
lowercase__ : Tuple= {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCAmelCase( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__lowerCamelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowercase__ : List[Any]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
lowercase__ : List[str]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModelTester(self )
lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@slow
@require_torch_gpu
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ : int= True
lowercase__ : List[Any]= model_class(config=snake_case__ )
lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ )
lowercase__ : Dict= torch.jit.trace(
snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) )
lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ )
loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) )
@require_torch
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
lowercase__ : Optional[int]= model(snake_case__ )[0]
lowercase__ : Optional[int]= torch.Size((1, 11, 768) )
self.assertEqual(output.shape , snake_case__ )
lowercase__ : Dict= torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
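# Hedged usage sketch mirroring the integration test above (assumes the public transformers Flaubert
# classes; the tokenizer class name is an assumption not present in this file):
import torch
from transformers import FlaubertModel, FlaubertTokenizer

tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
inputs = tokenizer("Le chat mange une pomme.", return_tensors="pt")
with torch.no_grad():
    last_hidden = model(**inputs)[0]  # shape: (batch, sequence_length, 768)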
| 85 | 0 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class __UpperCAmelCase( datasets.BeamBasedBuilder ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=UpperCamelCase__ , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(UpperCamelCase__ )
class __UpperCAmelCase( datasets.BeamBasedBuilder ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=UpperCamelCase__ , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(UpperCamelCase__ )
def lowercase__():
"""simple docstring"""
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def lowercase__():
"""simple docstring"""
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class __UpperCAmelCase( TestCase ):
"""simple docstring"""
@require_beam
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowercase__ : Union[str, Any]= DummyBeamDataset(cache_dir=UpperCamelCase__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(UpperCamelCase__ , builder.name , "default" , "0.0.0" , F'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
lowercase__ : Optional[int]= builder.as_dataset()
self.assertEqual(dset["train"].num_rows , UpperCamelCase__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , UpperCamelCase__ )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(UpperCamelCase__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def UpperCAmelCase_ ( self ):
'''simple docstring'''
import apache_beam as beam
lowercase__ : Union[str, Any]= beam.io.parquetio.WriteToParquet
lowercase__ : Optional[int]= len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowercase__ : List[Any]= DummyBeamDataset(cache_dir=UpperCamelCase__ , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
lowercase__ : Tuple= partial(UpperCamelCase__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
UpperCamelCase__ , builder.name , "default" , "0.0.0" , F'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
UpperCamelCase__ , builder.name , "default" , "0.0.0" , F'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
lowercase__ : Optional[Any]= builder.as_dataset()
self.assertEqual(dset["train"].num_rows , UpperCamelCase__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , UpperCamelCase__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(UpperCamelCase__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def UpperCAmelCase_ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowercase__ : Optional[Any]= DummyBeamDataset(cache_dir=UpperCamelCase__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowercase__ : Optional[Any]= NestedBeamDataset(cache_dir=UpperCamelCase__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(UpperCamelCase__ , builder.name , "default" , "0.0.0" , F'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
lowercase__ : int= builder.as_dataset()
self.assertEqual(dset["train"].num_rows , UpperCamelCase__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , UpperCamelCase__ )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(UpperCamelCase__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
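# Minimal Apache Beam sketch of the "Load Examples" step the dummy builders above feed into their
# pipelines (requires apache_beam; illustrative only):
import apache_beam as beam

with beam.Pipeline() as pipeline:
    _ = (
        pipeline
        | "Load Examples" >> beam.Create([(0, {"content": "foo"}), (1, {"content": "bar"})])
        | "Print" >> beam.Map(print)
    )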
| 710 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __UpperCAmelCase( BaseOutput ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = None
class __UpperCAmelCase( SchedulerMixin , ConfigMixin ):
"""simple docstring"""
__lowerCamelCase = 2
@register_to_config
def __init__( self , snake_case__ = 0.02 , snake_case__ = 100 , snake_case__ = 1.0_07 , snake_case__ = 80 , snake_case__ = 0.05 , snake_case__ = 50 , ):
'''simple docstring'''
# standard deviation of the initial noise distribution
lowercase__ : int= sigma_max
# setable values
lowercase__ : int= None
lowercase__ : np.IntTensor= None
lowercase__ : torch.FloatTensor= None # sigma(t_i)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
return sample
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__ : List[Any]= num_inference_steps
lowercase__ : Any= np.arange(0 , self.num_inference_steps )[::-1].copy()
lowercase__ : Tuple= torch.from_numpy(snake_case__ ).to(snake_case__ )
lowercase__ : Union[str, Any]= [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
lowercase__ : int= torch.tensor(snake_case__ , dtype=torch.floataa , device=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
lowercase__ : Optional[Any]= min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
lowercase__ : str= 0
# sample eps ~ N(0, S_noise^2 * I)
lowercase__ : List[Any]= self.config.s_noise * randn_tensor(sample.shape , generator=snake_case__ ).to(sample.device )
lowercase__ : str= sigma + gamma * sigma
lowercase__ : Any= sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
'''simple docstring'''
lowercase__ : Union[str, Any]= sample_hat + sigma_hat * model_output
lowercase__ : Optional[int]= (sample_hat - pred_original_sample) / sigma_hat
lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
'''simple docstring'''
lowercase__ : int= sample_prev + sigma_prev * model_output
lowercase__ : Optional[int]= (sample_prev - pred_original_sample) / sigma_prev
lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
raise NotImplementedError()
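# Standalone sketch of the sigma schedule computed in set_timesteps above (numpy only; the names
# sigma_min/sigma_max are made explicit here, they are obfuscated in the snippet):
import numpy as np


def karras_ve_schedule(sigma_min, sigma_max, num_inference_steps):
    timesteps = np.arange(0, num_inference_steps)[::-1].copy()
    return np.array(
        [
            sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1))
            for i in timesteps
        ]
    )


# Because the reversed timesteps are iterated, index 0 holds the smallest value and the last index
# the largest; the scheduler looks the schedule up by timestep value, so denoising still starts
# from the largest sigma.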
| 85 | 0 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
a : int = None
a : List[Any] = logging.get_logger(__name__)
a : Any = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
a : Tuple = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
a : Dict = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class __UpperCAmelCase( PreTrainedTokenizerFast ):
"""simple docstring"""
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ["input_ids", "attention_mask"]
__lowerCamelCase = TaTokenizer
__lowerCamelCase = []
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="</s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__=100 , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
lowercase__ : Any= [F'''<extra_id_{i}>''' for i in range(lowerCAmelCase_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
lowercase__ : Optional[Any]= len(set(filter(lambda snake_case__ : bool("extra_id_" in str(lowerCAmelCase_ ) ) , lowerCAmelCase_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , extra_ids=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , )
lowercase__ : int= vocab_file
lowercase__ : Dict= False if not self.vocab_file else True
lowercase__ : Optional[Any]= extra_ids
@staticmethod
def UpperCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
lowercase__ : List[Any]= TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , lowerCAmelCase_ , )
return max_model_length
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ : int= os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
logger.info(F'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__ : Dict= token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
lowercase__ : Any= token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__ : Dict= [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return list(
set(filter(lambda snake_case__ : bool(re.search(r"<extra_id_\d+>" , lowerCAmelCase_ ) ) is not None , self.additional_special_tokens ) ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return [self.convert_tokens_to_ids(lowerCAmelCase_ ) for token in self.get_sentinel_tokens()]
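# Hedged usage sketch (assumes this file corresponds to transformers' T5TokenizerFast; availability
# of get_sentinel_tokens depends on the installed transformers version):
from transformers import T5TokenizerFast

tok = T5TokenizerFast.from_pretrained("t5-small")
ids = tok("translate English to German: Hello").input_ids  # ends with the </s> id appended by the logic above
sentinels = tok.get_sentinel_tokens()  # the <extra_id_*> tokens matched by the regex above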
| 711 |
"""simple docstring"""
from ....utils import logging
a : List[str] = logging.get_logger(__name__)
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=None , snake_case__=2048 ):
'''simple docstring'''
lowercase__ : Dict= config.__dict__
lowercase__ : str= modal_hidden_size
if num_labels:
lowercase__ : List[str]= num_labels
| 85 | 0 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
# TODO Update this
a : Union[str, Any] = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __UpperCAmelCase( _UpperCamelCase ):
"""simple docstring"""
__lowerCamelCase = "esm"
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=1026 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__="absolute" , snake_case__=True , snake_case__=None , snake_case__=False , snake_case__=False , snake_case__=None , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , mask_token_id=_UpperCAmelCase , **_UpperCAmelCase )
lowercase__ : Union[str, Any]= vocab_size
lowercase__ : Dict= hidden_size
lowercase__ : List[str]= num_hidden_layers
lowercase__ : Optional[Any]= num_attention_heads
lowercase__ : int= intermediate_size
lowercase__ : List[Any]= hidden_dropout_prob
lowercase__ : Dict= attention_probs_dropout_prob
lowercase__ : Optional[Any]= max_position_embeddings
lowercase__ : int= initializer_range
lowercase__ : List[str]= layer_norm_eps
lowercase__ : Any= position_embedding_type
lowercase__ : List[Any]= use_cache
lowercase__ : List[str]= emb_layer_norm_before
lowercase__ : Union[str, Any]= token_dropout
lowercase__ : Optional[int]= is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
lowercase__ : str= EsmFoldConfig()
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__ : Tuple= EsmFoldConfig(**_UpperCAmelCase )
lowercase__ : List[str]= esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
lowercase__ : Optional[Any]= get_default_vocab_list()
else:
lowercase__ : int= vocab_list
else:
lowercase__ : Any= None
lowercase__ : int= None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , _UpperCAmelCase ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= super().to_dict()
if isinstance(self.esmfold_config , _UpperCAmelCase ):
lowercase__ : Tuple= self.esmfold_config.to_dict()
return output
@dataclass
class __UpperCAmelCase:
"""simple docstring"""
__lowerCamelCase = None
__lowerCamelCase = True
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = 0
__lowerCamelCase = True
__lowerCamelCase = False
__lowerCamelCase = 128
__lowerCamelCase = None
def UpperCAmelCase_ ( self ):
'''simple docstring'''
if self.trunk is None:
lowercase__ : Optional[int]= TrunkConfig()
elif isinstance(self.trunk , _UpperCAmelCase ):
lowercase__ : int= TrunkConfig(**self.trunk )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= asdict(self )
lowercase__ : Tuple= self.trunk.to_dict()
return output
@dataclass
class __UpperCAmelCase:
"""simple docstring"""
__lowerCamelCase = 48
__lowerCamelCase = 1_024
__lowerCamelCase = 128
__lowerCamelCase = 32
__lowerCamelCase = 32
__lowerCamelCase = 32
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = False
__lowerCamelCase = 4
__lowerCamelCase = 128
__lowerCamelCase = None
def UpperCAmelCase_ ( self ):
'''simple docstring'''
if self.structure_module is None:
lowercase__ : List[str]= StructureModuleConfig()
elif isinstance(self.structure_module , _UpperCAmelCase ):
lowercase__ : List[str]= StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
lowercase__ : str= self.sequence_state_dim // self.sequence_head_width
lowercase__ : Optional[int]= self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= asdict(self )
lowercase__ : Union[str, Any]= self.structure_module.to_dict()
return output
@dataclass
class __UpperCAmelCase:
"""simple docstring"""
__lowerCamelCase = 384
__lowerCamelCase = 128
__lowerCamelCase = 16
__lowerCamelCase = 128
__lowerCamelCase = 12
__lowerCamelCase = 4
__lowerCamelCase = 8
__lowerCamelCase = 0.1
__lowerCamelCase = 8
__lowerCamelCase = 1
__lowerCamelCase = 2
__lowerCamelCase = 7
__lowerCamelCase = 10
__lowerCamelCase = 1E-8
__lowerCamelCase = 1E5
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return asdict(self )
def lowercase__() ->Tuple:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 712 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__ : Optional[int]= []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def lowercase__(A , A ) ->Any:
"""simple docstring"""
lowercase__ : Any= []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def lowercase__(A ) ->List[Any]:
"""simple docstring"""
lowercase__ : Dict= []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token") )
return token
def lowercase__() ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict= []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def lowercase__(A , A , A , A ) ->Optional[int]:
"""simple docstring"""
lowercase__ : List[str]= "imagenet-1k-id2label.json"
lowercase__ : List[str]= 1_000
lowercase__ : Tuple= "huggingface/label-files"
lowercase__ : int= num_labels
lowercase__ : int= json.load(open(cached_download(hf_hub_url(A , A , repo_type="dataset" ) ) , "r" ) )
lowercase__ : str= {int(A ): v for k, v in idalabel.items()}
lowercase__ : Optional[int]= idalabel
lowercase__ : Union[str, Any]= {v: k for k, v in idalabel.items()}
lowercase__ : Tuple= CvtConfig(num_labels=A , idalabel=A , labelaid=A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
lowercase__ : int= [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
lowercase__ : Union[str, Any]= [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowercase__ : Optional[Any]= [2, 2, 20]
lowercase__ : Optional[Any]= [3, 12, 16]
lowercase__ : List[str]= [192, 768, 1_024]
lowercase__ : List[str]= CvtForImageClassification(A )
lowercase__ : Any= AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
lowercase__ : Dict= image_size
lowercase__ : int= torch.load(A , map_location=torch.device("cpu" ) )
lowercase__ : Optional[Any]= OrderedDict()
lowercase__ : Tuple= []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowercase__ : Optional[int]= list_of_state_dict + cls_token(A )
lowercase__ : List[str]= list_of_state_dict + embeddings(A )
for cnt in range(config.depth[idx] ):
lowercase__ : Dict= list_of_state_dict + attention(A , A )
lowercase__ : Optional[Any]= list_of_state_dict + final()
for gg in list_of_state_dict:
print(A )
for i in range(len(A ) ):
lowercase__ : str= original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
a : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Path to the original CvT checkpoint file.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a : Optional[int] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 85 | 0 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class __UpperCAmelCase( _a ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = False , **snake_case__ , ):
'''simple docstring'''
super().__init__(features=_A , cache_dir=_A , keep_in_memory=_A , **_A )
lowercase__ : Any= Sql(
cache_dir=_A , features=_A , sql=_A , con=_A , **_A , )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= None
lowercase__ : Optional[int]= None
lowercase__ : Dict= None
lowercase__ : List[str]= None
self.builder.download_and_prepare(
download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , )
# Build dataset for splits
lowercase__ : Optional[Any]= self.builder.as_dataset(
split="train" , verification_mode=_A , in_memory=self.keep_in_memory )
return dataset
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
lowercase__ : Dict= dataset
lowercase__ : Any= name
lowercase__ : int= con
lowercase__ : Dict= batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
lowercase__ : Dict= num_proc
lowercase__ : int= to_sql_kwargs
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.to_sql_kwargs.pop("sql" , _A )
lowercase__ : Dict= self.to_sql_kwargs.pop("con" , _A )
lowercase__ : Union[str, Any]= self.to_sql_kwargs.pop("index" , _A )
lowercase__ : str= self._write(index=_A , **self.to_sql_kwargs )
return written
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__, lowercase__, lowercase__ : int= args
lowercase__ : str= {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
lowercase__ : Optional[Any]= query_table(
table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , )
lowercase__ : Tuple= batch.to_pandas()
lowercase__ : Optional[Any]= df.to_sql(self.name , self.con , index=_A , **_A )
return num_rows or len(_A )
def UpperCAmelCase_ ( self , snake_case__ , **snake_case__ ):
'''simple docstring'''
lowercase__ : Union[str, Any]= 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
lowercase__, lowercase__ : List[Any]= len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += num_rows
return written
| 713 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=snake_case__ , scheduler=snake_case__ )
@torch.no_grad()
def __call__( self , snake_case__ = 1 , snake_case__ = 2000 , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.unet.config.sample_size
lowercase__ : Dict= (batch_size, 3, img_size, img_size)
lowercase__ : List[Any]= self.unet
lowercase__ : Tuple= randn_tensor(snake_case__ , generator=snake_case__ ) * self.scheduler.init_noise_sigma
lowercase__ : Tuple= sample.to(self.device )
self.scheduler.set_timesteps(snake_case__ )
self.scheduler.set_sigmas(snake_case__ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ : Optional[Any]= self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowercase__ : List[Any]= self.unet(snake_case__ , snake_case__ ).sample
lowercase__ : List[Any]= self.scheduler.step_correct(snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
# prediction step
lowercase__ : List[str]= model(snake_case__ , snake_case__ ).sample
lowercase__ : Tuple= self.scheduler.step_pred(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ )
lowercase__, lowercase__ : Tuple= output.prev_sample, output.prev_sample_mean
lowercase__ : List[str]= sample_mean.clamp(0 , 1 )
lowercase__ : Union[str, Any]= sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ : str= self.numpy_to_pil(snake_case__ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=snake_case__ )
| 85 | 0 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
a : Optional[int] = get_tests_dir("""fixtures/dummy-config.json""")
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= 0
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= AutoConfig.for_model("roberta" )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowercase__ : List[Any]= os.path.join(__UpperCamelCase , "fake-roberta" )
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
with open(os.path.join(__UpperCamelCase , "config.json" ) , "w" ) as f:
f.write(json.dumps({} ) )
lowercase__ : List[Any]= AutoConfig.from_pretrained(__UpperCamelCase )
self.assertEqual(type(__UpperCamelCase ) , __UpperCamelCase )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
try:
AutoConfig.register("custom" , __UpperCamelCase )
# Wrong model type will raise an error
with self.assertRaises(__UpperCamelCase ):
AutoConfig.register("model" , __UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCamelCase ):
AutoConfig.register("bert" , __UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase__ : Dict= CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__UpperCamelCase )
lowercase__ : Any= AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def UpperCAmelCase_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , "bert-base is not a local folder and is not a valid model identifier" ):
lowercase__ : Dict= AutoConfig.from_pretrained("bert-base" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowercase__ : List[str]= AutoConfig.from_pretrained(__UpperCamelCase , revision="aaaaaa" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
lowercase__ : List[Any]= AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__UpperCamelCase ):
lowercase__ : str= AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCamelCase ):
lowercase__ : Optional[int]= AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__UpperCamelCase )
lowercase__ : Union[str, Any]= AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__UpperCamelCase )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__UpperCamelCase )
lowercase__ : List[str]= AutoConfig.from_pretrained(__UpperCamelCase , trust_remote_code=__UpperCamelCase )
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "new-model"
try:
AutoConfig.register("new-model" , __UpperCamelCase )
# If remote code is not set, the default is to use local
lowercase__ : List[str]= AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
lowercase__ : Optional[int]= AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__UpperCamelCase )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
lowercase__ : str= AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__UpperCamelCase )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 714 |
"""simple docstring"""
def lowercase__(A ) ->list[int]:
"""simple docstring"""
lowercase__ : List[str]= len(A )
for i in range(A ):
for j in range(i + 1 , A ):
if numbers[j] < numbers[i]:
lowercase__, lowercase__ : List[str]= numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
a : Dict = input("""Enter numbers separated by a comma:\n""").strip()
a : List[str] = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 85 | 0 |
"""simple docstring"""
a : Any = """Tobias Carryer"""
from time import time
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=int(time() ) ): # noqa: B008
'''simple docstring'''
lowercase__ : str= multiplier
lowercase__ : Tuple= increment
lowercase__ : List[Any]= modulo
lowercase__ : str= seed
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
a : int = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
while True:
print(lcg.next_number())
| 715 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def lowercase__(A ) ->bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase__() ->Iterator[int]:
"""simple docstring"""
lowercase__ : Union[str, Any]= 2
while True:
if is_prime(A ):
yield num
num += 1
def lowercase__(A = 2_000_000 ) ->int:
"""simple docstring"""
return sum(takewhile(lambda A : x < n , prime_generator() ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 0 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a : List[Any] = logging.get_logger(__name__)
a : List[str] = """https://openaipublic.azureedge.net/jukebox/models/"""
a : Optional[int] = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def lowercase__(A ) ->Tuple:
"""simple docstring"""
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
lowercase__ : str= key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
lowercase__ : List[str]= key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
lowercase__ : Union[str, Any]= key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
lowercase__ : Dict= key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
lowercase__ : str= key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
lowercase__ : Optional[int]= key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
lowercase__ : int= key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
lowercase__ : Dict= key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def lowercase__(A , A , A , A ) ->Tuple:
"""simple docstring"""
lowercase__ : List[Any]= {}
import re
lowercase__ : str= re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
lowercase__ : Union[str, Any]= re.compile(
R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
lowercase__ : Tuple= re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
lowercase__ : Tuple= re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
lowercase__ : Tuple= re.compile(
R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
lowercase__ : Optional[int]= re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
lowercase__ : Optional[int]= re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
lowercase__ : Optional[int]= re.compile(
R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
lowercase__ : Tuple= re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(A ):
lowercase__ : Any= re_encoder_block_conv_in.match(A )
lowercase__ : Optional[Any]= regex_match.groups()
lowercase__ : Optional[Any]= int(groups[2] ) * 2 + int(groups[3] )
lowercase__ : List[Any]= f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
lowercase__ : Any= re_encoder_block_conv_in.sub(A , A )
elif re_encoder_block_resnet.fullmatch(A ):
lowercase__ : Dict= re_encoder_block_resnet.match(A )
lowercase__ : Union[str, Any]= regex_match.groups()
lowercase__ : List[str]= int(groups[2] ) * 2 + int(groups[3] )
lowercase__ : str= {"1": 1, "3": 2}[groups[-2]]
lowercase__ : Dict= f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
lowercase__ : int= f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowercase__ : Optional[Any]= prefix + resnet_block
lowercase__ : Optional[Any]= re_encoder_block_resnet.sub(A , A )
elif re_encoder_block_proj_out.fullmatch(A ):
lowercase__ : Optional[Any]= re_encoder_block_proj_out.match(A )
lowercase__ : Dict= regex_match.groups()
lowercase__ : Optional[int]= f'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
lowercase__ : Optional[Any]= re_encoder_block_proj_out.sub(A , A )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(A ):
lowercase__ : Any= re_decoder_block_conv_out.match(A )
lowercase__ : Optional[Any]= regex_match.groups()
lowercase__ : Optional[int]= int(groups[2] ) * 2 + int(groups[3] ) - 2
lowercase__ : Any= f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
lowercase__ : Dict= re_decoder_block_conv_out.sub(A , A )
elif re_decoder_block_resnet.fullmatch(A ):
lowercase__ : Any= re_decoder_block_resnet.match(A )
lowercase__ : Optional[Any]= regex_match.groups()
lowercase__ : Any= int(groups[2] ) * 2 + int(groups[3] ) - 2
lowercase__ : Dict= {"1": 1, "3": 2}[groups[-2]]
lowercase__ : List[str]= f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
lowercase__ : Union[str, Any]= f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowercase__ : Dict= prefix + resnet_block
lowercase__ : Optional[int]= re_decoder_block_resnet.sub(A , A )
elif re_decoder_block_proj_in.fullmatch(A ):
lowercase__ : Dict= re_decoder_block_proj_in.match(A )
lowercase__ : Any= regex_match.groups()
lowercase__ : int= f'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
lowercase__ : Union[str, Any]= re_decoder_block_proj_in.sub(A , A )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(A ):
lowercase__ : List[str]= re_prior_cond_conv_out.match(A )
lowercase__ : List[Any]= regex_match.groups()
lowercase__ : List[str]= int(groups[1] ) * 2 + int(groups[2] ) - 2
lowercase__ : Union[str, Any]= f'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
lowercase__ : Union[str, Any]= re_prior_cond_conv_out.sub(A , A )
elif re_prior_cond_resnet.fullmatch(A ):
lowercase__ : Optional[int]= re_prior_cond_resnet.match(A )
lowercase__ : Union[str, Any]= regex_match.groups()
lowercase__ : Union[str, Any]= int(groups[1] ) * 2 + int(groups[2] ) - 2
lowercase__ : Union[str, Any]= {"1": 1, "3": 2}[groups[-2]]
lowercase__ : str= f'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
lowercase__ : Optional[int]= f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowercase__ : Dict= prefix + resnet_block
lowercase__ : int= re_prior_cond_resnet.sub(A , A )
elif re_prior_cond_proj_in.fullmatch(A ):
lowercase__ : Any= re_prior_cond_proj_in.match(A )
lowercase__ : List[str]= regex_match.groups()
lowercase__ : int= f'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
lowercase__ : int= re_prior_cond_proj_in.sub(A , A )
# keep original key
else:
lowercase__ : int= original_key
lowercase__ : List[Any]= replace_key(A )
if f'''{key_prefix}.{key}''' not in model_state_dict or key is None:
print(f'''failed converting {original_key} to {key}, does not match''' )
# handle mismatched shape
elif value.shape != model_state_dict[f'''{key_prefix}.{key}'''].shape:
lowercase__ : int= model_state_dict[f'''{key_prefix}.{key}''']
print(f'''{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match''' )
lowercase__ : Optional[int]= original_key
lowercase__ : Optional[Any]= original_key
lowercase__ : Optional[Any]= value
return new_dict
@torch.no_grad()
def lowercase__(A=None , A=None ) ->List[Any]:
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ):
lowercase__ : int= requests.get(f'''{PREFIX}{file}''' , allow_redirects=A )
os.makedirs(f'''{pytorch_dump_folder_path}/''' , exist_ok=A )
open(f'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , "wb" ).write(r.content )
lowercase__ : Tuple= MODEL_MAPPING[model_name.split("/" )[-1]]
lowercase__ : Dict= JukeboxConfig.from_pretrained(A )
lowercase__ : Dict= JukeboxModel(A )
lowercase__ : Tuple= []
lowercase__ : List[Any]= {}
for i, dict_name in enumerate(A ):
lowercase__ : Tuple= torch.load(f'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )["model"]
lowercase__ : List[str]= {}
for k in old_dic.keys():
if k.endswith(".b" ):
lowercase__ : List[str]= old_dic[k]
elif k.endswith(".w" ):
lowercase__ : Optional[int]= old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
lowercase__ : Any= old_dic[k]
else:
lowercase__ : Any= old_dic[k]
lowercase__ : Any= "vqvae" if i == 0 else f'''priors.{3 - i}'''
lowercase__ : str= fix_jukebox_keys(A , model.state_dict() , A , A )
weight_dict.append(A )
lowercase__ : Union[str, Any]= weight_dict.pop(0 )
model.vqvae.load_state_dict(A )
for i in range(len(A ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(A ).mkdir(exist_ok=A )
with open(f'''{pytorch_dump_folder_path}/mapping.json''' , "w" ) as txtfile:
json.dump(A , A )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A )
return weight_dict
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
a : Any = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 716 |
"""simple docstring"""
def lowercase__(A ) ->bool:
"""simple docstring"""
lowercase__ : Tuple= (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowercase__(A = 5_000 ) ->int:
"""simple docstring"""
lowercase__ : str= [(i * (3 * i - 1)) // 2 for i in range(1 , A )]
for i, pentagonal_i in enumerate(A ):
for j in range(A , len(A ) ):
lowercase__ : List[Any]= pentagonal_nums[j]
lowercase__ : int= pentagonal_i + pentagonal_j
lowercase__ : Optional[int]= pentagonal_j - pentagonal_i
if is_pentagonal(A ) and is_pentagonal(A ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
a : int = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
a : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 717 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Union[str, Any] = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_text_model"
__lowerCamelCase = ["past_key_values"]
__lowerCamelCase = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=50244 , snake_case__=768 , snake_case__=64 , snake_case__=2048 , snake_case__=12 , snake_case__=12 , snake_case__=32 , snake_case__=128 , snake_case__=0.1 , snake_case__=1e-6 , snake_case__=1.0 , snake_case__="gelu_new" , snake_case__=0 , snake_case__=False , snake_case__=0 , snake_case__=1 , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
lowercase__ : int= vocab_size
lowercase__ : Optional[Any]= hidden_size
lowercase__ : Tuple= d_kv
lowercase__ : Optional[int]= d_ff
lowercase__ : Any= num_layers
lowercase__ : Dict= num_heads
lowercase__ : List[Any]= relative_attention_num_buckets
lowercase__ : Optional[Any]= relative_attention_max_distance
lowercase__ : Dict= dropout_rate
lowercase__ : Tuple= layer_norm_epsilon
lowercase__ : str= initializer_factor
lowercase__ : Any= use_cache
lowercase__ : Optional[int]= eos_token_id
lowercase__ : str= decoder_start_token_id
# for backwards compatibility
lowercase__ : Optional[Any]= dense_act_fn
super().__init__(
pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , )
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : str= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : str= config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_vision_model"
def __init__( self , snake_case__=768 , snake_case__=768 , snake_case__=2048 , snake_case__=64 , snake_case__=12 , snake_case__=12 , snake_case__="gelu_new" , snake_case__=1e-6 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1e-10 , snake_case__=1.0 , snake_case__=4096 , snake_case__=32 , snake_case__=128 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase__ : Tuple= hidden_size
lowercase__ : Tuple= patch_embed_hidden_size
lowercase__ : Optional[Any]= d_ff
lowercase__ : Dict= dropout_rate
lowercase__ : Any= num_hidden_layers
lowercase__ : Optional[int]= num_attention_heads
lowercase__ : Dict= initializer_range
lowercase__ : Tuple= initializer_factor
lowercase__ : Tuple= attention_dropout
lowercase__ : Optional[Any]= layer_norm_eps
lowercase__ : List[Any]= dense_act_fn
lowercase__ : str= seq_len
lowercase__ : List[str]= relative_attention_num_buckets
lowercase__ : Union[str, Any]= relative_attention_max_distance
lowercase__ : Dict= d_kv
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : int= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : Union[str, Any]= config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct"
__lowerCamelCase = True
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=1.0 , snake_case__=0.02 , snake_case__=False , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ )
if text_config is None:
lowercase__ : List[Any]= {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
lowercase__ : str= {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
lowercase__ : str= PixaStructTextConfig(**snake_case__ )
lowercase__ : Dict= PixaStructVisionConfig(**snake_case__ )
lowercase__ : int= self.text_config.decoder_start_token_id
lowercase__ : List[Any]= self.text_config.pad_token_id
lowercase__ : Any= self.text_config.eos_token_id
lowercase__ : Any= initializer_factor
lowercase__ : int= initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : Dict= is_vqa
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= copy.deepcopy(self.__dict__ )
lowercase__ : str= self.text_config.to_dict()
lowercase__ : str= self.vision_config.to_dict()
lowercase__ : List[str]= self.__class__.model_type
return output
| 85 | 0 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__=None , ):
'''simple docstring'''
lowercase__ : Tuple= parent
lowercase__ : Optional[int]= batch_size
lowercase__ : Any= seq_length
lowercase__ : List[str]= is_training
lowercase__ : str= use_input_mask
lowercase__ : int= use_token_type_ids
lowercase__ : List[str]= use_labels
lowercase__ : List[str]= vocab_size
lowercase__ : Optional[int]= hidden_size
lowercase__ : Dict= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : Optional[Any]= intermediate_size
lowercase__ : Any= hidden_act
lowercase__ : List[Any]= hidden_dropout_prob
lowercase__ : Union[str, Any]= attention_probs_dropout_prob
lowercase__ : Tuple= max_position_embeddings
lowercase__ : Optional[Any]= type_vocab_size
lowercase__ : Dict= type_sequence_label_size
lowercase__ : Tuple= initializer_range
lowercase__ : List[str]= num_labels
lowercase__ : Dict= num_choices
lowercase__ : Dict= scope
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Any= None
if self.use_input_mask:
lowercase__ : Union[str, Any]= random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Optional[Any]= None
if self.use_token_type_ids:
lowercase__ : List[str]= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Optional[Any]= None
lowercase__ : List[str]= None
lowercase__ : Dict= None
if self.use_labels:
lowercase__ : str= ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[int]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : Dict= self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , use_stable_embedding=snake_case__ , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : List[Any]= OpenLlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[int]= model(snake_case__ , attention_mask=snake_case__ )
lowercase__ : Tuple= model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : Union[str, Any]= True
lowercase__ : Optional[Any]= OpenLlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : List[Any]= model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
lowercase__ : int= model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
lowercase__ : List[Any]= model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : Tuple= OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : str= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[int]= True
lowercase__ : Union[str, Any]= True
lowercase__ : Optional[Any]= OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
lowercase__ : Optional[Any]= model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
lowercase__ : List[Any]= outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
lowercase__ : Union[str, Any]= ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase__ : List[str]= ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
lowercase__ : Tuple= torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase__ : Tuple= torch.cat([input_mask, next_mask] , dim=-1 )
lowercase__ : List[Any]= model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )['''hidden_states'''][0]
lowercase__ : List[Any]= model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )['''hidden_states'''][0]
# select random slice
lowercase__ : Any= ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase__ : Union[str, Any]= output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase__ : List[str]= output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.prepare_config_and_inputs()
(
lowercase__
) : str= config_and_inputs
lowercase__ : List[Any]= {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __UpperCAmelCase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__lowerCamelCase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__lowerCamelCase = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase = False
__lowerCamelCase = False
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= OpenLlamaModelTester(self )
lowercase__ : List[Any]= ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__ : str= type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any]= 3
lowercase__ : List[Any]= input_dict['''input_ids''']
lowercase__ : int= input_ids.ne(1 ).to(snake_case__ )
lowercase__ : str= ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase__ : Dict= OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[int]= 3
lowercase__ : Optional[int]= '''single_label_classification'''
lowercase__ : List[str]= input_dict['''input_ids''']
lowercase__ : Dict= input_ids.ne(1 ).to(snake_case__ )
lowercase__ : int= ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase__ : Optional[int]= OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str= 3
lowercase__ : Union[str, Any]= '''multi_label_classification'''
lowercase__ : Any= input_dict['''input_ids''']
lowercase__ : List[Any]= input_ids.ne(1 ).to(snake_case__ )
lowercase__ : List[str]= ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase__ : Tuple= OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[str]= ids_tensor([1, 10] , config.vocab_size )
lowercase__ : Optional[Any]= ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase__ : int= OpenLlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
lowercase__ : Union[str, Any]= original_model(snake_case__ ).last_hidden_state
lowercase__ : List[Any]= original_model(snake_case__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase__ : Tuple= {'''type''': scaling_type, '''factor''': 10.0}
lowercase__ : Optional[int]= OpenLlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
lowercase__ : Dict= scaled_model(snake_case__ ).last_hidden_state
lowercase__ : List[str]= scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1e-5 ) )
| 718 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
lowercase__ : str= AutoTokenizer.from_pretrained("google/mt5-small" )
lowercase__ : Tuple= tokenizer("Hello there" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= tokenizer("Hi I am" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ).loss
lowercase__ : int= -tf.math.reduce_mean(snake_case__ ).numpy()
lowercase__ : int= -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 85 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a : Optional[int] = logging.get_logger(__name__)
a : str = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
a : Optional[Any] = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
a : Dict = {"""facebook/blenderbot_small-90M""": 512}
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__ : Dict= set()
lowercase__ : str= word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ : str= char
lowercase__ : int= set(UpperCamelCase__ )
return pairs
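# Illustrative, standalone sketch (not part of the original file): the helper above collects
# adjacent symbol pairs, which is the first step of every BPE merge round. A self-contained
# version of the same idea, with an example of its behaviour:
def _example_symbol_pairs(word):
    # word is a tuple of symbols, e.g. ("l", "o", "w", "</w>")
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# _example_symbol_pairs(("l", "o", "w", "</w>"))
# -> {("l", "o"), ("o", "w"), ("w", "</w>")}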
class __UpperCAmelCase( lowercase_ ):
"""simple docstring"""
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , snake_case__ , snake_case__ , snake_case__="__start__" , snake_case__="__end__" , snake_case__="__unk__" , snake_case__="__null__" , **snake_case__ , ):
'''simple docstring'''
super().__init__(unk_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , **snake_case__ )
with open(snake_case__ , encoding="utf-8" ) as vocab_handle:
lowercase__ : List[str]= json.load(snake_case__ )
lowercase__ : Optional[int]= {v: k for k, v in self.encoder.items()}
with open(snake_case__ , encoding="utf-8" ) as merges_handle:
lowercase__ : List[Any]= merges_handle.read().split("\n" )[1:-1]
lowercase__ : List[str]= [tuple(merge.split() ) for merge in merges]
lowercase__ : int= dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowercase__ : List[Any]= {}
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase__ : Any= re.sub("([.,!?()])" , r" \1" , snake_case__ )
lowercase__ : List[str]= re.sub("(\')" , r" \1 " , snake_case__ )
lowercase__ : Any= re.sub(r"\s{2,}" , " " , snake_case__ )
if "\n" in token:
lowercase__ : List[Any]= token.replace("\n" , " __newln__" )
lowercase__ : Union[str, Any]= token.split(" " )
lowercase__ : Union[str, Any]= []
for token in tokens:
if not len(snake_case__ ):
continue
lowercase__ : Dict= token.lower()
lowercase__ : int= tuple(snake_case__ )
lowercase__ : Any= tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
lowercase__ : Any= get_pairs(snake_case__ )
if not pairs:
words.append(snake_case__ )
continue
while True:
lowercase__ : int= min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
                first, second = bigram
lowercase__ : int= []
lowercase__ : str= 0
while i < len(snake_case__ ):
try:
lowercase__ : Union[str, Any]= word.index(snake_case__ , snake_case__ )
new_word.extend(word[i:j] )
lowercase__ : int= j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase__ : int= tuple(snake_case__ )
lowercase__ : Optional[Any]= new_word
if len(snake_case__ ) == 1:
break
else:
lowercase__ : Tuple= get_pairs(snake_case__ )
lowercase__ : Tuple= "@@ ".join(snake_case__ )
lowercase__ : Optional[int]= word[:-4]
lowercase__ : Tuple= word
words.append(snake_case__ )
return " ".join(snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Union[str, Any]= []
lowercase__ : List[str]= re.findall(r"\S+\n?" , snake_case__ )
for token in words:
split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) )
return split_tokens
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Any= token.lower()
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
return self.decoder.get(snake_case__ , self.unk_token )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= " ".join(snake_case__ ).replace("@@ " , "" ).strip()
return out_string
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ : int= os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Dict= os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(snake_case__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + "\n" )
lowercase__ : str= 0
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case__ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
lowercase__ : List[Any]= token_index
writer.write(" ".join(snake_case__ ) + "\n" )
index += 1
return vocab_file, merge_file
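# Illustrative usage sketch (not part of the original file); the checkpoint is the one named in
# the pretrained maps above, and the exact subword pieces depend on the merges file:
#   tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#   tokenizer.tokenize("Sam is a boy.")   # lowercased, punctuation split off, then BPE applied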
| 719 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "BridgeTowerImageProcessor"
__lowerCamelCase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
def __call__( self , snake_case__ , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[int]= self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
# add pixel_values + pixel_mask
lowercase__ : Optional[int]= self.image_processor(
snake_case__ , return_tensors=snake_case__ , do_normalize=snake_case__ , do_center_crop=snake_case__ , **snake_case__ )
encoding.update(snake_case__ )
return encoding
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.tokenizer.model_input_names
lowercase__ : List[Any]= self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
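# Illustrative usage sketch (not part of the original file); the checkpoint name is an
# assumption for the example:
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   inputs = processor(images=pil_image, text="a photo of a cat", return_tensors="pt")
#   # 'inputs' combines the tokenizer output (input_ids, attention_mask) with the image
#   # processor output (pixel_values, pixel_mask), as implemented in __call__ above.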
| 85 | 0 |
"""simple docstring"""
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
a : Optional[Any] = Mapping[str, np.ndarray]
a : Any = Mapping[str, Any] # Is a nested dict.
a : Any = 0.01
@dataclasses.dataclass(frozen=SCREAMING_SNAKE_CASE__ )
class __UpperCAmelCase:
"""simple docstring"""
__lowerCamelCase = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
__lowerCamelCase = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
__lowerCamelCase = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
__lowerCamelCase = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
__lowerCamelCase = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
__lowerCamelCase = None
# Optional remark about the protein. Included as a comment in output PDB
# files
__lowerCamelCase = None
# Templates used to generate this protein (prediction-only)
__lowerCamelCase = None
# Chain corresponding to each parent
__lowerCamelCase = None
def lowercase__(A ) ->Protein:
"""simple docstring"""
lowercase__ : Dict= R"(\[[A-Z]+\]\n)"
lowercase__ : int= [tag.strip() for tag in re.split(lowerCamelCase__ , lowerCamelCase__ ) if len(lowerCamelCase__ ) > 0]
lowercase__ : int= zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
lowercase__ : Dict= ["N", "CA", "C"]
lowercase__ : Optional[Any]= None
lowercase__ : List[Any]= None
lowercase__ : List[str]= None
for g in groups:
if "[PRIMARY]" == g[0]:
lowercase__ : List[str]= g[1][0].strip()
for i in range(len(lowerCamelCase__ ) ):
if seq[i] not in residue_constants.restypes:
lowercase__ : List[Any]= "X" # FIXME: strings are immutable
lowercase__ : Optional[int]= np.array(
[residue_constants.restype_order.get(lowerCamelCase__ , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
lowercase__ : List[str]= []
for axis in range(3 ):
tertiary.append(list(map(lowerCamelCase__ , g[1][axis].split() ) ) )
lowercase__ : int= np.array(lowerCamelCase__ )
lowercase__ : str= np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(lowerCamelCase__ ):
lowercase__ : Tuple= np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
lowercase__ : List[Any]= np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
lowercase__ : str= np.zeros(
(
len(lowerCamelCase__ ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(lowerCamelCase__ ):
lowercase__ : Union[str, Any]= 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=lowerCamelCase__ , atom_mask=lowerCamelCase__ , aatype=lowerCamelCase__ , residue_index=np.arange(len(lowerCamelCase__ ) ) , b_factors=lowerCamelCase__ , )
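# Illustrative note (not part of the original file): the parser above expects ProteinNet-style
# text with bracketed section tags, roughly of the form
#   [PRIMARY]
#   MKV...                      (one-letter amino-acid sequence)
#   [TERTIARY]
#   x0 x1 ...                   (three whitespace-separated lines: x, y and z coordinates)
#   y0 y1 ...
#   z0 z1 ...
#   [MASK]
#   ++--+...                    ('+' = resolved residue, '-' = missing)
# Coordinates are multiplied by PICO_TO_ANGSTROM, i.e. they are assumed to be in picometres.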
def lowercase__(A , A = 0 ) ->List[str]:
"""simple docstring"""
lowercase__ : int= []
lowercase__ : int= prot.remark
if remark is not None:
pdb_headers.append(f'''REMARK {remark}''' )
lowercase__ : str= prot.parents
lowercase__ : Optional[int]= prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
lowercase__ : List[str]= [p for i, p in zip(lowerCamelCase__ , lowerCamelCase__ ) if i == chain_id]
if parents is None or len(lowerCamelCase__ ) == 0:
lowercase__ : Optional[Any]= ["N/A"]
pdb_headers.append(f'''PARENT {' '.join(lowerCamelCase__ )}''' )
return pdb_headers
def lowercase__(A , A ) ->str:
"""simple docstring"""
lowercase__ : Optional[int]= []
lowercase__ : Optional[int]= pdb_str.split("\n" )
lowercase__ : Tuple= prot.remark
if remark is not None:
out_pdb_lines.append(f'''REMARK {remark}''' )
lowercase__ : Optional[int]= 42
if prot.parents is not None and len(prot.parents ) > 0:
lowercase__ : Any= []
if prot.parents_chain_index is not None:
lowercase__ : List[Any]= {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(lowerCamelCase__ ) , [] )
parent_dict[str(lowerCamelCase__ )].append(lowerCamelCase__ )
lowercase__ : List[str]= max([int(lowerCamelCase__ ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
lowercase__ : Dict= parent_dict.get(str(lowerCamelCase__ ) , ["N/A"] )
parents_per_chain.append(lowerCamelCase__ )
else:
parents_per_chain.append(list(prot.parents ) )
else:
lowercase__ : Union[str, Any]= [["N/A"]]
def make_parent_line(A ) -> str:
return f'''PARENT {' '.join(lowerCamelCase__ )}'''
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
lowercase__ : List[Any]= 0
for i, l in enumerate(lowerCamelCase__ ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(lowerCamelCase__ )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(lowerCamelCase__ ):
lowercase__ : Dict= parents_per_chain[chain_counter]
else:
lowercase__ : List[Any]= ["N/A"]
out_pdb_lines.append(make_parent_line(lowerCamelCase__ ) )
return "\n".join(lowerCamelCase__ )
def lowercase__(A ) ->str:
"""simple docstring"""
lowercase__ : Optional[int]= residue_constants.restypes + ["X"]
def res_atoa(A ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
lowercase__ : Optional[Any]= residue_constants.atom_types
lowercase__ : Any= []
lowercase__ : Union[str, Any]= prot.atom_mask
lowercase__ : Union[str, Any]= prot.aatype
lowercase__ : List[str]= prot.atom_positions
lowercase__ : Tuple= prot.residue_index.astype(np.intaa )
lowercase__ : str= prot.b_factors
lowercase__ : Optional[Any]= prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
lowercase__ : Any= get_pdb_headers(lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
pdb_lines.extend(lowerCamelCase__ )
lowercase__ : Any= aatype.shape[0]
lowercase__ : List[str]= 1
lowercase__ : int= 0
lowercase__ : Dict= string.ascii_uppercase
lowercase__ : List[str]= None
# Add all atom sites.
for i in range(lowerCamelCase__ ):
lowercase__ : List[str]= res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(lowerCamelCase__ , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
lowercase__ : List[str]= "ATOM"
lowercase__ : Union[str, Any]= atom_name if len(lowerCamelCase__ ) == 4 else f''' {atom_name}'''
lowercase__ : Tuple= ""
lowercase__ : Any= ""
lowercase__ : Union[str, Any]= 1.00
lowercase__ : str= atom_name[0] # Protein supports only C, N, O, S, this works.
lowercase__ : Optional[int]= ""
lowercase__ : int= "A"
if chain_index is not None:
lowercase__ : Dict= chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
lowercase__ : Any= (
f'''{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'''
f'''{res_name_a:>3} {chain_tag:>1}'''
f'''{residue_index[i]:>4}{insertion_code:>1} '''
f'''{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'''
f'''{occupancy:>6.2f}{b_factor:>6.2f} '''
f'''{element:>2}{charge:>2}'''
)
pdb_lines.append(lowerCamelCase__ )
atom_index += 1
lowercase__ : Union[str, Any]= i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
lowercase__ : str= True
lowercase__ : Any= chain_index[i + 1]
if should_terminate:
# Close the chain.
lowercase__ : List[Any]= "TER"
lowercase__ : Any= (
f'''{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'''
)
pdb_lines.append(lowerCamelCase__ )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(lowerCamelCase__ , lowerCamelCase__ ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(lowerCamelCase__ )
def lowercase__(A ) ->np.ndarray:
"""simple docstring"""
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def lowercase__(A , A , A = None , A = None , A = None , A = None , A = None , ) ->Protein:
"""simple docstring"""
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=lowerCamelCase__ , remark=lowerCamelCase__ , parents=lowerCamelCase__ , parents_chain_index=lowerCamelCase__ , )
| 720 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= tempfile.mkdtemp()
lowercase__ : Optional[Any]= 8
# DPR tok
lowercase__ : Tuple= [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowercase__ : Any= os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : Any= os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
lowercase__ : List[Any]= [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase__ : Tuple= dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowercase__ : Any= ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : Tuple= {"unk_token": "<unk>"}
lowercase__ : int= os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : List[str]= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : str= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
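    # Illustrative note (not part of the original file): in this toy index doc "0" has an all-ones
    # embedding and doc "1" an all-twos embedding, so a query vector of all ones scores
    # retrieval_vector_size vs. 2 * retrieval_vector_size and doc "1" always ranks first -- which
    # is what the "max inner product" assertions in the retrieval tests below rely on.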
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.get_dummy_dataset()
lowercase__ : Optional[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= dataset
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= self.get_dummy_dataset()
lowercase__ : Tuple= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
lowercase__ : Tuple= os.path.join(self.tmpdirname , "dataset" )
lowercase__ : Optional[Any]= os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
lowercase__ : List[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case__ ) , )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
lowercase__ : Optional[int]= os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
lowercase__ : int= os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
lowercase__ : str= {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(snake_case__ , open(snake_case__ , "wb" ) )
lowercase__ : List[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
lowercase__ : Optional[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= self.get_dummy_dataset()
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Tuple= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : str= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= 1
lowercase__ : str= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : Optional[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : int= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Union[str, Any]= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : int= self.get_dummy_legacy_index_retriever()
lowercase__ : Optional[Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : List[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : str= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
import torch
lowercase__ : str= 1
lowercase__ : Union[str, Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : str= [[5, 7], [10, 11]]
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Dict= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , np.ndarray )
lowercase__ : Any= retriever(
snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ , return_tensors="pt" , )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.get_dpr_ctx_encoder_tokenizer()
lowercase__ : Dict= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
retriever.set_ctx_encoder_tokenizer(snake_case__ )
lowercase__ : List[str]= [[5, 7], [10, 11]]
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : List[Any]= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
self.assertEqual(
            len(snake_case__ ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case__ ) # check for doc token related keys in dictionary.
| 85 | 0 |
"""simple docstring"""
from typing import Any
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ ):
'''simple docstring'''
lowercase__ : List[Any]= data
lowercase__ : str= None
def __repr__( self ):
'''simple docstring'''
return F'''Node({self.data})'''
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
lowercase__ : List[str]= None
def __iter__( self ):
'''simple docstring'''
lowercase__ : Tuple= self.head
while node:
yield node.data
lowercase__ : Union[str, Any]= node.next
def __len__( self ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self ):
'''simple docstring'''
return "->".join([str(_SCREAMING_SNAKE_CASE ) for item in self] )
def __getitem__( self , snake_case__ ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
lowercase__ : Dict= self.head
for _ in range(_SCREAMING_SNAKE_CASE ):
lowercase__ : Any= current.next
lowercase__ : str= data
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
self.insert_nth(len(self ) , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
self.insert_nth(0 , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
lowercase__ : Optional[Any]= Node(_SCREAMING_SNAKE_CASE )
if self.head is None:
lowercase__ : str= new_node
elif index == 0:
lowercase__ : Dict= self.head # link new_node to head
lowercase__ : Tuple= new_node
else:
lowercase__ : Tuple= self.head
for _ in range(index - 1 ):
lowercase__ : Optional[Any]= temp.next
lowercase__ : Tuple= temp.next
lowercase__ : Any= new_node
def UpperCAmelCase_ ( self ): # print every node data
'''simple docstring'''
print(self )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return self.delete_nth(0 )
def UpperCAmelCase_ ( self ): # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def UpperCAmelCase_ ( self , snake_case__ = 0 ):
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
lowercase__ : List[Any]= self.head # default first node
if index == 0:
lowercase__ : Dict= self.head.next
else:
lowercase__ : Dict= self.head
for _ in range(index - 1 ):
lowercase__ : Union[str, Any]= temp.next
lowercase__ : Dict= temp.next
lowercase__ : List[Any]= temp.next.next
return delete_node.data
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return self.head is None
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= None
lowercase__ : List[Any]= self.head
while current:
# Store the current node's next node.
lowercase__ : int= current.next
# Make the current node's next point backwards
lowercase__ : List[str]= prev
# Make the previous node be the current node
lowercase__ : int= current
# Make the current node the next node (to progress iteration)
lowercase__ : Optional[Any]= next_node
# Return prev in order to put the head at the end
lowercase__ : Optional[Any]= prev
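# Illustrative trace (not part of the original file) of reverse() on the list 1 -> 2 -> 3:
#   start : prev=None, current=Node(1)
#   step 1: Node(1).next=None, prev=Node(1), current=Node(2)
#   step 2: Node(2).next=Node(1), prev=Node(2), current=Node(3)
#   step 3: Node(3).next=Node(2), prev=Node(3), current=None
#   finish: head=prev=Node(3), so the list now reads 3 -> 2 -> 1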
def lowercase__() ->Optional[int]:
"""simple docstring"""
lowercase__ : str= LinkedList()
assert linked_list.is_empty() is True
assert str(A ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(A ) == i
linked_list.insert_nth(A , i + 1 )
assert str(A ) == "->".join(str(A ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(A ) == "->".join(str(A ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(A ) == 9
assert str(A ) == "->".join(str(A ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowercase__ : int= -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(A ) == "->".join(str(A ) for i in range(-8 , 1 ) )
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Tuple= [
-9,
100,
Node(77_345_112 ),
"dlrow olleH",
7,
5_555,
0,
-192.55_555,
"Hello, world!",
77.9,
Node(10 ),
None,
None,
12.20,
]
lowercase__ : Union[str, Any]= LinkedList()
for i in test_input:
linked_list.insert_tail(A )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(A ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowercase__ : Union[str, Any]= linked_list.delete_head()
assert result == -9
assert (
str(A ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowercase__ : Dict= linked_list.delete_tail()
assert result == 12.2
assert (
str(A ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowercase__ : List[str]= linked_list.delete_nth(10 )
assert result is None
assert (
str(A ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(A )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(A )
assert (
str(A )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(A )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def lowercase__() ->Tuple:
"""simple docstring"""
from doctest import testmod
testmod()
lowercase__ : List[str]= LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(A )
print("\nReading/changing Node data using indexing:" )
print(f'''Element at Position 1: {linked_list[1]}''' )
lowercase__ : List[str]= input("Enter New Value: " ).strip()
print("New list:" )
print(A )
print(f'''length of linked_list is : {len(A )}''' )
if __name__ == "__main__":
main()
| 721 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "AutoImageProcessor"
__lowerCamelCase = "AutoTokenizer"
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
lowercase__ : List[Any]= self.image_processor
def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowercase__ : Tuple= self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
lowercase__ : str= self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
lowercase__ : Any= image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 85 | 0 |
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a : Optional[Any] = logging.get_logger(__name__)
def lowercase__(A , A , A ) ->Dict:
"""simple docstring"""
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
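# Illustrative, standalone sketch (not part of the original file): the helper above rescales
# pixel boxes to the 0-1000 coordinate grid that LayoutLM-style models expect.
def _example_normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
# For a 200x100 image, _example_normalize_box([10, 20, 50, 80], 200, 100) -> [50, 200, 250, 800]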
def lowercase__(A , A , A ) ->int:
"""simple docstring"""
lowercase__ : Optional[Any]= to_pil_image(A )
lowercase__ : Optional[Any]= pil_image.size
lowercase__ : Union[str, Any]= pytesseract.image_to_data(A , lang=A , output_type="dict" , config=A )
lowercase__ : Any= data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
lowercase__ : List[Any]= [idx for idx, word in enumerate(A ) if not word.strip()]
lowercase__ : int= [word for idx, word in enumerate(A ) if idx not in irrelevant_indices]
lowercase__ : Union[str, Any]= [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices]
lowercase__ : Optional[Any]= [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices]
lowercase__ : Union[str, Any]= [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices]
lowercase__ : Optional[int]= [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowercase__ : Dict= []
for x, y, w, h in zip(A , A , A , A ):
lowercase__ : Union[str, Any]= [x, y, x + w, y + h]
actual_boxes.append(A )
# finally, normalize the bounding boxes
lowercase__ : str= []
for box in actual_boxes:
normalized_boxes.append(normalize_box(A , A , A ) )
assert len(A ) == len(A ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
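# Illustrative example (not part of the original file): for an image containing only the word
# "Invoice" near the top-left corner, the helper above returns something like
#   words == ["Invoice"], boxes == [[13, 10, 110, 42]]
# where the box values are normalised to 0-1000 and depend on the actual OCR output.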
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["pixel_values"]
def __init__( self , snake_case__ = True , snake_case__ = None , snake_case__ = PILImageResampling.BILINEAR , snake_case__ = True , snake_case__ = 1 / 255 , snake_case__ = True , snake_case__ = None , snake_case__ = None , snake_case__ = True , snake_case__ = None , snake_case__ = "" , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase__ : List[Any]= size if size is not None else {"height": 224, "width": 224}
lowercase__ : Union[str, Any]= get_size_dict(snake_case__ )
lowercase__ : Union[str, Any]= do_resize
lowercase__ : Union[str, Any]= size
lowercase__ : Tuple= resample
lowercase__ : Tuple= do_rescale
lowercase__ : Any= rescale_value
lowercase__ : Optional[Any]= do_normalize
lowercase__ : str= image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase__ : Any= image_std if image_std is not None else IMAGENET_STANDARD_STD
lowercase__ : int= apply_ocr
lowercase__ : Tuple= ocr_lang
lowercase__ : str= tesseract_config
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = PILImageResampling.BILINEAR , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Union[str, Any]= get_size_dict(snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
lowercase__ : Dict= (size["height"], size["width"])
return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , data_format=snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__=None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , **snake_case__ , ):
'''simple docstring'''
lowercase__ : List[str]= do_resize if do_resize is not None else self.do_resize
lowercase__ : Tuple= size if size is not None else self.size
lowercase__ : List[str]= get_size_dict(snake_case__ )
lowercase__ : int= resample if resample is not None else self.resample
lowercase__ : List[Any]= do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : Any= rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Dict= do_normalize if do_normalize is not None else self.do_normalize
lowercase__ : str= image_mean if image_mean is not None else self.image_mean
lowercase__ : List[str]= image_std if image_std is not None else self.image_std
lowercase__ : Union[str, Any]= apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase__ : str= ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase__ : Dict= tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase__ : Tuple= make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("If do_normalize is True, image_mean and image_std must be specified." )
# All transformations expect numpy arrays.
lowercase__ : Any= [to_numpy_array(snake_case__ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , "pytesseract" )
lowercase__ : Union[str, Any]= []
lowercase__ : List[Any]= []
for image in images:
lowercase__ : Any= apply_tesseract(snake_case__ , snake_case__ , snake_case__ )
words_batch.append(snake_case__ )
boxes_batch.append(snake_case__ )
if do_resize:
lowercase__ : Dict= [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images]
if do_rescale:
lowercase__ : Union[str, Any]= [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_normalize:
lowercase__ : List[Any]= [self.normalize(image=snake_case__ , mean=snake_case__ , std=snake_case__ ) for image in images]
lowercase__ : Optional[Any]= [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
lowercase__ : Dict= BatchFeature(data={"pixel_values": images} , tensor_type=snake_case__ )
if apply_ocr:
lowercase__ : List[str]= words_batch
lowercase__ : Optional[int]= boxes_batch
return data
| 700 |
"""simple docstring"""
a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ):
lowercase__ : Union[str, Any]= f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(A )
lowercase__ : str= "".join(bin(A )[2:].zfill(8 ) for byte in data )
lowercase__ : Tuple= len(A ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ : Union[str, Any]= b"=" * ((6 - len(A ) % 6) // 2)
        # Pad binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
binary_stream += "0" * (6 - len(A ) % 6)
else:
lowercase__ : str= b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A ) , 6 ) ).encode()
+ padding
)
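# Worked example (illustrative, not part of the original file): encoding b"a" (0x61).
#   bits           : "01100001"                  (8 bits, not a multiple of 6)
#   padding        : "=" * ((6 - 8 % 6) // 2) = "=="
#   padded bits    : "011000" "010000"           (8 bits + four filler zeros)
#   charset lookup : 24 -> "Y", 16 -> "Q"
#   result         : b"YQ==", and the decoder below reverses these steps.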
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ) and not isinstance(A , A ):
lowercase__ : str= (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(A )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(A , A ):
try:
lowercase__ : Optional[Any]= encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ : List[Any]= encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ : str= encoded_data[:-padding]
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ : Any= [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A ) , 8 )
]
return bytes(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
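# --- Hedged aside (not part of the original module) ----------------------------------
# A standalone round-trip check against Python's standard-library base64 module,
# useful for sanity-checking the manual bit-level approach above on a tiny input.
import base64
sample = b"Hello, World!"
encoded = base64.b64encode(sample)  # b'SGVsbG8sIFdvcmxkIQ=='
decoded = base64.b64decode(encoded)
assert decoded == sample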
| 85 | 0 |
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class __UpperCAmelCase( yaml.SafeLoader ):
"""simple docstring"""
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : List[Any]= [self.constructed_objects[key_node] for key_node, _ in node.value]
lowercase__ : str= [tuple(snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else key for key in keys]
lowercase__ : str= Counter(snake_case__ )
lowercase__ : Tuple= [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F'''Got duplicate yaml keys: {duplicate_keys}''' )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__=False ):
'''simple docstring'''
lowercase__ : List[Any]= super().construct_mapping(snake_case__ , deep=snake_case__ )
self._check_no_duplicates_on_constructed_node(snake_case__ )
return mapping
def lowercase__(A ) ->Tuple[Optional[str], str]:
"""simple docstring"""
lowercase__ : List[Any]= list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
lowercase__ : Tuple= full_content[1:].index("---" ) + 1
lowercase__ : str= "\n".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(A )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = {"train_eval_index"} # train-eval-index in the YAML metadata
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ ):
'''simple docstring'''
with open(snake_case__ , encoding="utf-8" ) as readme_file:
lowercase__ : Any= _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(snake_case__ )
else:
return cls()
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
if path.exists():
with open(snake_case__ , encoding="utf-8" ) as readme_file:
lowercase__ : str= readme_file.read()
else:
lowercase__ : Any= None
lowercase__ : List[Any]= self._to_readme(snake_case__ )
with open(snake_case__ , "w" , encoding="utf-8" ) as readme_file:
readme_file.write(snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ = None ):
'''simple docstring'''
if readme_content is not None:
lowercase__ : Optional[Any]= _split_yaml_from_readme(snake_case__ )
lowercase__ : List[str]= "---\n" + self.to_yaml_string() + "---\n" + content
else:
lowercase__ : List[Any]= "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ ):
'''simple docstring'''
lowercase__ : str= yaml.load(snake_case__ , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
lowercase__ : Optional[Any]= {
(key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=snake_case__ , allow_unicode=snake_case__ , encoding="utf-8" , ).decode("utf-8" )
a : Union[str, Any] = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
a : Union[str, Any] = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
a : Any = ap.parse_args()
a : Any = Path(args.readme_filepath)
a : Dict = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
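# --- Hedged usage sketch (illustrative only, not from the original file) -------------
# Splitting a README-style string into its YAML front matter and body, mirroring the
# logic of _split_yaml_from_readme above on a tiny hard-coded example.
readme_text = "---\nlicense: mit\n---\nSome dataset card body."
lines = readme_text.splitlines()
sep_idx = lines[1:].index("---") + 1
yaml_block = "\n".join(lines[1:sep_idx])
body = "\n".join(lines[sep_idx + 1 :])
assert yaml_block == "license: mit"
assert body == "Some dataset card body."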
| 701 |
"""simple docstring"""
from __future__ import annotations
def lowercase__(A ) ->list[int]: # This function is recursive
"""simple docstring"""
lowercase__ : int= len(A )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowercase__ : str= array[0]
lowercase__ : Optional[Any]= False
lowercase__ : Any= 1
lowercase__ : list[int]= []
while not is_found and i < array_length:
if array[i] < pivot:
lowercase__ : Union[str, Any]= True
lowercase__ : List[str]= [element for element in array[i:] if element >= array[i]]
lowercase__ : Union[str, Any]= longest_subsequence(A )
if len(A ) > len(A ):
lowercase__ : List[str]= temp_array
else:
i += 1
lowercase__ : List[str]= [element for element in array[1:] if element >= pivot]
lowercase__ : List[str]= [pivot, *longest_subsequence(A )]
if len(A ) > len(A ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
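# --- Hedged alternative sketch (not part of the original module) ---------------------
# An iterative O(n^2) dynamic-programming version that returns only the *length* of the
# longest non-decreasing subsequence; handy as a cross-check for the recursive approach.
def longest_non_decreasing_length(nums: list[int]) -> int:
    if not nums:
        return 0
    dp = [1] * len(nums)  # dp[i] = best subsequence length ending at index i
    for i in range(1, len(nums)):
        for j in range(i):
            if nums[j] <= nums[i]:
                dp[i] = max(dp[i], dp[j] + 1)
    return max(dp)
assert longest_non_decreasing_length([1, 3, 2, 2, 5]) == 4  # e.g. 1, 2, 2, 5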
| 85 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
a : Any = logging.getLogger(__name__)
def lowercase__() ->Optional[int]:
"""simple docstring"""
lowercase__ : List[Any]= argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=A , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=A , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=A , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=A , default=1_000 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=A , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=A , type=A , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=A , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=A , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
lowercase__ : List[str]= parser.parse_args()
return args
def lowercase__(A ) ->Any:
"""simple docstring"""
def fn(A ):
return tokenizer(examples["text"] )
return fn
def lowercase__(A ) ->List[str]:
"""simple docstring"""
lowercase__ : Optional[int]= []
for i in range(len(tokenized_data["input_ids"] ) ):
lowercase__ : List[str]= {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
lowercase__ : Dict= tf.train.Features(feature=A )
lowercase__ : int= tf.train.Example(features=A )
lowercase__ : Tuple= example.SerializeToString()
records.append(A )
return records
def lowercase__(A ) ->Dict:
"""simple docstring"""
lowercase__ : Dict= datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowercase__ : Union[str, Any]= min(len(A ) , args.limit )
lowercase__ : List[str]= dataset.select(range(A ) )
print(f'''Limiting the dataset to {args.limit} entries.''' )
lowercase__ : List[str]= AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowercase__ : List[str]= os.path.join(args.output_dir , args.split )
if not os.path.exists(A ):
os.makedirs(A )
else:
lowercase__ : int= os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowercase__ : Any= tokenize_function(A )
lowercase__ : Tuple= dataset.map(A , batched=A , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(A ):
# Concatenate all texts.
lowercase__ : List[str]= {k: sum(examples[k] , [] ) for k in examples.keys()}
lowercase__ : int= len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowercase__ : Tuple= (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
lowercase__ : Optional[int]= {
k: [t[i : i + args.max_length] for i in range(0 , A , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
lowercase__ : List[str]= dataset_tokenized.map(A , batched=A , batch_size=1_000 , num_proc=4 )
lowercase__ : Union[str, Any]= 0
lowercase__ : Any= 0
for shard in range(0 , len(A ) , args.shard_size ):
lowercase__ : Optional[int]= grouped_dataset[shard : shard + args.shard_size]
lowercase__ : Optional[Any]= len(dataset_snapshot["input_ids"] )
lowercase__ : Optional[int]= os.path.join(A , f'''dataset-{shard_count}-{records_containing}.tfrecord''' )
lowercase__ : Optional[int]= get_serialized_examples(A )
with tf.io.TFRecordWriter(A ) as out_file:
for i in range(len(A ) ):
lowercase__ : Tuple= serialized_examples[i]
out_file.write(A )
print("Wrote file {} containing {} records".format(A , A ) )
shard_count += 1
total_records += records_containing
with open(f'''split-{args.split}-records-count.txt''' , "w" ) as f:
print(f'''Total {args.split} records: {total_records}''' , file=A )
if __name__ == "__main__":
a : Optional[Any] = parse_args()
main(args)
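# --- Hedged mini-example (not part of the original script) ---------------------------
# Serializing a single tokenized sample into a tf.train.Example, the same record
# structure written per shard above; the token ids are made up for illustration.
import tensorflow as tf
feature = {
    "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=[101, 2023, 102])),
    "attention_mask": tf.train.Feature(int64_list=tf.train.Int64List(value=[1, 1, 1])),
}
example = tf.train.Example(features=tf.train.Features(feature=feature))
serialized = example.SerializeToString()  # bytes, ready for a TFRecordWriter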
| 702 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
a : List[str] = parser.parse_args()
a : List[str] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
a : Optional[Any] = CLIPImageProcessor()
a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
a : Tuple = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 85 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
a : Union[str, Any] = {
"""susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
"""susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "ernie_m"
__lowerCamelCase = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , snake_case__ = 250002 , snake_case__ = 768 , snake_case__ = 12 , snake_case__ = 12 , snake_case__ = 3072 , snake_case__ = "gelu" , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 514 , snake_case__ = 0.02 , snake_case__ = 1 , snake_case__ = 1e-05 , snake_case__=None , snake_case__=False , snake_case__=0.0 , **snake_case__ , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , **snake_case__ )
lowercase__ : Any= vocab_size
lowercase__ : List[Any]= hidden_size
lowercase__ : Union[str, Any]= num_hidden_layers
lowercase__ : Dict= num_attention_heads
lowercase__ : List[str]= intermediate_size
lowercase__ : List[Any]= hidden_act
lowercase__ : Optional[int]= hidden_dropout_prob
lowercase__ : Dict= attention_probs_dropout_prob
lowercase__ : str= max_position_embeddings
lowercase__ : Any= initializer_range
lowercase__ : Tuple= layer_norm_eps
lowercase__ : Tuple= classifier_dropout
lowercase__ : int= is_decoder
lowercase__ : List[str]= act_dropout
| 703 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
a : Optional[Any] = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]:
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models )
lowercase__ : List[Any]= config_class.from_json_file(A )
lowercase__ : Any= True
lowercase__ : List[str]= True
print(f'''Building TensorFlow model from configuration: {config}''' )
lowercase__ : Optional[int]= model_class(A )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
lowercase__ : List[str]= cached_file(
A , A , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A )
if compare_with_pt_model:
lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network
lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" )
lowercase__ : Union[str, Any]= pt_model_class.from_pretrained(
pretrained_model_name_or_path=A , config=A , state_dict=A )
with torch.no_grad():
lowercase__ : str= pt_model(**pt_model.dummy_inputs )
lowercase__ : Tuple= pto[0].numpy()
lowercase__ : List[Any]= tfo[0].numpy()
lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) )
print(f'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}'''
# Save pytorch-model
print(f'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(A , save_format="h5" )
def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]:
"""simple docstring"""
if args_model_type is None:
lowercase__ : Tuple= list(MODEL_CLASSES.keys() )
else:
lowercase__ : Optional[int]= [args_model_type]
for j, model_type in enumerate(A , start=1 ):
print("=" * 100 )
print(f''' Converting model type {j}/{len(A )}: {model_type}''' )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowercase__ : int= list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowercase__ : Any= model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(A , A ) , start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
lowercase__ : Any= model_shortcut_name
elif only_convert_finetuned_models:
print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models )
else:
lowercase__ : Union[str, Any]= config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowercase__ : str= cached_file(A , A , force_download=not use_cached_models )
else:
lowercase__ : Any= model_shortcut_name
if os.path.isfile(A ):
lowercase__ : Dict= "converted_model"
convert_pt_checkpoint_to_tf(
model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , )
if remove_cached_files:
os.remove(A )
os.remove(A )
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
a : List[str] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
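# --- Hedged mini-check (illustrative only, not part of the original script) ----------
# The max-absolute-difference comparison applied above to PyTorch vs. TensorFlow
# outputs, shown here on plain NumPy arrays with made-up values.
import numpy as np
pt_out = np.array([0.10, -0.20, 0.30])
tf_out = np.array([0.10, -0.21, 0.30])
diff = np.amax(np.abs(pt_out - tf_out))
assert diff <= 2e-2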
| 85 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase__(A , A , A ) ->int:
"""simple docstring"""
lowercase__ : Dict= LxmertConfig.from_json_file(A )
print(f'''Building PyTorch model from configuration: {config}''' )
lowercase__ : Optional[int]= LxmertForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(A , A , A )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 704 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 85 | 0 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 705 |
"""simple docstring"""
def lowercase__(A ) ->list:
"""simple docstring"""
if n_term == "":
return []
lowercase__ : list= []
for temp in range(int(A ) ):
series.append(f'''1/{temp + 1}''' if series else "1" )
return series
if __name__ == "__main__":
a : Dict = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
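# --- Hedged companion sketch (not part of the original module) -----------------------
# Numeric value of the n-th harmonic partial sum H_n = 1 + 1/2 + ... + 1/n, to go with
# the string-formatted series above.
def harmonic_sum(n: int) -> float:
    return sum(1 / k for k in range(1, n + 1))
assert abs(harmonic_sum(4) - (1 + 0.5 + 1 / 3 + 0.25)) < 1e-12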
| 85 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a : List[str] = logging.get_logger(__name__)
a : Optional[int] = {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "deta"
__lowerCamelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , snake_case__=None , snake_case__=900 , snake_case__=2048 , snake_case__=6 , snake_case__=2048 , snake_case__=8 , snake_case__=6 , snake_case__=1024 , snake_case__=8 , snake_case__=0.0 , snake_case__=True , snake_case__="relu" , snake_case__=256 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1.0 , snake_case__=True , snake_case__=False , snake_case__="sine" , snake_case__=5 , snake_case__=4 , snake_case__=4 , snake_case__=True , snake_case__=300 , snake_case__=True , snake_case__=True , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=1 , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=0.1 , snake_case__=0.25 , **snake_case__ , ):
'''simple docstring'''
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowercase__ : Optional[int]= CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(snake_case__ , snake_case__ ):
lowercase__ : Union[str, Any]= backbone_config.pop("model_type" )
lowercase__ : Optional[int]= CONFIG_MAPPING[backbone_model_type]
lowercase__ : Tuple= config_class.from_dict(snake_case__ )
lowercase__ : List[str]= backbone_config
lowercase__ : List[str]= num_queries
lowercase__ : Optional[int]= max_position_embeddings
lowercase__ : Union[str, Any]= d_model
lowercase__ : Tuple= encoder_ffn_dim
lowercase__ : List[Any]= encoder_layers
lowercase__ : Dict= encoder_attention_heads
lowercase__ : List[Any]= decoder_ffn_dim
lowercase__ : Any= decoder_layers
lowercase__ : Dict= decoder_attention_heads
lowercase__ : Optional[int]= dropout
lowercase__ : List[Any]= attention_dropout
lowercase__ : List[Any]= activation_dropout
lowercase__ : int= activation_function
lowercase__ : Optional[Any]= init_std
lowercase__ : str= init_xavier_std
lowercase__ : Tuple= encoder_layerdrop
lowercase__ : Union[str, Any]= auxiliary_loss
lowercase__ : Any= position_embedding_type
# deformable attributes
lowercase__ : int= num_feature_levels
lowercase__ : Union[str, Any]= encoder_n_points
lowercase__ : str= decoder_n_points
lowercase__ : str= two_stage
lowercase__ : Optional[int]= two_stage_num_proposals
lowercase__ : Any= with_box_refine
lowercase__ : int= assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
lowercase__ : int= class_cost
lowercase__ : Any= bbox_cost
lowercase__ : int= giou_cost
# Loss coefficients
lowercase__ : List[Any]= mask_loss_coefficient
lowercase__ : List[str]= dice_loss_coefficient
lowercase__ : Optional[int]= bbox_loss_coefficient
lowercase__ : Any= giou_loss_coefficient
lowercase__ : Any= eos_coefficient
lowercase__ : List[str]= focal_alpha
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return self.d_model
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= copy.deepcopy(self.__dict__ )
lowercase__ : int= self.backbone_config.to_dict()
lowercase__ : List[str]= self.__class__.model_type
return output
| 706 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : str = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "big_bird"
def __init__( self , snake_case__=50358 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=4096 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=66 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=64 , snake_case__=3 , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , sep_token_id=snake_case__ , **snake_case__ , )
lowercase__ : Dict= vocab_size
lowercase__ : Optional[int]= max_position_embeddings
lowercase__ : List[Any]= hidden_size
lowercase__ : List[str]= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : Optional[int]= intermediate_size
lowercase__ : Optional[int]= hidden_act
lowercase__ : Tuple= hidden_dropout_prob
lowercase__ : int= attention_probs_dropout_prob
lowercase__ : int= initializer_range
lowercase__ : List[Any]= type_vocab_size
lowercase__ : Union[str, Any]= layer_norm_eps
lowercase__ : Optional[Any]= use_cache
lowercase__ : Union[str, Any]= rescale_embeddings
lowercase__ : Union[str, Any]= attention_type
lowercase__ : Any= use_bias
lowercase__ : List[Any]= block_size
lowercase__ : Optional[Any]= num_random_blocks
lowercase__ : Optional[int]= classifier_dropout
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ : List[Any]= {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ : Tuple= {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 85 | 0 |
"""simple docstring"""
def lowercase__(A = 200 ) ->int:
"""simple docstring"""
lowercase__ : Union[str, Any]= [1, 2, 5, 10, 20, 50, 100, 200]
lowercase__ : int= [0] * (pence + 1)
lowercase__ : Optional[int]= 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(A , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
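# --- Hedged illustration (not part of the original solution) -------------------------
# The same unbounded-coin counting recurrence on a tiny example: the number of ways to
# make 5 pence from coins {1, 2, 5} is 4 (5, 2+2+1, 2+1+1+1, 1+1+1+1+1).
ways = [1] + [0] * 5
for coin in (1, 2, 5):
    for amount in range(coin, 6):
        ways[amount] += ways[amount - coin]
assert ways[5] == 4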
| 707 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 85 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
a : str = logging.get_logger(__name__)
a : Any = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
a : Optional[int] = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def lowercase__(A ) ->List[str]:
"""simple docstring"""
lowercase__ : Optional[int]= {}
with open(A , "r" ) as file:
for line_number, line in enumerate(A ):
lowercase__ : Optional[Any]= line.strip()
if line:
lowercase__ : Tuple= line.split()
lowercase__ : Optional[int]= line_number
lowercase__ : Optional[Any]= words[0]
lowercase__ : Optional[int]= value
return result
def lowercase__(A , A , A , A , A ) ->int:
"""simple docstring"""
for attribute in key.split("." ):
lowercase__ : int= getattr(A , A )
lowercase__ : Optional[Any]= None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(A ):
lowercase__ : Optional[int]= PARAM_MAPPING[full_name.split("." )[-1]]
lowercase__ : List[Any]= "param"
if weight_type is not None and weight_type != "param":
lowercase__ : Union[str, Any]= getattr(A , A ).shape
elif weight_type is not None and weight_type == "param":
lowercase__ : Dict= hf_pointer
for attribute in hf_param_name.split("." ):
lowercase__ : Optional[int]= getattr(A , A )
lowercase__ : List[str]= shape_pointer.shape
# let's reduce dimension
lowercase__ : int= value[0]
else:
lowercase__ : List[str]= hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowercase__ : int= value
elif weight_type == "weight_g":
lowercase__ : Any= value
elif weight_type == "weight_v":
lowercase__ : List[str]= value
elif weight_type == "bias":
lowercase__ : str= value
elif weight_type == "param":
for attribute in hf_param_name.split("." ):
lowercase__ : List[str]= getattr(A , A )
lowercase__ : Any= value
else:
lowercase__ : str= value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def lowercase__(A , A , A , A , A ) ->Tuple:
"""simple docstring"""
lowercase__ : int= None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(A ):
lowercase__ : Union[str, Any]= PARAM_MAPPING[full_name.split("." )[-1]]
lowercase__ : Dict= "param"
if weight_type is not None and weight_type != "param":
lowercase__ : Optional[int]= ".".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
lowercase__ : List[str]= ".".join([key, hf_param_name] )
else:
lowercase__ : Union[str, Any]= key
lowercase__ : Union[str, Any]= value if "lm_head" in full_key else value[0]
a : int = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def lowercase__(A , A , A=None , A=None ) ->Dict:
"""simple docstring"""
lowercase__ : Optional[Any]= False
for key, mapped_key in MAPPING.items():
lowercase__ : List[Any]= "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowercase__ : Any= True
if "*" in mapped_key:
lowercase__ : Optional[Any]= name.split(A )[0].split("." )[-2]
lowercase__ : str= mapped_key.replace("*" , A )
if "weight_g" in name:
lowercase__ : Optional[int]= "weight_g"
elif "weight_v" in name:
lowercase__ : Optional[int]= "weight_v"
elif "bias" in name:
lowercase__ : List[str]= "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Tuple= "weight"
else:
lowercase__ : str= None
if hf_dict is not None:
rename_dict(A , A , A , A , A )
else:
set_recursively(A , A , A , A , A )
return is_used
def lowercase__(A , A , A ) ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict= []
lowercase__ : Any= fairseq_model.state_dict()
lowercase__ : int= hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : List[str]= False
if "conv_layers" in name:
load_conv_layer(
A , A , A , A , hf_model.config.feat_extract_norm == "group" , )
lowercase__ : int= True
else:
lowercase__ : Optional[int]= load_wavaveca_layer(A , A , A )
if not is_used:
unused_weights.append(A )
logger.warning(f'''Unused weights: {unused_weights}''' )
def lowercase__(A , A , A , A , A ) ->Optional[int]:
"""simple docstring"""
lowercase__ : Optional[int]= full_name.split("conv_layers." )[-1]
lowercase__ : Any= name.split("." )
lowercase__ : List[Any]= int(items[0] )
lowercase__ : List[str]= int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowercase__ : Tuple= value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowercase__ : int= value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
lowercase__ : Optional[int]= value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
lowercase__ : List[Any]= value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(A )
@torch.no_grad()
def lowercase__(A , A , A=None , A=None , A=True , A=False ) ->Dict:
"""simple docstring"""
if config_path is not None:
lowercase__ : Any= WavaVecaConfig.from_pretrained(A )
else:
lowercase__ : List[str]= WavaVecaConfig()
if is_seq_class:
lowercase__ : List[Any]= read_txt_into_dict(A )
lowercase__ : int= idalabel
lowercase__ : Any= WavaVecaForSequenceClassification(A )
lowercase__ : Optional[Any]= WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=A , return_attention_mask=A , )
feature_extractor.save_pretrained(A )
elif is_finetuned:
if dict_path:
lowercase__ : List[str]= Dictionary.load(A )
# important: change the bos & pad token ids, since the CTC symbol is <pad> and
# not <s> as in fairseq
lowercase__ : Tuple= target_dict.pad_index
lowercase__ : str= target_dict.bos_index
lowercase__ : int= target_dict.eos_index
lowercase__ : Any= len(target_dict.symbols )
lowercase__ : int= os.path.join(A , "vocab.json" )
if not os.path.isdir(A ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(A ) )
return
os.makedirs(A , exist_ok=A )
lowercase__ : Optional[int]= target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase__ : Union[str, Any]= 0
lowercase__ : Dict= 1
with open(A , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(A , A )
lowercase__ : Union[str, Any]= WavaVecaCTCTokenizer(
A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=A , )
lowercase__ : List[str]= True if config.feat_extract_norm == "layer" else False
lowercase__ : int= WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=A , return_attention_mask=A , )
lowercase__ : str= WavaVecaProcessor(feature_extractor=A , tokenizer=A )
processor.save_pretrained(A )
lowercase__ : int= WavaVecaForCTC(A )
else:
lowercase__ : Union[str, Any]= WavaVecaForPreTraining(A )
if is_finetuned or is_seq_class:
lowercase__ : List[Any]= fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
lowercase__ : List[Any]= argparse.Namespace(task="audio_pretraining" )
lowercase__ : Optional[int]= fairseq.tasks.setup_task(A )
lowercase__ : Tuple= fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A )
lowercase__ : Optional[int]= model[0].eval()
recursively_load_weights(A , A , not is_finetuned )
hf_wavavec.save_pretrained(A )
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
a : List[str] = parser.parse_args()
a : List[str] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
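# --- Hedged toy illustration (not part of the conversion script) ---------------------
# The prefix-renaming idea used above: rewrite a fairseq-style state-dict key into the
# HF-style name via a small substitution table; the key pair is taken from MAPPING.
mapping = {"post_extract_proj": "feature_projection.projection"}
old_key = "post_extract_proj.weight"
new_key = old_key
for src, dst in mapping.items():
    if old_key.startswith(src):
        new_key = dst + old_key[len(src):]
assert new_key == "feature_projection.projection.weight"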
| 708 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def lowercase__(A , A ) ->List[Any]:
"""simple docstring"""
lowercase__ : str= []
for part_id in partition_order:
lowercase__ : int= df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(A ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : Optional[Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple= spark.range(100 ).repartition(1 )
lowercase__ : Dict= Spark(A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Union[str, Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Dict= spark.range(10 ).repartition(2 )
lowercase__ : Optional[Any]= [1, 0]
lowercase__ : List[str]= _generate_iterable_examples(A , A ) # Reverse the partitions.
lowercase__ : int= _get_expected_row_ids_and_row_dicts_for_partition_order(A , A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
lowercase__, lowercase__ : Any= expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->int:
"""simple docstring"""
lowercase__ : int= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Dict= spark.range(10 ).repartition(1 )
lowercase__ : str= SparkExamplesIterable(A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(A ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : List[str]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : int= spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
lowercase__ : Optional[Any]= lambda A : x.reverse()
lowercase__ : Tuple= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] )
lowercase__ : List[str]= SparkExamplesIterable(A ).shuffle_data_sources(A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : str= expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Any:
"""simple docstring"""
lowercase__ : Dict= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Union[str, Any]= spark.range(20 ).repartition(4 )
# Partitions 0 and 2
lowercase__ : Optional[int]= SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : Union[str, Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] )
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : Tuple= expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
lowercase__ : Tuple= SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : List[Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] )
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : Dict= expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Any= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple= spark.range(100 ).repartition(1 )
lowercase__ : Optional[int]= Spark(A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
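# --- Hedged illustration (not part of the test module) -------------------------------
# The row-id convention exercised by these tests, "<partition_id>_<row_index>", built
# without Spark on a tiny hand-written partition layout.
partitions = {0: [10, 11], 1: [20]}
row_ids = [f"{pid}_{i}" for pid, rows in partitions.items() for i, _ in enumerate(rows)]
assert row_ids == ["0_0", "0_1", "1_0"]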
| 85 | 0 |
from __future__ import annotations
def lowercase__(A ) ->list[int]:
"""simple docstring"""
lowercase__ : Dict= [True] * limit
lowercase__ : Any= False
lowercase__ : Dict= False
lowercase__ : Optional[int]= True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
lowercase__ : List[Any]= i * 2
while index < limit:
lowercase__ : Dict= False
lowercase__ : int= index + i
lowercase__ : Optional[Any]= [2]
for i in range(3 , A , 2 ):
if is_prime[i]:
primes.append(A )
return primes
def lowercase__(A = 1_000_000 ) ->int:
"""simple docstring"""
lowercase__ : str= prime_sieve(A )
lowercase__ : Optional[int]= 0
lowercase__ : Optional[Any]= 0
for i in range(len(A ) ):
for j in range(i + length , len(A ) ):
lowercase__ : Tuple= sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
lowercase__ : str= j - i
lowercase__ : Dict= sol
return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
| 709 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ):
'''simple docstring'''
lowercase__ : Optional[int]= parent
lowercase__ : Tuple= batch_size
lowercase__ : Tuple= seq_length
lowercase__ : str= is_training
lowercase__ : str= use_input_lengths
lowercase__ : Any= use_token_type_ids
lowercase__ : List[Any]= use_labels
lowercase__ : Optional[int]= gelu_activation
lowercase__ : str= sinusoidal_embeddings
lowercase__ : List[str]= causal
lowercase__ : Any= asm
lowercase__ : Optional[int]= n_langs
lowercase__ : Union[str, Any]= vocab_size
lowercase__ : int= n_special
lowercase__ : Any= hidden_size
lowercase__ : int= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : List[str]= hidden_dropout_prob
lowercase__ : str= attention_probs_dropout_prob
lowercase__ : Any= max_position_embeddings
lowercase__ : List[Any]= type_vocab_size
lowercase__ : int= type_sequence_label_size
lowercase__ : Any= initializer_range
lowercase__ : Optional[int]= num_labels
lowercase__ : Union[str, Any]= num_choices
lowercase__ : List[Any]= summary_type
lowercase__ : Optional[int]= use_proj
lowercase__ : int= scope
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Tuple= None
if self.use_input_lengths:
lowercase__ : List[Any]= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ : Tuple= None
if self.use_token_type_ids:
lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ : str= None
lowercase__ : Tuple= None
lowercase__ : Dict= None
if self.use_labels:
lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float()
lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any]= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : Any= FlaubertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ )
lowercase__ : str= model(snake_case__ , langs=snake_case__ )
lowercase__ : Any= model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : List[str]= model(snake_case__ )
lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= model(snake_case__ )
lowercase__ : Any= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , )
lowercase__ : List[str]= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , )
((lowercase__), ) : Optional[Any]= result_with_labels.to_tuple()
lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
((lowercase__), ) : List[Any]= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ )
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= self.num_labels
lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : int= self.num_choices
lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Any= model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) : Any= config_and_inputs
lowercase__ : Tuple= {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__lowerCamelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowercase__ : List[Any]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
lowercase__ : List[str]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModelTester(self )
lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@slow
@require_torch_gpu
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ : int= True
lowercase__ : List[Any]= model_class(config=snake_case__ )
lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ )
lowercase__ : Dict= torch.jit.trace(
snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) )
lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ )
loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) )
@require_torch
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
lowercase__ : Optional[int]= model(snake_case__ )[0]
lowercase__ : Optional[int]= torch.Size((1, 11, 768) )
self.assertEqual(output.shape , snake_case__ )
lowercase__ : Dict= torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
| 85 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : str = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "big_bird"
def __init__( self , snake_case__=50358 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=4096 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=66 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=64 , snake_case__=3 , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , sep_token_id=snake_case__ , **snake_case__ , )
lowercase__ : Dict= vocab_size
lowercase__ : Optional[int]= max_position_embeddings
lowercase__ : List[Any]= hidden_size
lowercase__ : List[str]= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : Optional[int]= intermediate_size
lowercase__ : Optional[int]= hidden_act
lowercase__ : Tuple= hidden_dropout_prob
lowercase__ : int= attention_probs_dropout_prob
lowercase__ : int= initializer_range
lowercase__ : List[Any]= type_vocab_size
lowercase__ : Union[str, Any]= layer_norm_eps
lowercase__ : Optional[Any]= use_cache
lowercase__ : Union[str, Any]= rescale_embeddings
lowercase__ : Union[str, Any]= attention_type
lowercase__ : Any= use_bias
lowercase__ : List[Any]= block_size
lowercase__ : Optional[Any]= num_random_blocks
lowercase__ : Optional[int]= classifier_dropout
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ : List[Any]= {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ : Tuple= {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
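# Illustration (added, not in the original file): for the "multiple-choice" task the
# property above yields
#     OrderedDict([("input_ids",      {0: "batch", 1: "choice", 2: "sequence"}),
#                  ("attention_mask", {0: "batch", 1: "choice", 2: "sequence"})])
# while every other task drops the "choice" axis, leaving {0: "batch", 1: "sequence"}
# for both inputs. These dynamic axes tell the ONNX exporter which dimensions may vary.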
| 710 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = None
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 2
@register_to_config
def __init__( self , snake_case__ = 0.02 , snake_case__ = 100 , snake_case__ = 1.0_07 , snake_case__ = 80 , snake_case__ = 0.05 , snake_case__ = 50 , ):
'''simple docstring'''
# standard deviation of the initial noise distribution
lowercase__ : int= sigma_max
# setable values
lowercase__ : int= None
lowercase__ : np.IntTensor= None
lowercase__ : torch.FloatTensor= None # sigma(t_i)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
return sample
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__ : List[Any]= num_inference_steps
lowercase__ : Any= np.arange(0 , self.num_inference_steps )[::-1].copy()
lowercase__ : Tuple= torch.from_numpy(snake_case__ ).to(snake_case__ )
lowercase__ : Union[str, Any]= [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
lowercase__ : int= torch.tensor(snake_case__ , dtype=torch.floataa , device=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
lowercase__ : Optional[Any]= min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
lowercase__ : str= 0
# sample eps ~ N(0, S_noise^2 * I)
lowercase__ : List[Any]= self.config.s_noise * randn_tensor(sample.shape , generator=snake_case__ ).to(sample.device )
lowercase__ : str= sigma + gamma * sigma
lowercase__ : Any= sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
'''simple docstring'''
lowercase__ : Union[str, Any]= sample_hat + sigma_hat * model_output
lowercase__ : Optional[int]= (sample_hat - pred_original_sample) / sigma_hat
lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
'''simple docstring'''
lowercase__ : int= sample_prev + sigma_prev * model_output
lowercase__ : Optional[int]= (sample_prev - pred_original_sample) / sigma_prev
lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
raise NotImplementedError()
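# Added sketch of how a Karras-style sampler would drive the scheduler above; the
# obfuscated method names are assumed to correspond, in order of definition, to the
# upstream scale_model_input / set_timesteps / add_noise_to_input / step / step_correct
# API, so treat this loop as illustrative only:
#
#     scheduler.set_timesteps(num_inference_steps)
#     sample = scheduler.config.sigma_max * torch.randn(shape)
#     for t in scheduler.timesteps:
#         sigma = scheduler.schedule[t]
#         sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0.0
#         sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)  # stochastic churn
#         model_output = unet(sample_hat, sigma_hat).sample
#         sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample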
| 85 | 0 |
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
a : Optional[Any] = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def lowercase__(A , A=None ) ->Optional[int]:
"""simple docstring"""
require_version(deps[pkg] , A )
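# Added illustration (the exact table contents are an assumption, not copied from
# dependency_versions_table.py): entries in `deps` map a bare package name to a full
# pip-style specifier, roughly
#     deps = {"tqdm": "tqdm>=4.27", "numpy": "numpy>=1.17", ...}
# so `require_version_core(deps["tqdm"])` raises at import time when the installed
# version does not satisfy the specifier, while `require_version` (used by the helper
# above) checks on demand and can attach a custom hint message.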
| 711 |
"""simple docstring"""
from ....utils import logging
a : List[str] = logging.get_logger(__name__)
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=None , snake_case__=2048 ):
'''simple docstring'''
lowercase__ : Dict= config.__dict__
lowercase__ : str= modal_hidden_size
if num_labels:
lowercase__ : List[str]= num_labels
| 85 | 0 |
"""simple docstring"""
import math
import qiskit
def lowercase__(A = 1 , A = 1 , A = 1 ) ->qiskit.result.counts.Counts:
"""simple docstring"""
if (
isinstance(A , A )
or isinstance(A , A )
or isinstance(A , A )
):
raise TypeError("inputs must be integers." )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError("inputs must be positive." )
if (
(math.floor(A ) != input_a)
or (math.floor(A ) != input_a)
or (math.floor(A ) != carry_in)
):
raise ValueError("inputs must be exact integers." )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError("inputs must be less or equal to 2." )
# build registers
lowercase__ : List[str]= qiskit.QuantumRegister(4 , "qr" )
lowercase__ : Dict= qiskit.ClassicalRegister(2 , "cr" )
# list the entries
lowercase__ : Optional[int]= [input_a, input_a, carry_in]
lowercase__ : Any= qiskit.QuantumCircuit(A , A )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(A ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(A ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(A ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , A ) # measure the last two qbits
lowercase__ : int= qiskit.Aer.get_backend("aer_simulator" )
lowercase__ : Union[str, Any]= qiskit.execute(A , A , shots=1_000 )
return job.result().get_counts(A )
if __name__ == "__main__":
print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 712 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__ : Optional[int]= []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def lowercase__(A , A ) ->Any:
"""simple docstring"""
lowercase__ : Any= []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def lowercase__(A ) ->List[Any]:
"""simple docstring"""
lowercase__ : Dict= []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token") )
return token
def lowercase__() ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict= []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def lowercase__(A , A , A , A ) ->Optional[int]:
"""simple docstring"""
lowercase__ : List[str]= "imagenet-1k-id2label.json"
lowercase__ : List[str]= 1_000
lowercase__ : Tuple= "huggingface/label-files"
lowercase__ : int= num_labels
lowercase__ : int= json.load(open(cached_download(hf_hub_url(A , A , repo_type="dataset" ) ) , "r" ) )
lowercase__ : str= {int(A ): v for k, v in idalabel.items()}
lowercase__ : Optional[int]= idalabel
lowercase__ : Union[str, Any]= {v: k for k, v in idalabel.items()}
lowercase__ : Tuple= CvtConfig(num_labels=A , idalabel=A , labelaid=A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
lowercase__ : int= [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
lowercase__ : Union[str, Any]= [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
lowercase__ : Optional[Any]= [2, 2, 20]
lowercase__ : Optional[Any]= [3, 12, 16]
lowercase__ : List[str]= [192, 768, 1_024]
lowercase__ : List[str]= CvtForImageClassification(A )
lowercase__ : Any= AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
lowercase__ : Dict= image_size
lowercase__ : int= torch.load(A , map_location=torch.device("cpu" ) )
lowercase__ : Optional[Any]= OrderedDict()
lowercase__ : Tuple= []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowercase__ : Optional[int]= list_of_state_dict + cls_token(A )
lowercase__ : List[str]= list_of_state_dict + embeddings(A )
for cnt in range(config.depth[idx] ):
lowercase__ : Dict= list_of_state_dict + attention(A , A )
lowercase__ : Optional[Any]= list_of_state_dict + final()
for gg in list_of_state_dict:
print(A )
for i in range(len(A ) ):
lowercase__ : str= original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
a : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a : Optional[int] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 85 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
a : str = TypeVar("""KT""")
a : Optional[Any] = TypeVar("""VT""")
class __UpperCAmelCase( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , snake_case__ = "root" , snake_case__ = None ):
'''simple docstring'''
lowercase__ : str= key
lowercase__ : Dict= value
lowercase__ : list[Node[KT, VT]]= []
def __repr__( self ):
'''simple docstring'''
return F'''Node({self.key}: {self.value})'''
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return len(self.forward )
class __UpperCAmelCase( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , snake_case__ = 0.5 , snake_case__ = 16 ):
'''simple docstring'''
lowercase__ : Node[KT, VT]= Node[KT, VT]()
lowercase__ : Any= 0
lowercase__ : List[str]= p
lowercase__ : Optional[int]= max_level
def __str__( self ):
'''simple docstring'''
lowercase__ : List[str]= list(self )
if len(snake_case__ ) == 0:
return F'''SkipList(level={self.level})'''
lowercase__ : Any= max((len(str(snake_case__ ) ) for item in items) , default=4 )
lowercase__ : List[Any]= max(snake_case__ , 4 ) + 4
lowercase__ : Tuple= self.head
lowercase__ : Optional[int]= []
lowercase__ : Optional[Any]= node.forward.copy()
lines.append(F'''[{node.key}]'''.ljust(snake_case__ , "-" ) + "* " * len(snake_case__ ) )
lines.append(" " * label_size + "| " * len(snake_case__ ) )
while len(node.forward ) != 0:
lowercase__ : Dict= node.forward[0]
lines.append(
F'''[{node.key}]'''.ljust(snake_case__ , "-" )
+ " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) )
lines.append(" " * label_size + "| " * len(snake_case__ ) )
lowercase__ : Union[str, Any]= node.forward
lines.append("None".ljust(snake_case__ ) + "* " * len(snake_case__ ) )
return F'''SkipList(level={self.level})\n''' + "\n".join(snake_case__ )
def __iter__( self ):
'''simple docstring'''
lowercase__ : Tuple= self.head
while len(node.forward ) != 0:
yield node.forward[0].key
lowercase__ : Dict= node.forward[0]
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= 1
while random() < self.p and level < self.max_level:
level += 1
return level
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Tuple= []
lowercase__ : int= self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
lowercase__ : Optional[int]= node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(snake_case__ )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self._locate_node(snake_case__ )
if node is not None:
for i, update_node in enumerate(snake_case__ ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
lowercase__ : Union[str, Any]= node.forward[i]
else:
lowercase__ : Optional[Any]= update_node.forward[:i]
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : str= self._locate_node(snake_case__ )
if node is not None:
lowercase__ : List[Any]= value
else:
lowercase__ : Any= self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , snake_case__ ):
update_vector.append(self.head )
lowercase__ : Optional[int]= level
lowercase__ : Union[str, Any]= Node(snake_case__ , snake_case__ )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(snake_case__ )
else:
lowercase__ : Tuple= new_node
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Any= self._locate_node(snake_case__ )
if node is not None:
return node.value
return None
def lowercase__() ->List[str]:
"""simple docstring"""
lowercase__ : Dict= SkipList()
skip_list.insert("Key1" , 3 )
skip_list.insert("Key2" , 12 )
skip_list.insert("Key3" , 41 )
skip_list.insert("Key4" , -19 )
lowercase__ : Tuple= skip_list.head
lowercase__ : str= {}
while node.level != 0:
lowercase__ : List[str]= node.forward[0]
lowercase__ : List[str]= node.value
assert len(A ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def lowercase__() ->List[Any]:
"""simple docstring"""
lowercase__ : Optional[int]= SkipList()
skip_list.insert("Key1" , 10 )
skip_list.insert("Key1" , 12 )
skip_list.insert("Key5" , 7 )
skip_list.insert("Key7" , 10 )
skip_list.insert("Key10" , 5 )
skip_list.insert("Key7" , 7 )
skip_list.insert("Key5" , 5 )
skip_list.insert("Key10" , 10 )
lowercase__ : List[Any]= skip_list.head
lowercase__ : str= {}
while node.level != 0:
lowercase__ : Union[str, Any]= node.forward[0]
lowercase__ : List[str]= node.value
if len(A ) != 4:
print()
assert len(A ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def lowercase__() ->Optional[int]:
"""simple docstring"""
lowercase__ : List[Any]= SkipList()
assert skip_list.find("Some key" ) is None
def lowercase__() ->Optional[int]:
"""simple docstring"""
lowercase__ : Optional[int]= SkipList()
skip_list.insert("Key2" , 20 )
assert skip_list.find("Key2" ) == 20
skip_list.insert("Some Key" , 10 )
skip_list.insert("Key2" , 8 )
skip_list.insert("V" , 13 )
assert skip_list.find("Y" ) is None
assert skip_list.find("Key2" ) == 8
assert skip_list.find("Some Key" ) == 10
assert skip_list.find("V" ) == 13
def lowercase__() ->int:
"""simple docstring"""
lowercase__ : Optional[int]= SkipList()
skip_list.delete("Some key" )
assert len(skip_list.head.forward ) == 0
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : List[str]= SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 14 )
skip_list.insert("Key2" , 15 )
skip_list.delete("V" )
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("Key2" ) is None
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : List[str]= SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 14 )
skip_list.insert("Key2" , 15 )
skip_list.delete("V" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) == 14
assert skip_list.find("Key1" ) == 12
assert skip_list.find("Key2" ) == 15
skip_list.delete("X" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) == 12
assert skip_list.find("Key2" ) == 15
skip_list.delete("Key1" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) == 15
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) is None
def lowercase__() ->Union[str, Any]:
"""simple docstring"""
lowercase__ : List[Any]= SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 142 )
skip_list.insert("Key2" , 15 )
skip_list.delete("X" )
def traverse_keys(A ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(A )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def lowercase__() ->Dict:
"""simple docstring"""
def is_sorted(A ):
return all(next_item >= item for item, next_item in zip(A , lst[1:] ) )
lowercase__ : List[str]= SkipList()
for i in range(10 ):
skip_list.insert(A , A )
assert is_sorted(list(A ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(A ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(A ) )
def lowercase__() ->Optional[int]:
"""simple docstring"""
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def lowercase__() ->int:
"""simple docstring"""
lowercase__ : Dict= SkipList()
skip_list.insert(2 , "2" )
skip_list.insert(4 , "4" )
skip_list.insert(6 , "4" )
skip_list.insert(4 , "5" )
skip_list.insert(8 , "4" )
skip_list.insert(9 , "4" )
skip_list.delete(4 )
print(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 713 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=snake_case__ , scheduler=snake_case__ )
@torch.no_grad()
def __call__( self , snake_case__ = 1 , snake_case__ = 2000 , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.unet.config.sample_size
lowercase__ : Dict= (batch_size, 3, img_size, img_size)
lowercase__ : List[Any]= self.unet
lowercase__ : Tuple= randn_tensor(snake_case__ , generator=snake_case__ ) * self.scheduler.init_noise_sigma
lowercase__ : Tuple= sample.to(self.device )
self.scheduler.set_timesteps(snake_case__ )
self.scheduler.set_sigmas(snake_case__ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ : Optional[Any]= self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowercase__ : List[Any]= self.unet(snake_case__ , snake_case__ ).sample
lowercase__ : List[Any]= self.scheduler.step_correct(snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
# prediction step
lowercase__ : List[str]= model(snake_case__ , snake_case__ ).sample
lowercase__ : Tuple= self.scheduler.step_pred(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ )
lowercase__, lowercase__ : Tuple= output.prev_sample, output.prev_sample_mean
lowercase__ : List[str]= sample_mean.clamp(0 , 1 )
lowercase__ : Union[str, Any]= sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ : str= self.numpy_to_pil(snake_case__ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=snake_case__ )
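# Added usage sketch, assuming the upstream names (ScoreSdeVePipeline, UNet2DModel,
# ScoreSdeVeScheduler) for the classes wired together above:
#
#     pipe = ScoreSdeVePipeline(unet=unet, scheduler=ScoreSdeVeScheduler())
#     images = pipe(batch_size=1, num_inference_steps=2000).images
#
# Each timestep runs `correct_steps` Langevin corrector updates (step_correct) followed
# by one predictor update (step_pred), and the denoised mean is clamped to [0, 1].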
| 85 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Optional[Any] = logging.get_logger(__name__)
a : str = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "markuplm"
def __init__( self , snake_case__=30522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=0 , snake_case__=0 , snake_case__=2 , snake_case__=256 , snake_case__=1024 , snake_case__=216 , snake_case__=1001 , snake_case__=32 , snake_case__=50 , snake_case__="absolute" , snake_case__=True , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ , )
lowercase__ : Optional[Any]= vocab_size
lowercase__ : Any= hidden_size
lowercase__ : Dict= num_hidden_layers
lowercase__ : str= num_attention_heads
lowercase__ : Any= hidden_act
lowercase__ : Union[str, Any]= intermediate_size
lowercase__ : int= hidden_dropout_prob
lowercase__ : Optional[Any]= attention_probs_dropout_prob
lowercase__ : List[str]= max_position_embeddings
lowercase__ : Tuple= type_vocab_size
lowercase__ : Tuple= initializer_range
lowercase__ : Optional[int]= layer_norm_eps
lowercase__ : List[Any]= position_embedding_type
lowercase__ : Optional[Any]= use_cache
lowercase__ : Tuple= classifier_dropout
# additional properties
lowercase__ : Union[str, Any]= max_depth
lowercase__ : int= max_xpath_tag_unit_embeddings
lowercase__ : Optional[Any]= max_xpath_subs_unit_embeddings
lowercase__ : Any= tag_pad_id
lowercase__ : str= subs_pad_id
lowercase__ : Union[str, Any]= xpath_unit_hidden_size
| 714 |
"""simple docstring"""
def lowercase__(A ) ->list[int]:
"""simple docstring"""
lowercase__ : List[str]= len(A )
for i in range(A ):
for j in range(i + 1 , A ):
if numbers[j] < numbers[i]:
lowercase__, lowercase__ : List[str]= numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
a : Dict = input("""Enter numbers separated by a comma:\n""").strip()
a : List[str] = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
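# Added reference version with readable names (the function above corresponds to the
# upstream `exchange_sort`): each pass compares position i against every later position
# and swaps when out of order, e.g. [5, 2, 9, 1] -> [1, 2, 5, 9].
def _exchange_sort_demo(numbers: list[int]) -> list[int]:
    for i in range(len(numbers)):
        for j in range(i + 1, len(numbers)):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
assert _exchange_sort_demo([5, 2, 9, 1]) == [1, 2, 5, 9]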
| 85 | 0 |
"""simple docstring"""
def lowercase__(A ) ->list[int]:
lowercase__ : List[str]= len(A )
for i in range(A ):
for j in range(i + 1 , A ):
if numbers[j] < numbers[i]:
lowercase__, lowercase__ : List[str]= numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
a : Dict = input("""Enter numbers separated by a comma:\n""").strip()
a : List[str] = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 715 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def lowercase__(A ) ->bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase__() ->Iterator[int]:
"""simple docstring"""
lowercase__ : Union[str, Any]= 2
while True:
if is_prime(A ):
yield num
num += 1
def lowercase__(A = 2_000_000 ) ->int:
"""simple docstring"""
return sum(takewhile(lambda A : x < n , prime_generator() ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 0 |
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase__(A , A ) ->str:
"""simple docstring"""
assert isinstance(A , A )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowercase__(A , A , A ) ->List[Any]:
"""simple docstring"""
lowercase__ : Any= tmp_path / "cache"
lowercase__ : Any= {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ : Union[str, Any]= TextDatasetReader(A , cache_dir=A , keep_in_memory=A ).read()
_check_text_dataset(A , A )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def lowercase__(A , A , A ) ->str:
"""simple docstring"""
lowercase__ : int= tmp_path / "cache"
lowercase__ : Optional[Any]= {"text": "string"}
lowercase__ : int= features.copy() if features else default_expected_features
lowercase__ : List[str]= (
Features({feature: Value(A ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase__ : str= TextDatasetReader(A , features=A , cache_dir=A ).read()
_check_text_dataset(A , A )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowercase__(A , A , A ) ->List[str]:
"""simple docstring"""
lowercase__ : Optional[Any]= tmp_path / "cache"
lowercase__ : Tuple= {"text": "string"}
lowercase__ : List[Any]= TextDatasetReader(A , cache_dir=A , split=A ).read()
_check_text_dataset(A , A )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowercase__(A , A , A ) ->List[Any]:
"""simple docstring"""
if issubclass(A , A ):
lowercase__ : Tuple= text_path
elif issubclass(A , A ):
lowercase__ : Union[str, Any]= [text_path]
lowercase__ : List[Any]= tmp_path / "cache"
lowercase__ : str= {"text": "string"}
lowercase__ : Dict= TextDatasetReader(A , cache_dir=A ).read()
_check_text_dataset(A , A )
def lowercase__(A , A , A=("train",) ) ->Optional[Any]:
"""simple docstring"""
assert isinstance(A , A )
for split in splits:
lowercase__ : List[Any]= dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowercase__(A , A , A ) ->str:
"""simple docstring"""
lowercase__ : Optional[int]= tmp_path / "cache"
lowercase__ : Union[str, Any]= {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ : Tuple= TextDatasetReader({"train": text_path} , cache_dir=A , keep_in_memory=A ).read()
_check_text_datasetdict(A , A )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def lowercase__(A , A , A ) ->Optional[Any]:
"""simple docstring"""
lowercase__ : Optional[int]= tmp_path / "cache"
# the text builder exposes a single "text" column whose default dtype is string
lowercase__ : Optional[int]= {"text": "string"}
lowercase__ : List[str]= features.copy() if features else default_expected_features
lowercase__ : str= (
Features({feature: Value(A ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase__ : Union[str, Any]= TextDatasetReader({"train": text_path} , features=A , cache_dir=A ).read()
_check_text_datasetdict(A , A )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowercase__(A , A , A ) ->int:
"""simple docstring"""
if split:
lowercase__ : int= {split: text_path}
else:
lowercase__ : str= "train"
lowercase__ : int= {"train": text_path, "test": text_path}
lowercase__ : Any= tmp_path / "cache"
lowercase__ : Optional[Any]= {"text": "string"}
lowercase__ : List[Any]= TextDatasetReader(A , cache_dir=A ).read()
_check_text_datasetdict(A , A , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 716 |
"""simple docstring"""
def lowercase__(A ) ->bool:
"""simple docstring"""
lowercase__ : Tuple= (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowercase__(A = 5_000 ) ->int:
"""simple docstring"""
lowercase__ : str= [(i * (3 * i - 1)) // 2 for i in range(1 , A )]
for i, pentagonal_i in enumerate(A ):
for j in range(A , len(A ) ):
lowercase__ : List[Any]= pentagonal_nums[j]
lowercase__ : int= pentagonal_i + pentagonal_j
lowercase__ : Optional[int]= pentagonal_j - pentagonal_i
if is_pentagonal(A ) and is_pentagonal(A ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 0 |
"""simple docstring"""
def lowercase__(A ) ->bool:
"""simple docstring"""
lowercase__ : Dict= round(n ** (1 / 3))  # round the float cube root (e.g. 27 ** (1 / 3) == 3.0000000000000004) so the exact comparison below works
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 717 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Union[str, Any] = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_text_model"
__lowerCamelCase = ["past_key_values"]
__lowerCamelCase = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=50244 , snake_case__=768 , snake_case__=64 , snake_case__=2048 , snake_case__=12 , snake_case__=12 , snake_case__=32 , snake_case__=128 , snake_case__=0.1 , snake_case__=1e-6 , snake_case__=1.0 , snake_case__="gelu_new" , snake_case__=0 , snake_case__=False , snake_case__=0 , snake_case__=1 , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
lowercase__ : int= vocab_size
lowercase__ : Optional[Any]= hidden_size
lowercase__ : Tuple= d_kv
lowercase__ : Optional[int]= d_ff
lowercase__ : Any= num_layers
lowercase__ : Dict= num_heads
lowercase__ : List[Any]= relative_attention_num_buckets
lowercase__ : Optional[Any]= relative_attention_max_distance
lowercase__ : Dict= dropout_rate
lowercase__ : Tuple= layer_norm_epsilon
lowercase__ : str= initializer_factor
lowercase__ : Any= use_cache
lowercase__ : Optional[int]= eos_token_id
lowercase__ : str= decoder_start_token_id
# for backwards compatibility
lowercase__ : Optional[Any]= dense_act_fn
super().__init__(
pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , )
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : str= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : str= config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_vision_model"
def __init__( self , snake_case__=768 , snake_case__=768 , snake_case__=2048 , snake_case__=64 , snake_case__=12 , snake_case__=12 , snake_case__="gelu_new" , snake_case__=1e-6 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1e-10 , snake_case__=1.0 , snake_case__=4096 , snake_case__=32 , snake_case__=128 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase__ : Tuple= hidden_size
lowercase__ : Tuple= patch_embed_hidden_size
lowercase__ : Optional[Any]= d_ff
lowercase__ : Dict= dropout_rate
lowercase__ : Any= num_hidden_layers
lowercase__ : Optional[int]= num_attention_heads
lowercase__ : Dict= initializer_range
lowercase__ : Tuple= initializer_factor
lowercase__ : Tuple= attention_dropout
lowercase__ : Optional[Any]= layer_norm_eps
lowercase__ : List[Any]= dense_act_fn
lowercase__ : str= seq_len
lowercase__ : List[str]= relative_attention_num_buckets
lowercase__ : Union[str, Any]= relative_attention_max_distance
lowercase__ : Dict= d_kv
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : int= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : Union[str, Any]= config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct"
__lowerCamelCase = True
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=1.0 , snake_case__=0.02 , snake_case__=False , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ )
if text_config is None:
lowercase__ : List[Any]= {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
lowercase__ : str= {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
lowercase__ : str= PixaStructTextConfig(**snake_case__ )
lowercase__ : Dict= PixaStructVisionConfig(**snake_case__ )
lowercase__ : int= self.text_config.decoder_start_token_id
lowercase__ : List[Any]= self.text_config.pad_token_id
lowercase__ : Any= self.text_config.eos_token_id
lowercase__ : Any= initializer_factor
lowercase__ : int= initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : Dict= is_vqa
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= copy.deepcopy(self.__dict__ )
lowercase__ : str= self.text_config.to_dict()
lowercase__ : str= self.vision_config.to_dict()
lowercase__ : List[str]= self.__class__.model_type
return output
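# Usage sketch (added, not part of the original module): composing the two
# sub-configs into the joint config, assuming the classes above are exposed from
# `transformers` as Pix2StructTextConfig / Pix2StructVisionConfig / Pix2StructConfig.
# The tiny sizes are illustrative only.
from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig(num_layers=2, num_heads=2, hidden_size=64, d_ff=128)
vision_config = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=64, d_ff=128)
config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
print(sorted(config.to_dict().keys()))  # includes "text_config" and "vision_config"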
| 85 | 0 |
"""simple docstring"""
def lowercase__(A ) ->list:
"""simple docstring"""
lowercase__ : List[Any]= int(A )
if n_element < 1:
        lowercase__ : Any= ValueError("n_element should be a positive number" )
raise my_error
lowercase__ : Any= [1]
lowercase__ : Optional[int]= (0, 0, 0)
lowercase__ : List[Any]= 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
a : Dict = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
a : str = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 718 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
lowercase__ : str= AutoTokenizer.from_pretrained("google/mt5-small" )
lowercase__ : Tuple= tokenizer("Hello there" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= tokenizer("Hi I am" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ).loss
lowercase__ : int= -tf.math.reduce_mean(snake_case__ ).numpy()
lowercase__ : int= -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 85 | 0 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
a : Tuple = logging.get_logger(__name__)
a : Any = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@add_start_docstrings(snake_case__ )
def __call__( self , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__ : List[str]= max_length
lowercase__ : Optional[int]= max_position_embeddings
@add_start_docstrings(snake_case__ )
def __call__( self , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
lowercase__ : Optional[int]= input_ids.shape[-1]
lowercase__ : List[Any]= cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"This is a friendly reminder - the current text generation call will exceed the model's predefined "
F'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
"exceptions, performance degradation, or nothing at all." )
return is_done
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
warnings.warn(
"The class `MaxNewTokensCriteria` is deprecated. "
F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
"with `max_length = start_length + max_new_tokens` instead." , snake_case__ , )
lowercase__ : Tuple= start_length
lowercase__ : Any= max_new_tokens
lowercase__ : Any= start_length + max_new_tokens
@add_start_docstrings(snake_case__ )
def __call__( self , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
return input_ids.shape[-1] >= self.max_length
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__ : Dict= max_time
lowercase__ : List[Any]= time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(snake_case__ )
def __call__( self , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
return time.time() - self.initial_timestamp > self.max_time
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@add_start_docstrings(snake_case__ )
def __call__( self , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
return any(criteria(snake_case__ , snake_case__ ) for criteria in self )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for stopping_criterium in self:
if isinstance(snake_case__ , snake_case__ ):
return stopping_criterium.max_length
elif isinstance(snake_case__ , snake_case__ ):
return stopping_criterium.max_length
return None
def lowercase__(A , A ) ->StoppingCriteriaList:
"""simple docstring"""
lowercase__ : Any= stopping_criteria.max_length
lowercase__ : str= deepcopy(A )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , A )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=A ) )
return new_stopping_criteria
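# Usage sketch (added, not part of the original module), assuming the classes above
# are the ones `transformers` exposes publicly; "gpt2" is just an example checkpoint.
from transformers import AutoModelForCausalLM, AutoTokenizer, MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("Stopping criteria let generation halt early", return_tensors="pt")
criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
generated = model.generate(**inputs, stopping_criteria=criteria, do_sample=False)
print(tokenizer.decode(generated[0], skip_special_tokens=True))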
| 719 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "BridgeTowerImageProcessor"
__lowerCamelCase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
def __call__( self , snake_case__ , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[int]= self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
# add pixel_values + pixel_mask
lowercase__ : Optional[int]= self.image_processor(
snake_case__ , return_tensors=snake_case__ , do_normalize=snake_case__ , do_center_crop=snake_case__ , **snake_case__ )
encoding.update(snake_case__ )
return encoding
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.tokenizer.model_input_names
lowercase__ : List[Any]= self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
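# Minimal usage sketch (added), assuming the processor above ships as
# `BridgeTowerProcessor`; the checkpoint name is only an example.
from PIL import Image
from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
image = Image.new("RGB", (384, 384), color="white")
encoding = processor(images=image, text="a blank white square", return_tensors="pt")
print(sorted(encoding.keys()))  # typically input_ids, attention_mask, pixel_values, pixel_mask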
| 85 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowercase__(A ) ->bool:
"""simple docstring"""
if len(A ) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
if any(i <= 0 for i in nums ):
raise ValueError("All values must be greater than 0" )
lowercase__ : Tuple= nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
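# Quick illustration (added): the check above is the generalized triangle
# inequality - the longest side must be strictly shorter than the sum of the rest.
for sides in ([3, 4, 5], [1, 1, 3]):
    longest = max(sides)
    print(sides, "constructible" if longest < sum(sides) - longest else "degenerate")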
| 720 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= tempfile.mkdtemp()
lowercase__ : Optional[Any]= 8
# DPR tok
lowercase__ : Tuple= [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowercase__ : Any= os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : Any= os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
lowercase__ : List[Any]= [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase__ : Tuple= dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowercase__ : Any= ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : Tuple= {"unk_token": "<unk>"}
lowercase__ : int= os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : List[str]= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : str= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.get_dummy_dataset()
lowercase__ : Optional[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= dataset
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= self.get_dummy_dataset()
lowercase__ : Tuple= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
lowercase__ : Tuple= os.path.join(self.tmpdirname , "dataset" )
lowercase__ : Optional[Any]= os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
lowercase__ : List[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case__ ) , )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
lowercase__ : Optional[int]= os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
lowercase__ : int= os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
lowercase__ : str= {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(snake_case__ , open(snake_case__ , "wb" ) )
lowercase__ : List[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
lowercase__ : Optional[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= self.get_dummy_dataset()
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Any= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Tuple= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : str= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= 1
lowercase__ : str= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : Optional[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : int= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Union[str, Any]= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : int= self.get_dummy_legacy_index_retriever()
lowercase__ : Optional[Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[Any]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : List[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : str= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
import torch
lowercase__ : str= 1
lowercase__ : Union[str, Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : str= [[5, 7], [10, 11]]
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Dict= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
lowercase__, lowercase__, lowercase__ : Optional[int]= (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , np.ndarray )
lowercase__ : Any= retriever(
snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ , return_tensors="pt" , )
lowercase__, lowercase__, lowercase__, lowercase__ : Tuple= ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.get_dpr_ctx_encoder_tokenizer()
lowercase__ : Dict= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
retriever.set_ctx_encoder_tokenizer(snake_case__ )
lowercase__ : List[str]= [[5, 7], [10, 11]]
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : List[Any]= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
self.assertEqual(
len(snake_case__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case__ ) # check for doc token related keys in dictionary.
| 85 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a : Optional[Any] = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
a : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 721 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "AutoImageProcessor"
__lowerCamelCase = "AutoTokenizer"
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
lowercase__ : List[Any]= self.image_processor
def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowercase__ : Tuple= self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
lowercase__ : str= self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
lowercase__ : Any= image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 85 | 0 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
a : Optional[Any] = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
a : Dict = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def lowercase__(A ) ->Union[str, Any]:
"""simple docstring"""
lowercase__ : int= (images / 2 + 0.5).clamp(0 , 1 )
lowercase__ : int= images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowercase__ : List[Any]= numpy_to_pil(A )
return images
def lowercase__(A ) ->List[str]:
"""simple docstring"""
if images.ndim == 3:
lowercase__ : List[Any]= images[None, ...]
lowercase__ : str= (images * 255).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
lowercase__ : str= [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
lowercase__ : List[str]= [Image.fromarray(A ) for image in images]
return pil_images
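# Small demo (added): `numpy_to_pil` above expects float images in [0, 1] with shape
# (batch, height, width, channels); this self-contained snippet mirrors the same
# uint8 conversion so the contract is easy to see.
import numpy as np
from PIL import Image as _PILImage

demo = np.random.rand(2, 32, 32, 3).astype("float32")       # already in [0, 1]
as_uint8 = (demo * 255).round().astype("uint8")              # same scaling as above
demo_pil = [_PILImage.fromarray(frame) for frame in as_uint8]
print(len(demo_pil), demo_pil[0].size)                       # 2 (32, 32)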
| 700 |
"""simple docstring"""
a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ):
lowercase__ : Union[str, Any]= f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(A )
lowercase__ : str= "".join(bin(A )[2:].zfill(8 ) for byte in data )
lowercase__ : Tuple= len(A ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ : Union[str, Any]= b"=" * ((6 - len(A ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(A ) % 6)
else:
lowercase__ : str= b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A ) , 6 ) ).encode()
+ padding
)
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ) and not isinstance(A , A ):
lowercase__ : str= (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(A )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(A , A ):
try:
lowercase__ : Optional[Any]= encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ : List[Any]= encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ : str= encoded_data[:-padding]
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ : Any= [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A ) , 8 )
]
return bytes(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
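# Reference check (added): a correct Base64 codec must agree with the standard
# library; the stdlib `base64` module is used here as an independent oracle.
import base64 as stdlib_base64

assert stdlib_base64.b64encode(b"foo") == b"Zm9v"
for sample in (b"", b"f", b"fo", b"foo", b"foobar"):
    assert stdlib_base64.b64decode(stdlib_base64.b64encode(sample)) == sample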
| 85 | 0 |
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowercase__(A , A="shi-labs/oneformer_demo" ) ->Any:
"""simple docstring"""
with open(hf_hub_download(A , A , repo_type="dataset" ) , "r" ) as f:
lowercase__ : List[str]= json.load(A )
lowercase__ : str= {}
lowercase__ : int= []
lowercase__ : Optional[int]= []
for key, info in class_info.items():
lowercase__ : List[str]= info["name"]
class_names.append(info["name"] )
if info["isthing"]:
thing_ids.append(int(A ) )
lowercase__ : Union[str, Any]= thing_ids
lowercase__ : List[Any]= class_names
return metadata
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=7 , snake_case__=3 , snake_case__=30 , snake_case__=400 , snake_case__=None , snake_case__=True , snake_case__=True , snake_case__=[0.5, 0.5, 0.5] , snake_case__=[0.5, 0.5, 0.5] , snake_case__=10 , snake_case__=False , snake_case__=255 , snake_case__="shi-labs/oneformer_demo" , snake_case__="ade20k_panoptic.json" , snake_case__=10 , ):
'''simple docstring'''
lowercase__ : Tuple= parent
lowercase__ : List[Any]= batch_size
lowercase__ : Tuple= num_channels
lowercase__ : Optional[int]= min_resolution
lowercase__ : List[str]= max_resolution
lowercase__ : str= do_resize
lowercase__ : Any= {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
lowercase__ : int= do_normalize
lowercase__ : Any= image_mean
lowercase__ : Union[str, Any]= image_std
lowercase__ : str= class_info_file
lowercase__ : Tuple= prepare_metadata(snake_case__ , snake_case__ )
lowercase__ : Optional[Any]= num_text
lowercase__ : Union[str, Any]= repo_path
# for the post_process_functions
lowercase__ : str= 2
lowercase__ : str= 10
lowercase__ : Union[str, Any]= 10
lowercase__ : Optional[int]= 3
lowercase__ : Any= 4
lowercase__ : str= num_labels
lowercase__ : str= do_reduce_labels
lowercase__ : Any= ignore_index
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCAmelCase_ ( self , snake_case__ , snake_case__=False ):
'''simple docstring'''
if not batched:
lowercase__ : int= image_inputs[0]
if isinstance(snake_case__ , Image.Image ):
lowercase__ : Tuple= image.size
else:
lowercase__ : Dict= image.shape[1], image.shape[2]
if w < h:
lowercase__ : List[str]= int(self.size["shortest_edge"] * h / w )
lowercase__ : Any= self.size["shortest_edge"]
elif w > h:
lowercase__ : Optional[Any]= self.size["shortest_edge"]
lowercase__ : int= int(self.size["shortest_edge"] * w / h )
else:
lowercase__ : List[Any]= self.size["shortest_edge"]
lowercase__ : List[str]= self.size["shortest_edge"]
else:
lowercase__ : Optional[int]= []
for image in image_inputs:
lowercase__ : str= self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowercase__ : Union[str, Any]= max(snake_case__ , key=lambda snake_case__ : item[0] )[0]
lowercase__ : Union[str, Any]= max(snake_case__ , key=lambda snake_case__ : item[1] )[1]
return expected_height, expected_width
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__lowerCamelCase = image_processing_class
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= OneFormerImageProcessorTester(self )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "image_mean" ) )
self.assertTrue(hasattr(snake_case__ , "image_std" ) )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_resize" ) )
self.assertTrue(hasattr(snake_case__ , "size" ) )
self.assertTrue(hasattr(snake_case__ , "ignore_index" ) )
self.assertTrue(hasattr(snake_case__ , "class_info_file" ) )
self.assertTrue(hasattr(snake_case__ , "num_text" ) )
self.assertTrue(hasattr(snake_case__ , "repo_path" ) )
self.assertTrue(hasattr(snake_case__ , "metadata" ) )
self.assertTrue(hasattr(snake_case__ , "do_reduce_labels" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : str= prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
lowercase__ : List[Any]= image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
lowercase__ : str= self.image_processing_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ : List[str]= self.image_processing_tester.get_expected_values(snake_case__ , batched=snake_case__ )
lowercase__ : str= image_processor(
snake_case__ , ["semantic"] * len(snake_case__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Any= prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
lowercase__ : int= image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
lowercase__ : Optional[Any]= self.image_processing_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ : Dict= self.image_processing_tester.get_expected_values(snake_case__ , batched=snake_case__ )
lowercase__ : List[str]= image_processor(
snake_case__ , ["semantic"] * len(snake_case__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : str= prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
lowercase__ : Tuple= image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
lowercase__ : Optional[Any]= self.image_processing_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ : int= self.image_processing_tester.get_expected_values(snake_case__ , batched=snake_case__ )
lowercase__ : Optional[int]= image_processor(
snake_case__ , ["semantic"] * len(snake_case__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self , snake_case__=False , snake_case__=False , snake_case__="np" ):
'''simple docstring'''
lowercase__ : int= self.image_processing_class(**self.image_processor_dict )
# prepare image and target
lowercase__ : str= self.image_processing_tester.num_labels
lowercase__ : int= None
lowercase__ : List[Any]= None
lowercase__ : List[str]= prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ )
if with_segmentation_maps:
lowercase__ : Dict= num_labels
if is_instance_map:
lowercase__ : Tuple= list(range(snake_case__ ) ) * 2
lowercase__ : Tuple= dict(enumerate(snake_case__ ) )
lowercase__ : Dict= [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
lowercase__ : int= [Image.fromarray(snake_case__ ) for annotation in annotations]
lowercase__ : int= image_processor(
snake_case__ , ["semantic"] * len(snake_case__ ) , snake_case__ , return_tensors="pt" , instance_id_to_semantic_id=snake_case__ , pad_and_return_pixel_mask=snake_case__ , )
return inputs
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
def common(snake_case__=False , snake_case__=None ):
lowercase__ : Tuple= self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case__ , is_instance_map=snake_case__ , segmentation_type=snake_case__ )
lowercase__ : Dict= inputs["mask_labels"]
lowercase__ : List[str]= inputs["class_labels"]
lowercase__ : Optional[Any]= inputs["pixel_values"]
lowercase__ : Dict= inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case__ , snake_case__ , snake_case__ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case__ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=snake_case__ )
common(is_instance_map=snake_case__ , segmentation_type="pil" )
common(is_instance_map=snake_case__ , segmentation_type="pil" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= np.zeros((20, 50) )
lowercase__ : int= 1
lowercase__ : Optional[Any]= 1
lowercase__ : str= 1
lowercase__ : int= binary_mask_to_rle(snake_case__ )
self.assertEqual(len(snake_case__ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
lowercase__ : Dict= self.image_processing_tester.get_fake_oneformer_outputs()
        lowercase__ : Tuple= image_processor.post_process_semantic_segmentation(snake_case__ )
self.assertEqual(len(snake_case__ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
lowercase__ : Dict= [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        lowercase__ : Tuple= image_processor.post_process_semantic_segmentation(snake_case__ , target_sizes=snake_case__ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
lowercase__ : List[Any]= self.image_processing_tester.get_fake_oneformer_outputs()
lowercase__ : List[str]= image_processor.post_process_instance_segmentation(snake_case__ , threshold=0 )
self.assertTrue(len(snake_case__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , snake_case__ )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
lowercase__ : int= self.image_processing_tester.get_fake_oneformer_outputs()
lowercase__ : Union[str, Any]= image_processor.post_process_panoptic_segmentation(snake_case__ , threshold=0 )
self.assertTrue(len(snake_case__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , snake_case__ )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 701 |
"""simple docstring"""
from __future__ import annotations
def lowercase__(A ) ->list[int]: # This function is recursive
"""simple docstring"""
lowercase__ : int= len(A )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowercase__ : str= array[0]
lowercase__ : Optional[Any]= False
lowercase__ : Any= 1
lowercase__ : list[int]= []
while not is_found and i < array_length:
if array[i] < pivot:
lowercase__ : Union[str, Any]= True
lowercase__ : List[str]= [element for element in array[i:] if element >= array[i]]
lowercase__ : Union[str, Any]= longest_subsequence(A )
if len(A ) > len(A ):
lowercase__ : List[str]= temp_array
else:
i += 1
lowercase__ : List[str]= [element for element in array[1:] if element >= pivot]
lowercase__ : List[str]= [pivot, *longest_subsequence(A )]
if len(A ) > len(A ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
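# Worked example (added): for [10, 22, 9, 33, 21, 50, 41, 60, 80] the longest
# non-decreasing subsequence has length 6 (e.g. [10, 22, 33, 50, 60, 80]). The
# brute-force checker below is independent of the recursive function above.
from itertools import combinations


def brute_force_longest(values: list) -> list:
    for size in range(len(values), 0, -1):
        for candidate in combinations(values, size):
            if all(a <= b for a, b in zip(candidate, candidate[1:])):
                return list(candidate)
    return []


assert len(brute_force_longest([10, 22, 9, 33, 21, 50, 41, 60, 80])) == 6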
| 85 | 0 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@register_to_config
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = False , ):
'''simple docstring'''
super().__init__()
lowercase__ : List[str]= nn.Embedding(snake_case__ , snake_case__ )
lowercase__ : str= nn.Embedding(snake_case__ , snake_case__ )
lowercase__ : Tuple= False
lowercase__ : Union[str, Any]= nn.Dropout(p=snake_case__ )
lowercase__ : Optional[int]= TaConfig(
vocab_size=snake_case__ , d_model=snake_case__ , num_heads=snake_case__ , d_kv=snake_case__ , d_ff=snake_case__ , dropout_rate=snake_case__ , feed_forward_proj=snake_case__ , is_decoder=snake_case__ , is_encoder_decoder=snake_case__ , )
lowercase__ : Union[str, Any]= nn.ModuleList()
for lyr_num in range(snake_case__ ):
lowercase__ : Tuple= TaBlock(snake_case__ )
self.encoders.append(snake_case__ )
lowercase__ : List[Any]= TaLayerNorm(snake_case__ )
lowercase__ : Optional[int]= nn.Dropout(p=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Any= self.token_embedder(snake_case__ )
lowercase__ : Any= encoder_input_tokens.shape[1]
lowercase__ : Tuple= torch.arange(snake_case__ , device=encoder_input_tokens.device )
x += self.position_encoding(snake_case__ )
lowercase__ : Dict= self.dropout_pre(snake_case__ )
# inverted the attention mask
lowercase__ : Union[str, Any]= encoder_input_tokens.size()
lowercase__ : Dict= self.get_extended_attention_mask(snake_case__ , snake_case__ )
for lyr in self.encoders:
lowercase__ : int= lyr(snake_case__ , snake_case__ )[0]
lowercase__ : Tuple= self.layer_norm(snake_case__ )
return self.dropout_post(snake_case__ ), encoder_inputs_mask
| 702 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
a : List[str] = parser.parse_args()
    a : List[str] = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
a : Optional[Any] = CLIPImageProcessor()
a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
a : Tuple = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
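# Example invocation (added); the script filename and the output directory are
# placeholders for wherever this converter lives in your checkout:
#
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha \
#       --dump_path ./karlo-image-variations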
| 85 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
a : Dict = logging.get_logger(__name__)
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["input_features", "attention_mask"]
def __init__( self , snake_case__=80 , snake_case__=16000 , snake_case__=0.0 , snake_case__=10 , snake_case__=25 , snake_case__="hamming_window" , snake_case__=32768.0 , snake_case__=0.97 , snake_case__=1.0 , snake_case__=True , snake_case__=True , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
super().__init__(feature_size=snake_case__ , sampling_rate=snake_case__ , padding_value=snake_case__ , **snake_case__ )
lowercase__ : int= feature_size
lowercase__ : Any= sampling_rate
lowercase__ : List[str]= padding_value
lowercase__ : str= hop_length
lowercase__ : Optional[int]= win_length
lowercase__ : Any= frame_signal_scale
lowercase__ : List[str]= preemphasis_coeff
lowercase__ : Optional[Any]= mel_floor
lowercase__ : Optional[int]= normalize_means
lowercase__ : List[str]= normalize_vars
lowercase__ : Dict= win_function
lowercase__ : List[str]= return_attention_mask
lowercase__ : Any= win_length * sampling_rate // 1000
lowercase__ : List[Any]= hop_length * sampling_rate // 1000
lowercase__ : Union[str, Any]= optimal_fft_length(self.sample_size )
lowercase__ : Any= (self.n_fft // 2) + 1
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
if self.win_function == "hamming_window":
lowercase__ : List[str]= window_function(window_length=self.sample_size , name=self.win_function , periodic=snake_case__ )
else:
lowercase__ : Optional[Any]= window_function(window_length=self.sample_size , name=self.win_function )
lowercase__ : Any= mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
lowercase__ : Dict= spectrogram(
one_waveform * self.frame_signal_scale , window=snake_case__ , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=snake_case__ , preemphasis=self.preemphasis_coeff , mel_filters=snake_case__ , mel_floor=self.mel_floor , log_mel="log" , )
return msfc_features.T
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
# make sure we normalize float32 arrays
if self.normalize_means:
lowercase__ : Optional[int]= x[:input_length].mean(axis=0 )
lowercase__ : int= np.subtract(snake_case__ , snake_case__ )
if self.normalize_vars:
lowercase__ : List[str]= x[:input_length].std(axis=0 )
lowercase__ : Union[str, Any]= np.divide(snake_case__ , snake_case__ )
if input_length < x.shape[0]:
lowercase__ : Optional[int]= padding_value
# make sure array is in float32
lowercase__ : Union[str, Any]= x.astype(np.floataa )
return x
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__ : Optional[int]= attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(snake_case__ , snake_case__ , self.padding_value ) for x, n in zip(snake_case__ , snake_case__ )]
def __call__( self , snake_case__ , snake_case__ = False , snake_case__ = None , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase__ : Optional[int]= isinstance(snake_case__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowercase__ : int= is_batched_numpy or (
isinstance(snake_case__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ : Union[str, Any]= [np.asarray(snake_case__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case__ , np.ndarray ):
lowercase__ : Optional[Any]= np.asarray(snake_case__ , dtype=np.floataa )
elif isinstance(snake_case__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ : Union[str, Any]= raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ : List[str]= [raw_speech]
# extract fbank features
lowercase__ : List[Any]= [self._extract_mfsc_features(snake_case__ ) for one_waveform in raw_speech]
# convert into correct format for padding
lowercase__ : Optional[int]= BatchFeature({"input_features": features} )
lowercase__ : Dict= self.pad(
snake_case__ , padding=snake_case__ , max_length=snake_case__ , truncation=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , **snake_case__ , )
# make sure list is in array format
lowercase__ : Dict= padded_inputs.get("input_features" )
if isinstance(input_features[0] , snake_case__ ):
lowercase__ : Any= [np.asarray(snake_case__ , dtype=np.floataa ) for feature in input_features]
lowercase__ : int= padded_inputs.get("attention_mask" )
if attention_mask is not None:
lowercase__ : List[Any]= [np.asarray(snake_case__ , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
lowercase__ : List[str]= (
np.array(snake_case__ , dtype=np.intaa )
if self._get_padding_strategies(snake_case__ , max_length=snake_case__ ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
lowercase__ : int= self.normalize(
padded_inputs["input_features"] , attention_mask=snake_case__ )
if return_tensors is not None:
lowercase__ : Tuple= padded_inputs.convert_to_tensors(snake_case__ )
return padded_inputs
| 703 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
a : Optional[Any] = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]:
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models )
lowercase__ : List[Any]= config_class.from_json_file(A )
lowercase__ : Any= True
lowercase__ : List[str]= True
print(f'''Building TensorFlow model from configuration: {config}''' )
lowercase__ : Optional[int]= model_class(A )
    # Resolve the PyTorch checkpoint (download it if a shortcut name was given)
if pytorch_checkpoint_path in aws_config_map.keys():
lowercase__ : List[str]= cached_file(
A , A , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A )
if compare_with_pt_model:
lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network
lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" )
lowercase__ : Union[str, Any]= pt_model_class.from_pretrained(
pretrained_model_name_or_path=A , config=A , state_dict=A )
with torch.no_grad():
lowercase__ : str= pt_model(**pt_model.dummy_inputs )
lowercase__ : Tuple= pto[0].numpy()
lowercase__ : List[Any]= tfo[0].numpy()
lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) )
print(f'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}'''
    # Save the TensorFlow model
print(f'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(A , save_format="h5" )
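# Example call for the converter above (hypothetical shortcut names and output path;
# the argparse entry point at the bottom of this script is the usual driver):
#   convert_pt_checkpoint_to_tf(
#       model_type="bert",
#       pytorch_checkpoint_path="bert-base-uncased",
#       config_file="bert-base-uncased",
#       tf_dump_path="./bert-base-uncased-tf_model.h5",
#       compare_with_pt_model=True,
#   )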
def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]:
"""simple docstring"""
if args_model_type is None:
lowercase__ : Tuple= list(MODEL_CLASSES.keys() )
else:
lowercase__ : Optional[int]= [args_model_type]
for j, model_type in enumerate(A , start=1 ):
print("=" * 100 )
print(f''' Converting model type {j}/{len(A )}: {model_type}''' )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowercase__ : int= list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowercase__ : Any= model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(A , A ) , start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
lowercase__ : Any= model_shortcut_name
elif only_convert_finetuned_models:
print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models )
else:
lowercase__ : Union[str, Any]= config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowercase__ : str= cached_file(A , A , force_download=not use_cached_models )
else:
lowercase__ : Any= model_shortcut_name
if os.path.isfile(A ):
lowercase__ : Dict= "converted_model"
convert_pt_checkpoint_to_tf(
model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , )
if remove_cached_files:
os.remove(A )
os.remove(A )
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
a : List[str] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 85 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Optional[int] = logging.get_logger(__name__)
a : List[str] = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = "levit"
def __init__( self , snake_case__=224 , snake_case__=3 , snake_case__=3 , snake_case__=2 , snake_case__=1 , snake_case__=16 , snake_case__=[128, 256, 384] , snake_case__=[4, 8, 12] , snake_case__=[4, 4, 4] , snake_case__=[16, 16, 16] , snake_case__=0 , snake_case__=[2, 2, 2] , snake_case__=[2, 2, 2] , snake_case__=0.02 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase__ : Optional[int]= image_size
lowercase__ : Any= num_channels
lowercase__ : int= kernel_size
lowercase__ : int= stride
lowercase__ : int= padding
lowercase__ : str= hidden_sizes
lowercase__ : str= num_attention_heads
lowercase__ : int= depths
lowercase__ : Dict= key_dim
lowercase__ : int= drop_path_rate
lowercase__ : Dict= patch_size
lowercase__ : List[str]= attention_ratio
lowercase__ : Optional[int]= mlp_ratio
lowercase__ : str= initializer_range
lowercase__ : int= [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = version.parse("1.11" )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return 1e-4
| 704 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 85 | 0 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a : Optional[Any] = logging.getLogger(__name__)
a : Optional[int] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
a : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCAmelCase:
"""simple docstring"""
__lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
__lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(SCREAMING_SNAKE_CASE__ )} , )
__lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __UpperCAmelCase:
"""simple docstring"""
__lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "The input training data file (a text file)."} )
__lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
__lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
__lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
__lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
__lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
__lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Whether ot not to use whole word mask."} )
__lowerCamelCase = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
__lowerCamelCase = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
__lowerCamelCase = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
__lowerCamelCase = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
__lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowercase__(A , A , A = False , A = None , ) ->Optional[int]:
"""simple docstring"""
def _dataset(A , A=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask" )
return LineByLineWithRefDataset(
tokenizer=A , file_path=A , block_size=args.block_size , ref_path=A , )
return LineByLineTextDataset(tokenizer=A , file_path=A , block_size=args.block_size )
else:
return TextDataset(
tokenizer=A , file_path=A , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=A , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(A ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def lowercase__() ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[int]= HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowercase__ : List[str]= parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , A )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
lowercase__ : List[str]= AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowercase__ : Optional[Any]= AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
lowercase__ : List[str]= CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.tokenizer_name:
lowercase__ : Union[str, Any]= AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowercase__ : Optional[int]= AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name" )
if model_args.model_name_or_path:
lowercase__ : Dict= AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , )
else:
logger.info("Training new model from scratch" )
lowercase__ : List[str]= AutoModelWithLMHead.from_config(A )
model.resize_token_embeddings(len(A ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
if data_args.block_size <= 0:
lowercase__ : Optional[int]= tokenizer.max_len
# Our input block size will be the max possible for the model
else:
lowercase__ : Dict= min(data_args.block_size , tokenizer.max_len )
# Get datasets
lowercase__ : Tuple= (
get_dataset(A , tokenizer=A , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
lowercase__ : List[Any]= (
get_dataset(A , tokenizer=A , evaluate=A , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
lowercase__ : Optional[int]= DataCollatorForPermutationLanguageModeling(
tokenizer=A , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
lowercase__ : Any= DataCollatorForWholeWordMask(
tokenizer=A , mlm_probability=data_args.mlm_probability )
else:
lowercase__ : Optional[int]= DataCollatorForLanguageModeling(
tokenizer=A , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowercase__ : Tuple= Trainer(
model=A , args=A , data_collator=A , train_dataset=A , eval_dataset=A , prediction_loss_only=A , )
# Training
if training_args.do_train:
lowercase__ : Dict= (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=A )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase__ : Any= {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
lowercase__ : List[str]= trainer.evaluate()
lowercase__ : List[Any]= math.exp(eval_output["eval_loss"] )
lowercase__ : str= {"perplexity": perplexity}
lowercase__ : Any= os.path.join(training_args.output_dir , "eval_results_lm.txt" )
if trainer.is_world_master():
with open(A , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , A , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
results.update(A )
return results
def lowercase__(A ) ->Union[str, Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 705 |
"""simple docstring"""
def lowercase__(A ) ->list:
"""simple docstring"""
if n_term == "":
return []
lowercase__ : list= []
for temp in range(int(A ) ):
series.append(f'''1/{temp + 1}''' if series else "1" )
return series
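# Worked example for the helper above: an input of "4" yields ['1', '1/2', '1/3', '1/4'],
# i.e. the first four terms of the harmonic series 1 + 1/2 + 1/3 + 1/4.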
if __name__ == "__main__":
a : Dict = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
| 85 | 0 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a : int = random.Random()
def lowercase__(A , A=1.0 , A=None , A=None ) ->List[Any]:
"""simple docstring"""
if rng is None:
lowercase__ : int= global_rng
lowercase__ : List[str]= []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=7 , snake_case__=400 , snake_case__=2000 , snake_case__=2048 , snake_case__=128 , snake_case__=1 , snake_case__=512 , snake_case__=30 , snake_case__=44100 , ):
'''simple docstring'''
lowercase__ : Dict= parent
lowercase__ : Tuple= batch_size
lowercase__ : int= min_seq_length
lowercase__ : Union[str, Any]= max_seq_length
lowercase__ : str= (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowercase__ : str= spectrogram_length
lowercase__ : Optional[Any]= feature_size
lowercase__ : Any= num_audio_channels
lowercase__ : Tuple= hop_length
lowercase__ : Optional[Any]= chunk_length
lowercase__ : Union[str, Any]= sampling_rate
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCAmelCase_ ( self , snake_case__=False , snake_case__=False ):
'''simple docstring'''
def _flatten(snake_case__ ):
return list(itertools.chain(*snake_case__ ) )
if equal_length:
lowercase__ : Tuple= [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowercase__ : str= [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowercase__ : List[str]= [np.asarray(snake_case__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = TvltFeatureExtractor
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= TvltFeatureExtractionTester(self )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(snake_case__ , "spectrogram_length" ) )
self.assertTrue(hasattr(snake_case__ , "feature_size" ) )
self.assertTrue(hasattr(snake_case__ , "num_audio_channels" ) )
self.assertTrue(hasattr(snake_case__ , "hop_length" ) )
self.assertTrue(hasattr(snake_case__ , "chunk_length" ) )
self.assertTrue(hasattr(snake_case__ , "sampling_rate" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : Tuple= feat_extract_first.save_pretrained(snake_case__ )[0]
check_json_file_has_correct_format(snake_case__ )
lowercase__ : List[Any]= self.feature_extraction_class.from_pretrained(snake_case__ )
lowercase__ : int= feat_extract_first.to_dict()
lowercase__ : Optional[Any]= feat_extract_second.to_dict()
lowercase__ : Optional[int]= dict_first.pop("mel_filters" )
lowercase__ : List[str]= dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(snake_case__ , snake_case__ ) )
self.assertEqual(snake_case__ , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : int= os.path.join(snake_case__ , "feat_extract.json" )
feat_extract_first.to_json_file(snake_case__ )
lowercase__ : List[Any]= self.feature_extraction_class.from_json_file(snake_case__ )
lowercase__ : int= feat_extract_first.to_dict()
lowercase__ : List[Any]= feat_extract_second.to_dict()
lowercase__ : Optional[Any]= dict_first.pop("mel_filters" )
lowercase__ : str= dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(snake_case__ , snake_case__ ) )
self.assertEqual(snake_case__ , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
lowercase__ : List[Any]= [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase__ : List[Any]= [np.asarray(snake_case__ ) for speech_input in speech_inputs]
# Test not batched input
lowercase__ : Optional[Any]= feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
lowercase__ : List[Any]= feature_extractor(snake_case__ , return_tensors="np" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
lowercase__ : Optional[int]= feature_extractor(
snake_case__ , return_tensors="np" , sampling_rate=44100 , mask_audio=snake_case__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
lowercase__ : Union[str, Any]= [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowercase__ : Optional[int]= np.asarray(snake_case__ )
lowercase__ : Any= feature_extractor(snake_case__ , return_tensors="np" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Union[str, Any]= load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
lowercase__ : str= ds.sort("id" ).select(range(snake_case__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self._load_datasamples(1 )
lowercase__ : List[str]= TvltFeatureExtractor()
lowercase__ : Optional[int]= feature_extractor(snake_case__ , return_tensors="pt" ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
lowercase__ : str= torch.tensor([[-0.30_32, -0.27_08], [-0.44_34, -0.40_07]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , snake_case__ , atol=1e-4 ) )
| 706 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : str = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "big_bird"
def __init__( self , snake_case__=50358 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=4096 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=66 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=64 , snake_case__=3 , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , sep_token_id=snake_case__ , **snake_case__ , )
lowercase__ : Dict= vocab_size
lowercase__ : Optional[int]= max_position_embeddings
lowercase__ : List[Any]= hidden_size
lowercase__ : List[str]= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : Optional[int]= intermediate_size
lowercase__ : Optional[int]= hidden_act
lowercase__ : Tuple= hidden_dropout_prob
lowercase__ : int= attention_probs_dropout_prob
lowercase__ : int= initializer_range
lowercase__ : List[Any]= type_vocab_size
lowercase__ : Union[str, Any]= layer_norm_eps
lowercase__ : Optional[Any]= use_cache
lowercase__ : Union[str, Any]= rescale_embeddings
lowercase__ : Union[str, Any]= attention_type
lowercase__ : Any= use_bias
lowercase__ : List[Any]= block_size
lowercase__ : Optional[Any]= num_random_blocks
lowercase__ : Optional[int]= classifier_dropout
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ : List[Any]= {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ : Tuple= {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 85 | 0 |
"""simple docstring"""
a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ):
lowercase__ : Union[str, Any]= f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(A )
lowercase__ : str= "".join(bin(A )[2:].zfill(8 ) for byte in data )
lowercase__ : Tuple= len(A ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ : Union[str, Any]= b"=" * ((6 - len(A ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(A ) % 6)
else:
lowercase__ : str= b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A ) , 6 ) ).encode()
+ padding
)
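# Worked example for the encoder above: b"data" is 4 bytes (32 bits), so 4 zero bits are
# appended to reach a multiple of 6 and two "=" characters are added, giving b"ZGF0YQ=="
# (the same result as the standard library's base64.b64encode(b"data")).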
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ) and not isinstance(A , A ):
lowercase__ : str= (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(A )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(A , A ):
try:
lowercase__ : Optional[Any]= encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ : List[Any]= encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ : str= encoded_data[:-padding]
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ : Any= [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A ) , 8 )
]
return bytes(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 85 | 0 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
a : List[str] = parser.parse_args()
a : List[str] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
a : Optional[Any] = CLIPImageProcessor()
a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
a : Tuple = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
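# Example invocation (hypothetical script name; the flags match the parser above):
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha \
#       --dump_path ./karlo-image-variation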
| 708 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def lowercase__(A , A ) ->List[Any]:
"""simple docstring"""
lowercase__ : str= []
for part_id in partition_order:
lowercase__ : int= df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(A ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : Optional[Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple= spark.range(100 ).repartition(1 )
lowercase__ : Dict= Spark(A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Union[str, Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Dict= spark.range(10 ).repartition(2 )
lowercase__ : Optional[Any]= [1, 0]
lowercase__ : List[str]= _generate_iterable_examples(A , A ) # Reverse the partitions.
lowercase__ : int= _get_expected_row_ids_and_row_dicts_for_partition_order(A , A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
lowercase__, lowercase__ : Any= expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->int:
"""simple docstring"""
lowercase__ : int= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Dict= spark.range(10 ).repartition(1 )
lowercase__ : str= SparkExamplesIterable(A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(A ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : List[str]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : int= spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
lowercase__ : Optional[Any]= lambda A : x.reverse()
lowercase__ : Tuple= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] )
lowercase__ : List[str]= SparkExamplesIterable(A ).shuffle_data_sources(A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : str= expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Any:
"""simple docstring"""
lowercase__ : Dict= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Union[str, Any]= spark.range(20 ).repartition(4 )
# Partitions 0 and 2
lowercase__ : Optional[int]= SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : Union[str, Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] )
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : Tuple= expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
lowercase__ : Tuple= SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : List[Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] )
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : Dict= expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Any= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple= spark.range(100 ).repartition(1 )
lowercase__ : Optional[int]= Spark(A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 85 | 0 |
def lowercase__(A ) ->int:
"""simple docstring"""
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
lowercase__ : List[str]= 1
lowercase__ : Optional[Any]= 1
while repunit:
lowercase__ : Dict= (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
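# The loop above tracks the repunit modulo `divisor` via the recurrence R(k+1) = 10*R(k) + 1,
# so the repunit itself is never materialised. Worked example: for divisor 7 the first
# repunit divisible by 7 is R(6) = 111111 = 7 * 15873, so the function returns 6.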
def lowercase__(A = 1_000_000 ) ->int:
"""simple docstring"""
lowercase__ : Any= limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(A ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 709 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ):
'''simple docstring'''
lowercase__ : Optional[int]= parent
lowercase__ : Tuple= batch_size
lowercase__ : Tuple= seq_length
lowercase__ : str= is_training
lowercase__ : str= use_input_lengths
lowercase__ : Any= use_token_type_ids
lowercase__ : List[Any]= use_labels
lowercase__ : Optional[int]= gelu_activation
lowercase__ : str= sinusoidal_embeddings
lowercase__ : List[str]= causal
lowercase__ : Any= asm
lowercase__ : Optional[int]= n_langs
lowercase__ : Union[str, Any]= vocab_size
lowercase__ : int= n_special
lowercase__ : Any= hidden_size
lowercase__ : int= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : List[str]= hidden_dropout_prob
lowercase__ : str= attention_probs_dropout_prob
lowercase__ : Any= max_position_embeddings
lowercase__ : List[Any]= type_vocab_size
lowercase__ : int= type_sequence_label_size
lowercase__ : Any= initializer_range
lowercase__ : Optional[int]= num_labels
lowercase__ : Union[str, Any]= num_choices
lowercase__ : List[Any]= summary_type
lowercase__ : Optional[int]= use_proj
lowercase__ : int= scope
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Tuple= None
if self.use_input_lengths:
lowercase__ : List[Any]= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ : Tuple= None
if self.use_token_type_ids:
lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ : str= None
lowercase__ : Tuple= None
lowercase__ : Dict= None
if self.use_labels:
lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float()
lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any]= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : Any= FlaubertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ )
lowercase__ : str= model(snake_case__ , langs=snake_case__ )
lowercase__ : Any= model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : List[str]= model(snake_case__ )
lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= model(snake_case__ )
lowercase__ : Any= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , )
lowercase__ : List[str]= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , )
((lowercase__), ) : Optional[Any]= result_with_labels.to_tuple()
lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
((lowercase__), ) : List[Any]= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ )
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= self.num_labels
lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : int= self.num_choices
lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Any= model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) : Any= config_and_inputs
lowercase__ : Tuple= {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__lowerCamelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowercase__ : List[Any]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
lowercase__ : List[str]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModelTester(self )
lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@slow
@require_torch_gpu
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ : int= True
lowercase__ : List[Any]= model_class(config=snake_case__ )
lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ )
lowercase__ : Dict= torch.jit.trace(
snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) )
lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ )
loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) )
@require_torch
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
lowercase__ : Optional[int]= model(snake_case__ )[0]
lowercase__ : Optional[int]= torch.Size((1, 11, 768) )
self.assertEqual(output.shape , snake_case__ )
lowercase__ : Dict= torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
| 85 | 0 |
"""simple docstring"""
def lowercase__(A = 10 , A = 1_000 , A = True ):
"""simple docstring"""
assert (
isinstance(A , A )
and isinstance(A , A )
and isinstance(A , A )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
return min_val if option else max_val
def lowercase__(A , A ):
"""simple docstring"""
return int((number_a + number_a) / 2 )
def lowercase__(A , A , A ):
"""simple docstring"""
assert (
isinstance(A , A ) and isinstance(A , A ) and isinstance(A , A )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)" )
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value" )
def answer(A ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
lowercase__ : List[Any]= lower
lowercase__ : int= higher
lowercase__ : Optional[int]= []
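    # Binary-search style loop: guess the midpoint of the current range, then move the lower or upper bound toward the hidden number based on the feedback.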
while True:
lowercase__ : Dict= get_avg(A , A )
last_numbers.append(A )
if answer(A ) == "low":
lowercase__ : str= number
elif answer(A ) == "high":
lowercase__ : Any= number
else:
break
print(f'''guess the number : {last_numbers[-1]}''' )
print(f'''details : {last_numbers!s}''' )
def lowercase__():
"""simple docstring"""
lowercase__ : Union[str, Any]= int(input("Enter lower value : " ).strip() )
lowercase__ : Any= int(input("Enter high value : " ).strip() )
lowercase__ : str= int(input("Enter value to guess : " ).strip() )
guess_the_number(A , A , A )
if __name__ == "__main__":
main()
| 710 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = None
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 2
@register_to_config
def __init__( self , snake_case__ = 0.02 , snake_case__ = 100 , snake_case__ = 1.0_07 , snake_case__ = 80 , snake_case__ = 0.05 , snake_case__ = 50 , ):
'''simple docstring'''
# standard deviation of the initial noise distribution
lowercase__ : int= sigma_max
# setable values
lowercase__ : int= None
lowercase__ : np.IntTensor= None
lowercase__ : torch.FloatTensor= None # sigma(t_i)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
return sample
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__ : List[Any]= num_inference_steps
lowercase__ : Any= np.arange(0 , self.num_inference_steps )[::-1].copy()
lowercase__ : Tuple= torch.from_numpy(snake_case__ ).to(snake_case__ )
lowercase__ : Union[str, Any]= [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
lowercase__ : int= torch.tensor(snake_case__ , dtype=torch.floataa , device=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None ):
'''simple docstring'''
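        # Stochastic churn: raise the noise level from sigma to sigma_hat = sigma * (1 + gamma) and add matching Gaussian noise so the sample sits at the higher noise level.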
if self.config.s_min <= sigma <= self.config.s_max:
lowercase__ : Optional[Any]= min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
lowercase__ : str= 0
# sample eps ~ N(0, S_noise^2 * I)
lowercase__ : List[Any]= self.config.s_noise * randn_tensor(sample.shape , generator=snake_case__ ).to(sample.device )
lowercase__ : str= sigma + gamma * sigma
lowercase__ : Any= sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
'''simple docstring'''
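        # First-order (Euler) update: estimate the denoised sample, form the derivative d = (x - x_0) / sigma_hat, and step from sigma_hat to sigma_prev along it.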
lowercase__ : Union[str, Any]= sample_hat + sigma_hat * model_output
lowercase__ : Optional[int]= (sample_hat - pred_original_sample) / sigma_hat
lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
'''simple docstring'''
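        # Second-order (Heun-style) correction: recompute the derivative at the predicted sample and average it with the first derivative before stepping.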
lowercase__ : int= sample_prev + sigma_prev * model_output
lowercase__ : Optional[int]= (sample_prev - pred_original_sample) / sigma_prev
lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
raise NotImplementedError()
| 85 | 0 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __UpperCAmelCase:
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ : Dict= TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ : Any= AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ : List[Any]= UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase__ : Optional[int]= DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.00_01 , beta_end=0.02 , thresholding=snake_case__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowercase__ : Union[str, Any]= IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ : Dict= TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ : int= AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ : int= UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.4_14 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase__ : List[Any]= DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.00_01 , beta_end=0.02 , thresholding=snake_case__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowercase__ : int= DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0 )
lowercase__ : Tuple= IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase_ ( self ):
'''simple docstring'''
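        # Round-trip test: run the pipeline from precomputed prompt embeddings with all optional components set to None, save and reload it, and check both outputs agree within 1e-4.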
lowercase__ : List[str]= self.get_dummy_components()
lowercase__ : str= self.pipeline_class(**snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : Any= self.get_dummy_inputs(snake_case__ )
lowercase__ : Union[str, Any]= inputs["prompt"]
lowercase__ : str= inputs["generator"]
lowercase__ : List[Any]= inputs["num_inference_steps"]
lowercase__ : Optional[Any]= inputs["output_type"]
if "image" in inputs:
lowercase__ : str= inputs["image"]
else:
lowercase__ : int= None
if "mask_image" in inputs:
lowercase__ : str= inputs["mask_image"]
else:
lowercase__ : Optional[int]= None
if "original_image" in inputs:
lowercase__ : Optional[int]= inputs["original_image"]
else:
lowercase__ : Any= None
lowercase__ : Tuple= pipe.encode_prompt(snake_case__ )
# inputs with prompt converted to embeddings
lowercase__ : str= {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ : Dict= image
if mask_image is not None:
lowercase__ : Tuple= mask_image
if original_image is not None:
lowercase__ : Dict= original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(snake_case__ , snake_case__ , snake_case__ )
lowercase__ : Optional[Any]= pipe(**snake_case__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(snake_case__ )
lowercase__ : Union[str, Any]= self.pipeline_class.from_pretrained(snake_case__ )
pipe_loaded.to(snake_case__ )
pipe_loaded.set_progress_bar_config(disable=snake_case__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(snake_case__ , snake_case__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase__ : Tuple= self.get_dummy_inputs(snake_case__ )
lowercase__ : Tuple= inputs["generator"]
lowercase__ : Union[str, Any]= inputs["num_inference_steps"]
lowercase__ : Any= inputs["output_type"]
# inputs with prompt converted to embeddings
lowercase__ : Optional[Any]= {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ : int= image
if mask_image is not None:
lowercase__ : int= mask_image
if original_image is not None:
lowercase__ : List[str]= original_image
lowercase__ : Union[str, Any]= pipe_loaded(**snake_case__ )[0]
lowercase__ : Optional[Any]= np.abs(to_np(snake_case__ ) - to_np(snake_case__ ) ).max()
self.assertLess(snake_case__ , 1e-4 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.get_dummy_components()
lowercase__ : Tuple= self.pipeline_class(**snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : Dict= self.get_dummy_inputs(snake_case__ )
lowercase__ : Dict= pipe(**snake_case__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(snake_case__ )
lowercase__ : List[str]= self.pipeline_class.from_pretrained(snake_case__ )
pipe_loaded.to(snake_case__ )
pipe_loaded.set_progress_bar_config(disable=snake_case__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowercase__ : Tuple= self.get_dummy_inputs(snake_case__ )
lowercase__ : List[str]= pipe_loaded(**snake_case__ )[0]
lowercase__ : Union[str, Any]= np.abs(to_np(snake_case__ ) - to_np(snake_case__ ) ).max()
self.assertLess(snake_case__ , 1e-4 )
| 711 |
"""simple docstring"""
from ....utils import logging
a : List[str] = logging.get_logger(__name__)
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=None , snake_case__=2048 ):
'''simple docstring'''
lowercase__ : Dict= config.__dict__
lowercase__ : str= modal_hidden_size
if num_labels:
lowercase__ : List[str]= num_labels
| 85 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
a : Union[str, Any] = logging.get_logger(__name__)
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["pixel_values"]
def __init__( self , snake_case__ = True , snake_case__ = None , snake_case__ = PILImageResampling.BILINEAR , snake_case__ = True , snake_case__ = 1 / 255 , snake_case__ = True , snake_case__ = None , snake_case__ = True , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase__ : Any= size if size is not None else {"shortest_edge": 224}
lowercase__ : str= get_size_dict(snake_case__ , default_to_square=snake_case__ )
lowercase__ : Dict= crop_size if crop_size is not None else {"height": 256, "width": 256}
lowercase__ : Optional[int]= get_size_dict(snake_case__ , param_name="crop_size" )
lowercase__ : Optional[Any]= do_resize
lowercase__ : List[str]= size
lowercase__ : Optional[int]= resample
lowercase__ : Optional[int]= do_rescale
lowercase__ : List[Any]= rescale_factor
lowercase__ : int= do_center_crop
lowercase__ : str= crop_size
lowercase__ : Any= do_flip_channel_order
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = PIL.Image.BILINEAR , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Any= get_size_dict(snake_case__ , default_to_square=snake_case__ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase__ : List[str]= get_resize_output_image_size(snake_case__ , size=size["shortest_edge"] , default_to_square=snake_case__ )
return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Any= get_size_dict(snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
return center_crop(snake_case__ , size=(size["height"], size["width"]) , data_format=snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
return flip_channel_order(snake_case__ , data_format=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , **snake_case__ , ):
'''simple docstring'''
lowercase__ : List[str]= do_resize if do_resize is not None else self.do_resize
lowercase__ : List[Any]= resample if resample is not None else self.resample
lowercase__ : List[Any]= do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : str= rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Optional[Any]= do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ : List[str]= (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
lowercase__ : Any= size if size is not None else self.size
lowercase__ : Union[str, Any]= get_size_dict(snake_case__ , default_to_square=snake_case__ )
lowercase__ : str= crop_size if crop_size is not None else self.crop_size
lowercase__ : Union[str, Any]= get_size_dict(snake_case__ , param_name="crop_size" )
lowercase__ : Dict= make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
lowercase__ : str= [to_numpy_array(snake_case__ ) for image in images]
if do_resize:
lowercase__ : Tuple= [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images]
if do_center_crop:
lowercase__ : List[str]= [self.center_crop(image=snake_case__ , size=snake_case__ ) for image in images]
if do_rescale:
lowercase__ : int= [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
lowercase__ : List[Any]= [self.flip_channel_order(image=snake_case__ ) for image in images]
lowercase__ : Optional[Any]= [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
lowercase__ : Any= {"pixel_values": images}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
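        # Upsample the logits to each requested target size (when given) and take a per-pixel argmax to produce one segmentation map per image.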
lowercase__ : Tuple= outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(snake_case__ ) != len(snake_case__ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(snake_case__ ):
lowercase__ : Any= target_sizes.numpy()
lowercase__ : List[str]= []
for idx in range(len(snake_case__ ) ):
lowercase__ : Optional[Any]= torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=snake_case__ )
lowercase__ : Dict= resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(snake_case__ )
else:
lowercase__ : int= logits.argmax(dim=1 )
lowercase__ : Dict= [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 712 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__ : Optional[int]= []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def lowercase__(A , A ) ->Any:
"""simple docstring"""
lowercase__ : Any= []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def lowercase__(A ) ->List[Any]:
"""simple docstring"""
lowercase__ : Dict= []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token") )
return token
def lowercase__() ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict= []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def lowercase__(A , A , A , A ) ->Optional[int]:
"""simple docstring"""
lowercase__ : List[str]= "imagenet-1k-id2label.json"
lowercase__ : List[str]= 1_000
lowercase__ : Tuple= "huggingface/label-files"
lowercase__ : int= num_labels
lowercase__ : int= json.load(open(cached_download(hf_hub_url(A , A , repo_type="dataset" ) ) , "r" ) )
lowercase__ : str= {int(A ): v for k, v in idalabel.items()}
lowercase__ : Optional[int]= idalabel
lowercase__ : Union[str, Any]= {v: k for k, v in idalabel.items()}
lowercase__ : Tuple= CvtConfig(num_labels=A , idalabel=A , labelaid=A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
lowercase__ : int= [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
lowercase__ : Union[str, Any]= [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowercase__ : Optional[Any]= [2, 2, 20]
lowercase__ : Optional[Any]= [3, 12, 16]
lowercase__ : List[str]= [192, 768, 1_024]
lowercase__ : List[str]= CvtForImageClassification(A )
lowercase__ : Any= AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
lowercase__ : Dict= image_size
lowercase__ : int= torch.load(A , map_location=torch.device("cpu" ) )
lowercase__ : Optional[Any]= OrderedDict()
lowercase__ : Tuple= []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowercase__ : Optional[int]= list_of_state_dict + cls_token(A )
lowercase__ : List[str]= list_of_state_dict + embeddings(A )
for cnt in range(config.depth[idx] ):
lowercase__ : Dict= list_of_state_dict + attention(A , A )
lowercase__ : Optional[Any]= list_of_state_dict + final()
for gg in list_of_state_dict:
print(A )
for i in range(len(A ) ):
lowercase__ : str= original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
a : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a : Optional[int] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 85 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
a : List[str] = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ["""DPTFeatureExtractor"""]
a : List[Any] = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 713 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=snake_case__ , scheduler=snake_case__ )
@torch.no_grad()
def __call__( self , snake_case__ = 1 , snake_case__ = 2000 , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.unet.config.sample_size
lowercase__ : Dict= (batch_size, 3, img_size, img_size)
lowercase__ : List[Any]= self.unet
lowercase__ : Tuple= randn_tensor(snake_case__ , generator=snake_case__ ) * self.scheduler.init_noise_sigma
lowercase__ : Tuple= sample.to(self.device )
self.scheduler.set_timesteps(snake_case__ )
self.scheduler.set_sigmas(snake_case__ )
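        # Predictor-corrector sampling: at each timestep run several correction steps, then a single prediction step with the score model.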
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ : Optional[Any]= self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowercase__ : List[Any]= self.unet(snake_case__ , snake_case__ ).sample
lowercase__ : List[Any]= self.scheduler.step_correct(snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
# prediction step
lowercase__ : List[str]= model(snake_case__ , snake_case__ ).sample
lowercase__ : Tuple= self.scheduler.step_pred(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ )
lowercase__, lowercase__ : Tuple= output.prev_sample, output.prev_sample_mean
lowercase__ : List[str]= sample_mean.clamp(0 , 1 )
lowercase__ : Union[str, Any]= sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ : str= self.numpy_to_pil(snake_case__ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=snake_case__ )
| 85 | 0 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowercase__(A , A , A ) ->int:
"""simple docstring"""
lowercase__ : List[Any]= 1.5
lowercase__ : str= int(factor * num_class_images )
lowercase__ : str= ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=A , aesthetic_weight=0.1 )
os.makedirs(f'''{class_data_dir}/images''' , exist_ok=A )
if len(list(Path(f'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase__ : int= client.query(text=A )
if len(A ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowercase__ : str= int(factor * num_images )
lowercase__ : str= ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=A , aesthetic_weight=0.1 , )
lowercase__ : Optional[int]= 0
lowercase__ : Optional[Any]= 0
lowercase__ : Union[str, Any]= tqdm(desc="downloading real regularization images" , total=A )
with open(f'''{class_data_dir}/caption.txt''' , "w" ) as fa, open(f'''{class_data_dir}/urls.txt''' , "w" ) as fa, open(
f'''{class_data_dir}/images.txt''' , "w" ) as fa:
while total < num_class_images:
lowercase__ : List[Any]= class_images[count]
count += 1
try:
lowercase__ : List[str]= requests.get(images["url"] )
if img.status_code == 200:
lowercase__ : Any= Image.open(BytesIO(img.content ) )
with open(f'''{class_data_dir}/images/{total}.jpg''' , "wb" ) as f:
f.write(img.content )
fa.write(images["caption"] + "\n" )
fa.write(images["url"] + "\n" )
fa.write(f'''{class_data_dir}/images/{total}.jpg''' + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : int= argparse.ArgumentParser("" , add_help=A )
parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=A , type=A )
parser.add_argument("--class_data_dir" , help="path to save images" , required=A , type=A )
parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=A )
return parser.parse_args()
if __name__ == "__main__":
a : List[Any] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 714 |
"""simple docstring"""
def lowercase__(A ) ->list[int]:
"""simple docstring"""
lowercase__ : List[str]= len(A )
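    # Exchange sort: compare each element with every later one and swap them whenever they are out of order (O(n^2) comparisons).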
for i in range(A ):
for j in range(i + 1 , A ):
if numbers[j] < numbers[i]:
lowercase__, lowercase__ : List[str]= numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
a : Dict = input("""Enter numbers separated by a comma:\n""").strip()
a : List[str] = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 85 | 0 |
"""simple docstring"""
def lowercase__(A ) ->bool:
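    # n is pentagonal iff (1 + sqrt(1 + 24 * n)) / 6 is a whole number (inverting P(k) = k * (3 * k - 1) / 2).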
lowercase__ : Tuple= (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowercase__(A = 5_000 ) ->int:
lowercase__ : str= [(i * (3 * i - 1)) // 2 for i in range(1 , A )]
for i, pentagonal_i in enumerate(A ):
for j in range(A , len(A ) ):
lowercase__ : List[Any]= pentagonal_nums[j]
lowercase__ : int= pentagonal_i + pentagonal_j
lowercase__ : Optional[int]= pentagonal_j - pentagonal_i
if is_pentagonal(A ) and is_pentagonal(A ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 715 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def lowercase__(A ) ->bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase__() ->Iterator[int]:
"""simple docstring"""
lowercase__ : Union[str, Any]= 2
while True:
if is_prime(A ):
yield num
num += 1
def lowercase__(A = 2_000_000 ) ->int:
"""simple docstring"""
return sum(takewhile(lambda A : x < n , prime_generator() ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 0 |
"""simple docstring"""
import os
a : Optional[int] = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1000}
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__ : Tuple= 0
lowercase__ : Dict= 0
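    # Subtractive notation: a symbol smaller than its right neighbour is subtracted (e.g. IX = 9), otherwise it is added.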
while index < len(A ) - 1:
lowercase__ : Optional[Any]= SYMBOLS[numerals[index]]
lowercase__ : str= SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def lowercase__(A ) ->str:
"""simple docstring"""
lowercase__ : Optional[int]= ""
lowercase__ : Union[str, Any]= num // 1_000
numerals += m_count * "M"
num %= 1_000
lowercase__ : Optional[int]= num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
lowercase__ : Union[str, Any]= num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def lowercase__(A = "/p089_roman.txt" ) ->int:
"""simple docstring"""
lowercase__ : Union[str, Any]= 0
with open(os.path.dirname(A ) + roman_numerals_filename ) as filea:
lowercase__ : int= filea.readlines()
for line in lines:
lowercase__ : int= line.strip()
lowercase__ : List[Any]= parse_roman_numerals(A )
lowercase__ : Tuple= generate_roman_numerals(A )
savings += len(A ) - len(A )
return savings
if __name__ == "__main__":
print(F"""{solution() = }""")
| 716 |
"""simple docstring"""
def lowercase__(A ) ->bool:
"""simple docstring"""
lowercase__ : Tuple= (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowercase__(A = 5_000 ) ->int:
"""simple docstring"""
lowercase__ : str= [(i * (3 * i - 1)) // 2 for i in range(1 , A )]
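    # For every pair of pentagonal numbers, check whether both their sum and their difference are also pentagonal.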
for i, pentagonal_i in enumerate(A ):
for j in range(A , len(A ) ):
lowercase__ : List[Any]= pentagonal_nums[j]
lowercase__ : int= pentagonal_i + pentagonal_j
lowercase__ : Optional[int]= pentagonal_j - pentagonal_i
if is_pentagonal(A ) and is_pentagonal(A ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 0 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
a : List[str] = get_tests_dir("""fixtures""")
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
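        # Simulate the Hub returning HTTP 500 and check that the previously cached feature extractor can still be loaded.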
lowercase__ : Optional[int]= mock.Mock()
lowercase__ : Tuple= 500
lowercase__ : Tuple= {}
lowercase__ : Any= HTTPError
lowercase__ : Dict= {}
# Download this model to make sure it's in the cache.
lowercase__ : Tuple= WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=snake_case__ ) as mock_head:
lowercase__ : Tuple= WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase_ ( cls ):
'''simple docstring'''
lowercase__ : str= TOKEN
HfFolder.save_token(snake_case__ )
@classmethod
def UpperCAmelCase_ ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= WavaVecaFeatureExtractor.from_pretrained(snake_case__ )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
lowercase__ : Optional[Any]= WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
snake_case__ , repo_id="test-feature-extractor" , push_to_hub=snake_case__ , use_auth_token=self._token )
lowercase__ : Any= WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= WavaVecaFeatureExtractor.from_pretrained(snake_case__ )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
lowercase__ : Dict= WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
snake_case__ , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=snake_case__ , use_auth_token=self._token )
lowercase__ : Any= WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
lowercase__ : Tuple= CustomFeatureExtractor.from_pretrained(snake_case__ )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
lowercase__ : int= AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 717 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Union[str, Any] = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_text_model"
__lowerCamelCase = ["past_key_values"]
__lowerCamelCase = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=50244 , snake_case__=768 , snake_case__=64 , snake_case__=2048 , snake_case__=12 , snake_case__=12 , snake_case__=32 , snake_case__=128 , snake_case__=0.1 , snake_case__=1e-6 , snake_case__=1.0 , snake_case__="gelu_new" , snake_case__=0 , snake_case__=False , snake_case__=0 , snake_case__=1 , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
lowercase__ : int= vocab_size
lowercase__ : Optional[Any]= hidden_size
lowercase__ : Tuple= d_kv
lowercase__ : Optional[int]= d_ff
lowercase__ : Any= num_layers
lowercase__ : Dict= num_heads
lowercase__ : List[Any]= relative_attention_num_buckets
lowercase__ : Optional[Any]= relative_attention_max_distance
lowercase__ : Dict= dropout_rate
lowercase__ : Tuple= layer_norm_epsilon
lowercase__ : str= initializer_factor
lowercase__ : Any= use_cache
lowercase__ : Optional[int]= eos_token_id
lowercase__ : str= decoder_start_token_id
# for backwards compatibility
lowercase__ : Optional[Any]= dense_act_fn
super().__init__(
pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , )
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : str= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : str= config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_vision_model"
def __init__( self , snake_case__=768 , snake_case__=768 , snake_case__=2048 , snake_case__=64 , snake_case__=12 , snake_case__=12 , snake_case__="gelu_new" , snake_case__=1e-6 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1e-10 , snake_case__=1.0 , snake_case__=4096 , snake_case__=32 , snake_case__=128 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase__ : Tuple= hidden_size
lowercase__ : Tuple= patch_embed_hidden_size
lowercase__ : Optional[Any]= d_ff
lowercase__ : Dict= dropout_rate
lowercase__ : Any= num_hidden_layers
lowercase__ : Optional[int]= num_attention_heads
lowercase__ : Dict= initializer_range
lowercase__ : Tuple= initializer_factor
lowercase__ : Tuple= attention_dropout
lowercase__ : Optional[Any]= layer_norm_eps
lowercase__ : List[Any]= dense_act_fn
lowercase__ : str= seq_len
lowercase__ : List[str]= relative_attention_num_buckets
lowercase__ : Union[str, Any]= relative_attention_max_distance
lowercase__ : Dict= d_kv
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : int= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : Union[str, Any]= config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct"
__lowerCamelCase = True
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=1.0 , snake_case__=0.02 , snake_case__=False , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ )
if text_config is None:
lowercase__ : List[Any]= {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
lowercase__ : str= {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
lowercase__ : str= PixaStructTextConfig(**snake_case__ )
lowercase__ : Dict= PixaStructVisionConfig(**snake_case__ )
lowercase__ : int= self.text_config.decoder_start_token_id
lowercase__ : List[Any]= self.text_config.pad_token_id
lowercase__ : Any= self.text_config.eos_token_id
lowercase__ : Any= initializer_factor
lowercase__ : int= initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : Dict= is_vqa
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ )
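# Note (editor sketch): the classmethod above mirrors the `from_text_vision_configs`
# helper in upstream transformers: it serializes the two sub-config objects and
# forwards their dicts to __init__, so the composite config can be rebuilt from a
# text config plus a vision config pair.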
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= copy.deepcopy(self.__dict__ )
lowercase__ : str= self.text_config.to_dict()
lowercase__ : str= self.vision_config.to_dict()
lowercase__ : List[str]= self.__class__.model_type
return output
| 85 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 718 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
lowercase__ : str= AutoTokenizer.from_pretrained("google/mt5-small" )
lowercase__ : Tuple= tokenizer("Hello there" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= tokenizer("Hi I am" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ).loss
lowercase__ : int= -tf.math.reduce_mean(snake_case__ ).numpy()
lowercase__ : int= -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
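# Note: the score compared above is the negated mean of the model loss (roughly the
# average log-likelihood the model assigns to the labels); the test only requires it
# to match the reference value to within 2e-4.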
| 85 | 0 |
from __future__ import annotations
from typing import Any
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ = 0 ):
'''simple docstring'''
lowercase__ : Dict= row, column
lowercase__ : Dict= [[default_value for c in range(snake_case__ )] for r in range(snake_case__ )]
def __str__( self ):
'''simple docstring'''
lowercase__ : List[str]= F'''Matrix consists of {self.row} rows and {self.column} columns\n'''
# Make string identifier
lowercase__ : str= 0
for row_vector in self.array:
for obj in row_vector:
lowercase__ : Tuple= max(snake_case__ , len(str(snake_case__ ) ) )
lowercase__ : int= F'''%{max_element_length}s'''
# Make string and return
def single_line(snake_case__ ) -> str:
nonlocal string_format_identifier
lowercase__ : Tuple= "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(snake_case__ ) for row_vector in self.array )
return s
def __repr__( self ):
'''simple docstring'''
return str(self )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
if not (isinstance(snake_case__ , (list, tuple) ) and len(snake_case__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self , snake_case__ ):
'''simple docstring'''
assert self.validate_indicies(snake_case__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
assert self.validate_indicies(snake_case__ )
lowercase__ : List[Any]= value
def __add__( self , snake_case__ ):
'''simple docstring'''
assert isinstance(snake_case__ , snake_case__ )
assert self.row == another.row and self.column == another.column
# Add
lowercase__ : Union[str, Any]= Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowercase__ : str= self[r, c] + another[r, c]
return result
def __neg__( self ):
'''simple docstring'''
lowercase__ : int= Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowercase__ : Optional[Any]= -self[r, c]
return result
def __sub__( self , snake_case__ ):
'''simple docstring'''
return self + (-another)
def __mul__( self , snake_case__ ):
'''simple docstring'''
if isinstance(snake_case__ , (int, float) ): # Scalar multiplication
lowercase__ : Optional[Any]= Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowercase__ : Dict= self[r, c] * another
return result
elif isinstance(snake_case__ , snake_case__ ): # Matrix multiplication
assert self.column == another.row
lowercase__ : int= Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
lowercase__ : Union[str, Any]= F'''Unsupported type given for another ({type(snake_case__ )})'''
raise TypeError(snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
lowercase__ : List[Any]= self[r, c]
return result
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
assert isinstance(snake_case__ , snake_case__ ) and isinstance(snake_case__ , snake_case__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
lowercase__ : List[Any]= v.transpose()
lowercase__ : List[str]= (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
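# Note: the method above applies the Sherman-Morrison identity
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# with `self` playing the role of A^(-1); it returns None when the denominator
# 1 + v^T A^(-1) u is zero, since the rank-one update is then not invertible.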
# Testing
if __name__ == "__main__":
def lowercase__() ->None:
"""simple docstring"""
lowercase__ : str= Matrix(3 , 3 , 0 )
for i in range(3 ):
lowercase__ : Optional[int]= 1
print(f'''a^(-1) is {ainv}''' )
# u, v
lowercase__ : Optional[Any]= Matrix(3 , 1 , 0 )
lowercase__ : Union[str, Any]= 1, 2, -3
lowercase__ : Union[str, Any]= Matrix(3 , 1 , 0 )
lowercase__ : int= 4, -2, 5
print(f'''u is {u}''' )
print(f'''v is {v}''' )
print(f'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(A , A )}''' )
def lowercase__() ->None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
| 719 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "BridgeTowerImageProcessor"
__lowerCamelCase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
def __call__( self , snake_case__ , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[int]= self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
# add pixel_values + pixel_mask
lowercase__ : Optional[int]= self.image_processor(
snake_case__ , return_tensors=snake_case__ , do_normalize=snake_case__ , do_center_crop=snake_case__ , **snake_case__ )
encoding.update(snake_case__ )
return encoding
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.tokenizer.model_input_names
lowercase__ : List[Any]= self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 85 | 0 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a : Optional[int] = logging.get_logger(__name__)
a : Dict = {"""vocab_file""": """vocab.txt"""}
a : Optional[int] = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
a : Tuple = {
"""openbmb/cpm-ant-10b""": 1024,
}
def lowercase__(A ) ->Tuple:
"""simple docstring"""
lowercase__ : Optional[Any]= collections.OrderedDict()
with open(A , "r" , encoding="utf-8" ) as reader:
lowercase__ : str= reader.readlines()
for index, token in enumerate(A ):
lowercase__ : Dict= token.rstrip("\n" )
lowercase__ : Tuple= index
return vocab
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__="<unk>" , snake_case__=200 ):
'''simple docstring'''
lowercase__ : Dict= vocab
lowercase__ : Optional[int]= unk_token
lowercase__ : int= max_input_chars_per_word
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : str= list(snake_case__ )
if len(snake_case__ ) > self.max_input_chars_per_word:
return [self.unk_token]
lowercase__ : Union[str, Any]= 0
lowercase__ : Union[str, Any]= []
while start < len(snake_case__ ):
lowercase__ : Dict= len(snake_case__ )
lowercase__ : Optional[int]= None
while start < end:
lowercase__ : List[str]= "".join(chars[start:end] )
if substr in self.vocab:
lowercase__ : Union[str, Any]= substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(snake_case__ )
lowercase__ : Tuple= end
return sub_tokens
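# Note: the wordpiece tokenizer above does a greedy longest-match-first scan: at each
# position it keeps the longest substring still present in the vocab, emits the unknown
# token (and advances one character) when nothing matches, and maps any word longer
# than max_input_chars_per_word to a single unknown token.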
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ["input_ids", "attention_mask"]
__lowerCamelCase = False
def __init__( self , snake_case__ , snake_case__="<d>" , snake_case__="</d>" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="<pad>" , snake_case__="<unk>" , snake_case__="</n>" , snake_case__="</_>" , snake_case__="left" , **snake_case__ , ):
'''simple docstring'''
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=snake_case__ , eod_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , unk_token=snake_case__ , line_token=snake_case__ , space_token=snake_case__ , padding_side=snake_case__ , **snake_case__ , )
lowercase__ : Dict= bod_token
lowercase__ : int= eod_token
lowercase__ : Optional[int]= load_vocab(snake_case__ )
lowercase__ : Optional[Any]= self.encoder[space_token]
lowercase__ : List[Any]= self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
lowercase__ : str= collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) )
lowercase__ : Union[str, Any]= {v: k for k, v in self.encoder.items()}
lowercase__ : List[Any]= WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return self.encoder[self.bod_token]
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return self.encoder[self.eod_token]
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return self.encoder["\n"]
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Any= []
for x in jieba.cut(snake_case__ , cut_all=snake_case__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(snake_case__ ) )
return output_tokens
def UpperCAmelCase_ ( self , snake_case__ , **snake_case__ ):
'''simple docstring'''
lowercase__ : int= [i for i in token_ids if i >= 0]
lowercase__ : List[Any]= [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
return token in self.encoder
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
return "".join(snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
return self.decoder.get(snake_case__ , self.unk_token )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if os.path.isdir(snake_case__ ):
lowercase__ : List[str]= os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
lowercase__ : int= (filename_prefix + "-" if filename_prefix else "") + save_directory
lowercase__ : Optional[Any]= 0
if " " in self.encoder:
lowercase__ : int= self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
lowercase__ : Any= self.encoder["\n"]
del self.encoder["\n"]
lowercase__ : Union[str, Any]= collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) )
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
lowercase__ : Any= token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is not None:
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ ))
return [1] + ([0] * len(snake_case__ ))
| 720 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= tempfile.mkdtemp()
lowercase__ : Optional[Any]= 8
# DPR tok
lowercase__ : Tuple= [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowercase__ : Any= os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : Any= os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
lowercase__ : List[Any]= [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase__ : Tuple= dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowercase__ : Any= ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : Tuple= {"unk_token": "<unk>"}
lowercase__ : int= os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : List[str]= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : str= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.get_dummy_dataset()
lowercase__ : Optional[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= dataset
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= self.get_dummy_dataset()
lowercase__ : Tuple= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
lowercase__ : Tuple= os.path.join(self.tmpdirname , "dataset" )
lowercase__ : Optional[Any]= os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
lowercase__ : List[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case__ ) , )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
lowercase__ : Optional[int]= os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
lowercase__ : int= os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
lowercase__ : str= {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(snake_case__ , open(snake_case__ , "wb" ) )
lowercase__ : List[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
lowercase__ : Optional[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= self.get_dummy_dataset()
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Any= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Tuple= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : str= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= 1
lowercase__ : str= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : Optional[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : int= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Union[str, Any]= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : int= self.get_dummy_legacy_index_retriever()
lowercase__ : Optional[Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[Any]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : List[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : str= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
import torch
lowercase__ : str= 1
lowercase__ : Union[str, Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : str= [[5, 7], [10, 11]]
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Dict= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
lowercase__, lowercase__, lowercase__ : Optional[int]= (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , np.ndarray )
lowercase__ : Any= retriever(
snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ , return_tensors="pt" , )
lowercase__, lowercase__, lowercase__, lowercase__ : Tuple= ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.get_dpr_ctx_encoder_tokenizer()
lowercase__ : Dict= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
retriever.set_ctx_encoder_tokenizer(snake_case__ )
lowercase__ : List[str]= [[5, 7], [10, 11]]
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : List[Any]= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
self.assertEqual(
len(snake_case__ ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case__ ) # check for doc token related keys in dictionary.
| 85 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a : str = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 721 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "AutoImageProcessor"
__lowerCamelCase = "AutoTokenizer"
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
lowercase__ : List[Any]= self.image_processor
def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowercase__ : Tuple= self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
lowercase__ : str= self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
lowercase__ : Any= image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 85 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a : str = logging.get_logger(__name__)
def lowercase__(A , A=False ) ->List[Any]:
"""simple docstring"""
lowercase__ : Dict= []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
lowercase__ : Optional[Any]= [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def lowercase__(A , A , A=False ) ->int:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
lowercase__ : Tuple= ""
else:
lowercase__ : int= "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : List[Any]= state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
lowercase__ : Optional[Any]= state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ : Dict= in_proj_weight[
: config.hidden_size, :
]
lowercase__ : int= in_proj_bias[: config.hidden_size]
lowercase__ : List[Any]= in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : List[str]= in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : Union[str, Any]= in_proj_weight[
-config.hidden_size :, :
]
lowercase__ : Dict= in_proj_bias[-config.hidden_size :]
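# Note: timm stores the attention projection as one fused qkv matrix of shape
# (3 * hidden_size, hidden_size); the slices above split it into equal thirds for the
# query, key and value weights (and likewise for the fused bias vector).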
def lowercase__(A , A , A ) ->int:
"""simple docstring"""
lowercase__ : Optional[Any]= dct.pop(A )
lowercase__ : List[Any]= val
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : List[Any]= "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : Any= Image.open(requests.get(A , stream=A ).raw )
return im
@torch.no_grad()
def lowercase__(A , A ) ->int:
"""simple docstring"""
lowercase__ : List[str]= DeiTConfig()
# all deit models have fine-tuned heads
lowercase__ : List[Any]= False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
lowercase__ : Dict= 1_000
lowercase__ : Optional[Any]= "huggingface/label-files"
lowercase__ : Dict= "imagenet-1k-id2label.json"
lowercase__ : Any= json.load(open(hf_hub_download(A , A , repo_type="dataset" ) , "r" ) )
lowercase__ : Any= {int(A ): v for k, v in idalabel.items()}
lowercase__ : List[Any]= idalabel
lowercase__ : Any= {v: k for k, v in idalabel.items()}
lowercase__ : Optional[int]= int(deit_name[-6:-4] )
lowercase__ : Dict= int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
lowercase__ : str= 192
lowercase__ : int= 768
lowercase__ : Dict= 12
lowercase__ : Optional[Any]= 3
elif deit_name[9:].startswith("small" ):
lowercase__ : Dict= 384
lowercase__ : Dict= 1_536
lowercase__ : int= 12
lowercase__ : Any= 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
lowercase__ : Any= 1_024
lowercase__ : Optional[Any]= 4_096
lowercase__ : List[str]= 24
lowercase__ : str= 16
# load original model from timm
lowercase__ : str= timm.create_model(A , pretrained=A )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowercase__ : Optional[Any]= timm_model.state_dict()
lowercase__ : List[Any]= create_rename_keys(A , A )
for src, dest in rename_keys:
rename_key(A , A , A )
read_in_q_k_v(A , A , A )
# load HuggingFace model
lowercase__ : Any= DeiTForImageClassificationWithTeacher(A ).eval()
model.load_state_dict(A )
# Check outputs on an image, prepared by DeiTImageProcessor
lowercase__ : Optional[Any]= int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
lowercase__ : Dict= DeiTImageProcessor(size=A , crop_size=config.image_size )
lowercase__ : Dict= image_processor(images=prepare_img() , return_tensors="pt" )
lowercase__ : List[Any]= encoding["pixel_values"]
lowercase__ : List[str]= model(A )
lowercase__ : Optional[Any]= timm_model(A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(A , outputs.logits , atol=1e-3 )
Path(A ).mkdir(exist_ok=A )
print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(A )
if __name__ == "__main__":
a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a : Optional[Any] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 700 |
"""simple docstring"""
a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ):
lowercase__ : Union[str, Any]= f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(A )
lowercase__ : str= "".join(bin(A )[2:].zfill(8 ) for byte in data )
lowercase__ : Tuple= len(A ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ : Union[str, Any]= b"=" * ((6 - len(A ) % 6) // 2)
# Pad binary_stream with extra bits (zeros here) so that its
# length becomes a multiple of 6.
binary_stream += "0" * (6 - len(A ) % 6)
else:
lowercase__ : str= b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A ) , 6 ) ).encode()
+ padding
)
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ) and not isinstance(A , A ):
lowercase__ : str= (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(A )
# If encoded_data is a bytes-like object, make sure it contains only
# ASCII characters and convert it to a string object
if isinstance(A , A ):
try:
lowercase__ : Optional[Any]= encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ : List[Any]= encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ : str= encoded_data[:-padding]
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ : Any= [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A ) , 8 )
]
return bytes(A )
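# Quick sanity check (sketch): the encoder/decoder above should round-trip with the
# standard alphabet, e.g. encoding b"Hello" gives b"SGVsbG8=" and decoding
# b"SGVsbG8=" returns b"Hello".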
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def lowercase__(A ) ->bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase__(A ) ->list[int]:
"""simple docstring"""
lowercase__ : List[Any]= str(A )
lowercase__ : List[str]= [n]
for i in range(1 , len(A ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def lowercase__(A ) ->bool:
"""simple docstring"""
if len(str(A ) ) > 3:
if not is_prime(int(str(A )[-3:] ) ) or not is_prime(int(str(A )[:3] ) ):
return False
return True
def lowercase__(A = 11 ) ->list[int]:
"""simple docstring"""
lowercase__ : list[int]= []
lowercase__ : Any= 13
while len(A ) != count:
if validate(A ):
lowercase__ : str= list_truncated_nums(A )
if all(is_prime(A ) for i in list_nums ):
list_truncated_primes.append(A )
num += 2
return list_truncated_primes
def lowercase__() ->int:
"""simple docstring"""
return sum(compute_truncated_primes(11 ) )
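# For reference (Project Euler 37): the eleven two-way truncatable primes are
# 23, 37, 53, 73, 313, 317, 373, 797, 3137, 3797 and 739397, so the sum returned
# above is 748317.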
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(11)) = }""")
| 701 |
"""simple docstring"""
from __future__ import annotations
def lowercase__(A ) ->list[int]: # This function is recursive
"""simple docstring"""
lowercase__ : int= len(A )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowercase__ : str= array[0]
lowercase__ : Optional[Any]= False
lowercase__ : Any= 1
lowercase__ : list[int]= []
while not is_found and i < array_length:
if array[i] < pivot:
lowercase__ : Union[str, Any]= True
lowercase__ : List[str]= [element for element in array[i:] if element >= array[i]]
lowercase__ : Union[str, Any]= longest_subsequence(A )
if len(A ) > len(A ):
lowercase__ : List[str]= temp_array
else:
i += 1
lowercase__ : List[str]= [element for element in array[1:] if element >= pivot]
lowercase__ : List[str]= [pivot, *longest_subsequence(A )]
if len(A ) > len(A ):
return temp_array
else:
return longest_subseq
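# Note: this routine searches recursively for a longest subsequence whose elements are
# non-decreasing (the >= comparisons keep equal elements); because it branches on
# candidate pivots, its worst-case running time is exponential, so a dynamic-programming
# approach would scale better for long inputs.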
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | 0 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "BlipImageProcessor"
__lowerCamelCase = "AutoTokenizer"
def __init__( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
# add QFormer tokenizer
lowercase__ : Tuple= qformer_tokenizer
def __call__( self , snake_case__ = None , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
lowercase__ : Optional[Any]= BatchFeature()
if text is not None:
lowercase__ : Optional[Any]= self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
encoding.update(snake_case__ )
lowercase__ : List[Any]= self.qformer_tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
lowercase__ : Optional[int]= qformer_text_encoding.pop("input_ids" )
lowercase__ : int= qformer_text_encoding.pop("attention_mask" )
if images is not None:
lowercase__ : int= self.image_processor(snake_case__ , return_tensors=snake_case__ )
encoding.update(snake_case__ )
return encoding
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= self.tokenizer.model_input_names
lowercase__ : Any= self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCAmelCase_ ( self , snake_case__ , **snake_case__ ):
'''simple docstring'''
if os.path.isfile(snake_case__ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : Tuple= os.path.join(snake_case__ , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(snake_case__ )
return super().save_pretrained(snake_case__ , **snake_case__ )
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
lowercase__ : List[str]= AutoTokenizer.from_pretrained(snake_case__ , subfolder="qformer_tokenizer" )
lowercase__ : str= cls._get_arguments_from_pretrained(snake_case__ , **snake_case__ )
args.append(snake_case__ )
return cls(*snake_case__ )
| 702 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
a : List[str] = parser.parse_args()
a : List[str] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
a : Optional[Any] = CLIPImageProcessor()
a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
a : Tuple = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 85 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
a : Union[str, Any] = tuple[int, int]
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : set[int]= vertices
lowercase__ : dict[EdgeT, int]= {
(min(snake_case__ ), max(snake_case__ )): weight for edge, weight in edges.items()
}
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
lowercase__ : Any= weight
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Graph= Graph({min(self.vertices )} , {} )
lowercase__ : EdgeT
lowercase__ : int
lowercase__ : EdgeT
lowercase__ : int
while len(subgraph.vertices ) < len(self.vertices ):
lowercase__ : str= max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
lowercase__ : Any= edge
lowercase__ : Optional[int]= weight
subgraph.add_edge(snake_case__ , snake_case__ )
return subgraph
def lowercase__(A = "p107_network.txt" ) ->int:
"""simple docstring"""
lowercase__ : str= os.path.abspath(os.path.dirname(A ) )
lowercase__ : str= os.path.join(A , A )
lowercase__ : dict[EdgeT, int]= {}
lowercase__ : list[str]
lowercase__ : int
lowercase__ : int
with open(A ) as f:
lowercase__ : List[Any]= f.read().strip().split("\n" )
lowercase__ : Any= [line.split("," ) for line in data]
for edgea in range(1 , len(A ) ):
for edgea in range(A ):
if adjaceny_matrix[edgea][edgea] != "-":
lowercase__ : List[Any]= int(adjaceny_matrix[edgea][edgea] )
lowercase__ : Graph= Graph(set(range(len(A ) ) ) , A )
lowercase__ : Graph= graph.prims_algorithm()
lowercase__ : int= sum(graph.edges.values() )
lowercase__ : int= sum(subgraph.edges.values() )
return initial_total - optimal_total
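# Note: the saving computed above is the total weight of the original network minus the
# weight of its minimum spanning tree; prims_algorithm grows that tree greedily, always
# adding the cheapest edge that connects a new vertex to the current subgraph.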
if __name__ == "__main__":
print(F"""{solution() = }""")
| 703 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
a : Optional[Any] = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]:
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models )
lowercase__ : List[Any]= config_class.from_json_file(A )
lowercase__ : Any= True
lowercase__ : List[str]= True
print(f'''Building TensorFlow model from configuration: {config}''' )
lowercase__ : Optional[int]= model_class(A )
# Load weights from the PyTorch checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
lowercase__ : List[str]= cached_file(
A , A , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A )
if compare_with_pt_model:
lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network
lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" )
lowercase__ : Union[str, Any]= pt_model_class.from_pretrained(
pretrained_model_name_or_path=A , config=A , state_dict=A )
with torch.no_grad():
lowercase__ : str= pt_model(**pt_model.dummy_inputs )
lowercase__ : Tuple= pto[0].numpy()
lowercase__ : List[Any]= tfo[0].numpy()
lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) )
print(f'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}'''
# Save the TensorFlow model
print(f'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(A , save_format="h5" )
def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]:
"""simple docstring"""
if args_model_type is None:
lowercase__ : Tuple= list(MODEL_CLASSES.keys() )
else:
lowercase__ : Optional[int]= [args_model_type]
for j, model_type in enumerate(A , start=1 ):
print("=" * 100 )
print(f''' Converting model type {j}/{len(A )}: {model_type}''' )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowercase__ : int= list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowercase__ : Any= model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(A , A ) , start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
lowercase__ : Any= model_shortcut_name
elif only_convert_finetuned_models:
print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models )
else:
lowercase__ : Union[str, Any]= config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowercase__ : str= cached_file(A , A , force_download=not use_cached_models )
else:
lowercase__ : Any= model_shortcut_name
if os.path.isfile(A ):
lowercase__ : Dict= "converted_model"
convert_pt_checkpoint_to_tf(
model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , )
if remove_cached_files:
os.remove(A )
os.remove(A )
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
a : List[str] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
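# --- Illustrative invocations (assumption: this script is exposed as the usual
# --- `convert_pytorch_checkpoint_to_tf2.py` entry point; the checkpoint, config and output
# --- paths below are placeholders, not files shipped with the library).
#
#   # Convert one local PyTorch checkpoint and compare the TF and PT outputs:
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path ./bert_model.bin \
#       --config_file ./bert_config.json \
#       --tf_dump_path ./tf_dump \
#       --compare_with_pt_model
#
#   # Convert every known checkpoint of one architecture, reusing any cached downloads:
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type gpt2 \
#       --tf_dump_path ./tf_dump \
#       --use_cached_models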
| 85 | 0 |
"""simple docstring"""
from math import pow
def lowercase__(A , A , A , A , A , ) ->tuple[int, int]:
"""simple docstring"""
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
lowercase__ : int= int(pow(A , A ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
lowercase__ : Tuple= backtrack(
A , A , current_number + 1 , A , A )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
lowercase__ : str= backtrack(
A , A , current_number + 1 , A , A )
return current_sum, solutions_count
def lowercase__(A , A ) ->int:
"""simple docstring"""
if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
raise ValueError(
"Invalid input\n"
"needed_sum must be between 1 and 1000, power between 2 and 10." )
return backtrack(A , A , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
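# --- Illustrative cross-check (hypothetical helper, not part of the original module).
# --- The backtracking above counts the ways to write `needed_sum` as a sum of `power`-th
# --- powers of *distinct* natural numbers; the brute force below does the same with
# --- itertools so the two can be compared on small inputs.
def _brute_force_power_sum_count(needed_sum: int, power: int) -> int:
    from itertools import combinations
    bases = [b for b in range(1, needed_sum + 1) if b**power <= needed_sum]
    return sum(
        1
        for r in range(1, len(bases) + 1)
        for combo in combinations(bases, r)
        if sum(b**power for b in combo) == needed_sum
    )
# 13 = 2**2 + 3**2 is the only way to write 13 as a sum of distinct squares, while 100 has
# three such representations (10**2, 6**2 + 8**2, and 1 + 9 + 16 + 25 + 49).
assert _brute_force_power_sum_count(13, 2) == 1
assert _brute_force_power_sum_count(100, 2) == 3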
| 704 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 85 | 0 |
from __future__ import annotations
def lowercase__(A , A ) ->tuple[int, int]:
"""simple docstring"""
if b == 0:
return (1, 0)
(lowercase__) : Union[str, Any]= extended_euclid(A , a % b )
lowercase__ : int= a // b
return (y, x - k * y)
def lowercase__(A , A , A , A ) ->int:
"""simple docstring"""
(lowercase__) : int= extended_euclid(A , A )
lowercase__ : Optional[Any]= na * na
lowercase__ : Any= ra * x * na + ra * y * na
return (n % m + m) % m
def lowercase__(A , A ) ->int:
"""simple docstring"""
(lowercase__) : Optional[int]= extended_euclid(A , A )
if b < 0:
lowercase__ : int= (b % n + n) % n
return b
def lowercase__(A , A , A , A ) ->int:
"""simple docstring"""
lowercase__ : List[str]= invert_modulo(A , A ), invert_modulo(A , A )
lowercase__ : List[str]= na * na
lowercase__ : str= ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
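# --- Illustrative cross-check (hypothetical helper, not part of the original module). The
# --- routines above solve x = r1 (mod n1), x = r2 (mod n2) for coprime moduli; the sketch
# --- below solves the same system with Python 3.8+'s built-in modular inverse pow(a, -1, m)
# --- so the arithmetic can be verified by hand.
def _crt_pair(ra: int, na: int, rb: int, nb: int) -> int:
    inv_na_mod_nb = pow(na, -1, nb)  # inverse of n1 modulo n2
    inv_nb_mod_na = pow(nb, -1, na)  # inverse of n2 modulo n1
    return (ra * nb * inv_nb_mod_na + rb * na * inv_na_mod_nb) % (na * nb)
# x = 1 (mod 5) and x = 2 (mod 7) has the unique solution 16 modulo 35.
assert _crt_pair(1, 5, 2, 7) == 16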
| 705 |
"""simple docstring"""
def lowercase__(A ) ->list:
"""simple docstring"""
if n_term == "":
return []
lowercase__ : list= []
for temp in range(int(A ) ):
series.append(f'''1/{temp + 1}''' if series else "1" )
return series
if __name__ == "__main__":
a : Dict = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
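# --- Illustrative note (hypothetical snippet, not part of the original module): the builder
# --- above returns the terms as strings, e.g. ['1', '1/2', '1/3', '1/4', '1/5'] for n = 5;
# --- numerically that partial sum equals 137/60.
assert abs(sum(1 / k for k in range(1, 6)) - 137 / 60) < 1e-12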
| 85 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def lowercase__(A , A , A ) ->dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
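# --- Illustrative worked example (hypothetical values, not taken from this module).
# --- The relation used above is X_L = 2 * pi * f * L, so a 10 mH inductor driven at 60 Hz
# --- has a reactance of about 3.77 ohm, and dividing that reactance by 2 * pi * f recovers L.
from math import pi as _pi
assert abs(2 * _pi * 60 * 10e-3 - 3.769911184307752) < 1e-9
assert abs(3.769911184307752 / (2 * _pi * 60) - 10e-3) < 1e-9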
| 706 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : str = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "big_bird"
def __init__( self , snake_case__=50358 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=4096 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=66 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=64 , snake_case__=3 , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , sep_token_id=snake_case__ , **snake_case__ , )
lowercase__ : Dict= vocab_size
lowercase__ : Optional[int]= max_position_embeddings
lowercase__ : List[Any]= hidden_size
lowercase__ : List[str]= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : Optional[int]= intermediate_size
lowercase__ : Optional[int]= hidden_act
lowercase__ : Tuple= hidden_dropout_prob
lowercase__ : int= attention_probs_dropout_prob
lowercase__ : int= initializer_range
lowercase__ : List[Any]= type_vocab_size
lowercase__ : Union[str, Any]= layer_norm_eps
lowercase__ : Optional[Any]= use_cache
lowercase__ : Union[str, Any]= rescale_embeddings
lowercase__ : Union[str, Any]= attention_type
lowercase__ : Any= use_bias
lowercase__ : List[Any]= block_size
lowercase__ : Optional[Any]= num_random_blocks
lowercase__ : Optional[int]= classifier_dropout
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ : List[Any]= {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ : Tuple= {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
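# --- Illustrative usage sketch (assumption: the published `transformers` package exposes the
# --- classes defined here as BigBirdConfig / BigBirdOnnxConfig; the keyword values below are
# --- placeholders rather than recommendations).
#
#   from transformers import BigBirdConfig
#
#   sparse_cfg = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
#   dense_cfg = BigBirdConfig(attention_type="original_full")  # fall back to full attention
#
# For the default task, the ONNX config above maps both "input_ids" and "attention_mask" to the
# dynamic axes {0: "batch", 1: "sequence"}; the "multiple-choice" task adds a "choice" axis.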
| 85 | 0 |
"""simple docstring"""
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__ : Optional[int]= [1]
lowercase__ : List[Any]= 0, 0, 0
lowercase__ : Dict= ugly_nums[ia] * 2
lowercase__ : Optional[int]= ugly_nums[ia] * 3
lowercase__ : str= ugly_nums[ia] * 5
for _ in range(1 , A ):
lowercase__ : Tuple= min(A , A , A )
ugly_nums.append(A )
if next_num == next_a:
ia += 1
lowercase__ : Any= ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
lowercase__ : Dict= ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
lowercase__ : List[str]= ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"""{ugly_numbers(200) = }""")
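# --- Illustrative cross-check (hypothetical helper, not part of the original module).
# --- An ugly number has no prime factors other than 2, 3 and 5; the first ten are
# --- 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the 10th ugly number is 12 and the 15th is 24.
def _is_ugly(n: int) -> bool:
    for p in (2, 3, 5):
        while n % p == 0:
            n //= p
    return n == 1
_uglies = [n for n in range(1, 100) if _is_ugly(n)]
assert _uglies[:10] == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]
assert _uglies[14] == 24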
| 707 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 85 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : List[Any] = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
a : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 708 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def lowercase__(A , A ) ->List[Any]:
"""simple docstring"""
lowercase__ : str= []
for part_id in partition_order:
lowercase__ : int= df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(A ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : Optional[Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple= spark.range(100 ).repartition(1 )
lowercase__ : Dict= Spark(A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Union[str, Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Dict= spark.range(10 ).repartition(2 )
lowercase__ : Optional[Any]= [1, 0]
lowercase__ : List[str]= _generate_iterable_examples(A , A ) # Reverse the partitions.
lowercase__ : int= _get_expected_row_ids_and_row_dicts_for_partition_order(A , A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
lowercase__, lowercase__ : Any= expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->int:
"""simple docstring"""
lowercase__ : int= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Dict= spark.range(10 ).repartition(1 )
lowercase__ : str= SparkExamplesIterable(A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(A ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : List[str]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : int= spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
lowercase__ : Optional[Any]= lambda A : x.reverse()
lowercase__ : Tuple= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] )
lowercase__ : List[str]= SparkExamplesIterable(A ).shuffle_data_sources(A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : str= expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Any:
"""simple docstring"""
lowercase__ : Dict= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Union[str, Any]= spark.range(20 ).repartition(4 )
# Partitions 0 and 2
lowercase__ : Optional[int]= SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : Union[str, Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] )
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : Tuple= expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
lowercase__ : Tuple= SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : List[Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] )
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : Dict= expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Any= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple= spark.range(100 ).repartition(1 )
lowercase__ : Optional[int]= Spark(A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 85 | 0 |
from ..utils import DummyObject, requires_backends
class __UpperCAmelCase( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["torch", "torchsde"]
def __init__( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ):
'''simple docstring'''
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ):
'''simple docstring'''
requires_backends(cls , ["torch", "torchsde"] )
| 709 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ):
'''simple docstring'''
lowercase__ : Optional[int]= parent
lowercase__ : Tuple= batch_size
lowercase__ : Tuple= seq_length
lowercase__ : str= is_training
lowercase__ : str= use_input_lengths
lowercase__ : Any= use_token_type_ids
lowercase__ : List[Any]= use_labels
lowercase__ : Optional[int]= gelu_activation
lowercase__ : str= sinusoidal_embeddings
lowercase__ : List[str]= causal
lowercase__ : Any= asm
lowercase__ : Optional[int]= n_langs
lowercase__ : Union[str, Any]= vocab_size
lowercase__ : int= n_special
lowercase__ : Any= hidden_size
lowercase__ : int= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : List[str]= hidden_dropout_prob
lowercase__ : str= attention_probs_dropout_prob
lowercase__ : Any= max_position_embeddings
lowercase__ : List[Any]= type_vocab_size
lowercase__ : int= type_sequence_label_size
lowercase__ : Any= initializer_range
lowercase__ : Optional[int]= num_labels
lowercase__ : Union[str, Any]= num_choices
lowercase__ : List[Any]= summary_type
lowercase__ : Optional[int]= use_proj
lowercase__ : int= scope
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Tuple= None
if self.use_input_lengths:
lowercase__ : List[Any]= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ : Tuple= None
if self.use_token_type_ids:
lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ : str= None
lowercase__ : Tuple= None
lowercase__ : Dict= None
if self.use_labels:
lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float()
lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any]= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : Any= FlaubertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ )
lowercase__ : str= model(snake_case__ , langs=snake_case__ )
lowercase__ : Any= model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : List[str]= model(snake_case__ )
lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= model(snake_case__ )
lowercase__ : Any= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , )
lowercase__ : List[str]= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , )
((lowercase__), ) : Optional[Any]= result_with_labels.to_tuple()
lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
((lowercase__), ) : List[Any]= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ )
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= self.num_labels
lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : int= self.num_choices
lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Any= model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) : Any= config_and_inputs
lowercase__ : Tuple= {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__lowerCamelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowercase__ : List[Any]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
lowercase__ : List[str]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModelTester(self )
lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@slow
@require_torch_gpu
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ : int= True
lowercase__ : List[Any]= model_class(config=snake_case__ )
lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ )
lowercase__ : Dict= torch.jit.trace(
snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) )
lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ )
loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) )
@require_torch
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
lowercase__ : Optional[int]= model(snake_case__ )[0]
lowercase__ : Optional[int]= torch.Size((1, 11, 768) )
self.assertEqual(output.shape , snake_case__ )
lowercase__ : Dict= torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
| 85 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ):
'''simple docstring'''
lowercase__ : Optional[int]= parent
lowercase__ : Tuple= batch_size
lowercase__ : Tuple= seq_length
lowercase__ : str= is_training
lowercase__ : str= use_input_lengths
lowercase__ : Any= use_token_type_ids
lowercase__ : List[Any]= use_labels
lowercase__ : Optional[int]= gelu_activation
lowercase__ : str= sinusoidal_embeddings
lowercase__ : List[str]= causal
lowercase__ : Any= asm
lowercase__ : Optional[int]= n_langs
lowercase__ : Union[str, Any]= vocab_size
lowercase__ : int= n_special
lowercase__ : Any= hidden_size
lowercase__ : int= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : List[str]= hidden_dropout_prob
lowercase__ : str= attention_probs_dropout_prob
lowercase__ : Any= max_position_embeddings
lowercase__ : List[Any]= type_vocab_size
lowercase__ : int= type_sequence_label_size
lowercase__ : Any= initializer_range
lowercase__ : Optional[int]= num_labels
lowercase__ : Union[str, Any]= num_choices
lowercase__ : List[Any]= summary_type
lowercase__ : Optional[int]= use_proj
lowercase__ : int= scope
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Tuple= None
if self.use_input_lengths:
lowercase__ : List[Any]= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ : Tuple= None
if self.use_token_type_ids:
lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ : str= None
lowercase__ : Tuple= None
lowercase__ : Dict= None
if self.use_labels:
lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float()
lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any]= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : Any= FlaubertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ )
lowercase__ : str= model(snake_case__ , langs=snake_case__ )
lowercase__ : Any= model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : List[str]= model(snake_case__ )
lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= model(snake_case__ )
lowercase__ : Any= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , )
lowercase__ : List[str]= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , )
(lowercase__ ) : Optional[Any]= result_with_labels.to_tuple()
lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
(lowercase__ ) : List[Any]= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ )
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= self.num_labels
lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : int= self.num_choices
lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Any= model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.prepare_config_and_inputs()
(
lowercase__
) : Any= config_and_inputs
lowercase__ : Tuple= {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__lowerCamelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowercase__ : List[Any]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
lowercase__ : List[str]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModelTester(self )
lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@slow
@require_torch_gpu
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ : int= True
lowercase__ : List[Any]= model_class(config=snake_case__ )
lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ )
lowercase__ : Dict= torch.jit.trace(
snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) )
lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ )
loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) )
@require_torch
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
lowercase__ : Optional[int]= model(snake_case__ )[0]
lowercase__ : Optional[int]= torch.Size((1, 11, 768) )
self.assertEqual(output.shape , snake_case__ )
lowercase__ : Dict= torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
| 710 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = None
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 2
@register_to_config
def __init__( self , snake_case__ = 0.02 , snake_case__ = 100 , snake_case__ = 1.0_07 , snake_case__ = 80 , snake_case__ = 0.05 , snake_case__ = 50 , ):
'''simple docstring'''
# standard deviation of the initial noise distribution
lowercase__ : int= sigma_max
# setable values
lowercase__ : int= None
lowercase__ : np.IntTensor= None
lowercase__ : torch.FloatTensor= None # sigma(t_i)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
return sample
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__ : List[Any]= num_inference_steps
lowercase__ : Any= np.arange(0 , self.num_inference_steps )[::-1].copy()
lowercase__ : Tuple= torch.from_numpy(snake_case__ ).to(snake_case__ )
lowercase__ : Union[str, Any]= [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
lowercase__ : int= torch.tensor(snake_case__ , dtype=torch.floataa , device=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
lowercase__ : Optional[Any]= min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
lowercase__ : str= 0
# sample eps ~ N(0, S_noise^2 * I)
lowercase__ : List[Any]= self.config.s_noise * randn_tensor(sample.shape , generator=snake_case__ ).to(sample.device )
lowercase__ : str= sigma + gamma * sigma
lowercase__ : Any= sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
'''simple docstring'''
lowercase__ : Union[str, Any]= sample_hat + sigma_hat * model_output
lowercase__ : Optional[int]= (sample_hat - pred_original_sample) / sigma_hat
lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
'''simple docstring'''
lowercase__ : int= sample_prev + sigma_prev * model_output
lowercase__ : Optional[int]= (sample_prev - pred_original_sample) / sigma_prev
lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
raise NotImplementedError()
| 85 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" )
lowercase__ : str= {
"input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.intaa ), # "My dog is cute"
"attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
lowercase__ : Union[str, Any]= model(snake_case__ )["last_hidden_state"]
lowercase__ : Optional[int]= tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice.
lowercase__ : Tuple= tf.convert_to_tensor(
[
[
[0.0_68_17_62, 0.10_89_44_51, 0.06_77_25_04],
[-0.06_42_36_68, 0.02_36_66_15, 0.04_32_93_44],
[-0.06_05_72_95, 0.09_97_41_35, -0.00_07_05_84],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 711 |
"""simple docstring"""
from ....utils import logging
a : List[str] = logging.get_logger(__name__)
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=None , snake_case__=2048 ):
'''simple docstring'''
lowercase__ : Dict= config.__dict__
lowercase__ : str= modal_hidden_size
if num_labels:
lowercase__ : List[str]= num_labels
| 85 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowercase__(A ) ->float:
"""simple docstring"""
if not nums:
raise ValueError("List is empty" )
return sum(A ) / len(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__ : Optional[int]= []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def lowercase__(A , A ) ->Any:
"""simple docstring"""
lowercase__ : Any= []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def lowercase__(A ) ->List[Any]:
"""simple docstring"""
lowercase__ : Dict= []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token") )
return token
def lowercase__() ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict= []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def lowercase__(A , A , A , A ) ->Optional[int]:
"""simple docstring"""
lowercase__ : List[str]= "imagenet-1k-id2label.json"
lowercase__ : List[str]= 1_000
lowercase__ : Tuple= "huggingface/label-files"
lowercase__ : int= num_labels
lowercase__ : int= json.load(open(cached_download(hf_hub_url(A , A , repo_type="dataset" ) ) , "r" ) )
lowercase__ : str= {int(A ): v for k, v in idalabel.items()}
lowercase__ : Optional[int]= idalabel
lowercase__ : Union[str, Any]= {v: k for k, v in idalabel.items()}
lowercase__ : Tuple= CvtConfig(num_labels=A , idalabel=A , labelaid=A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
lowercase__ : int= [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
lowercase__ : Union[str, Any]= [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowercase__ : Optional[Any]= [2, 2, 20]
lowercase__ : Optional[Any]= [3, 12, 16]
lowercase__ : List[str]= [192, 768, 1_024]
lowercase__ : List[str]= CvtForImageClassification(A )
lowercase__ : Any= AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
lowercase__ : Dict= image_size
lowercase__ : int= torch.load(A , map_location=torch.device("cpu" ) )
lowercase__ : Optional[Any]= OrderedDict()
lowercase__ : Tuple= []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowercase__ : Optional[int]= list_of_state_dict + cls_token(A )
lowercase__ : List[str]= list_of_state_dict + embeddings(A )
for cnt in range(config.depth[idx] ):
lowercase__ : Dict= list_of_state_dict + attention(A , A )
lowercase__ : Optional[Any]= list_of_state_dict + final()
for gg in list_of_state_dict:
print(A )
for i in range(len(A ) ):
lowercase__ : str= original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
a : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a : Optional[int] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 85 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a : Dict = logging.get_logger(__name__)
a : int = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "swin"
__lowerCamelCase = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=224 , snake_case__=4 , snake_case__=3 , snake_case__=96 , snake_case__=[2, 2, 6, 2] , snake_case__=[3, 6, 12, 24] , snake_case__=7 , snake_case__=4.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=False , snake_case__=0.02 , snake_case__=1e-5 , snake_case__=32 , snake_case__=None , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase__ : Optional[Any]= image_size
lowercase__ : Union[str, Any]= patch_size
lowercase__ : Dict= num_channels
lowercase__ : Optional[Any]= embed_dim
lowercase__ : Optional[int]= depths
lowercase__ : str= len(snake_case__ )
lowercase__ : Union[str, Any]= num_heads
lowercase__ : Dict= window_size
lowercase__ : str= mlp_ratio
lowercase__ : List[Any]= qkv_bias
lowercase__ : int= hidden_dropout_prob
lowercase__ : Optional[Any]= attention_probs_dropout_prob
lowercase__ : str= drop_path_rate
lowercase__ : Tuple= hidden_act
lowercase__ : Union[str, Any]= use_absolute_embeddings
lowercase__ : Union[str, Any]= layer_norm_eps
lowercase__ : List[Any]= initializer_range
lowercase__ : Dict= encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase__ : Union[str, Any]= int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
lowercase__ : Optional[Any]= ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(snake_case__ ) + 1 )]
lowercase__ : Optional[Any]= get_aligned_output_features_output_indices(
out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = version.parse("1.11" )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return 1e-4
| 713 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=snake_case__ , scheduler=snake_case__ )
@torch.no_grad()
def __call__( self , snake_case__ = 1 , snake_case__ = 2000 , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.unet.config.sample_size
lowercase__ : Dict= (batch_size, 3, img_size, img_size)
lowercase__ : List[Any]= self.unet
lowercase__ : Tuple= randn_tensor(snake_case__ , generator=snake_case__ ) * self.scheduler.init_noise_sigma
lowercase__ : Tuple= sample.to(self.device )
self.scheduler.set_timesteps(snake_case__ )
self.scheduler.set_sigmas(snake_case__ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ : Optional[Any]= self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowercase__ : List[Any]= self.unet(snake_case__ , snake_case__ ).sample
lowercase__ : List[Any]= self.scheduler.step_correct(snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
# prediction step
lowercase__ : List[str]= model(snake_case__ , snake_case__ ).sample
lowercase__ : Tuple= self.scheduler.step_pred(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ )
lowercase__, lowercase__ : Tuple= output.prev_sample, output.prev_sample_mean
lowercase__ : List[str]= sample_mean.clamp(0 , 1 )
lowercase__ : Union[str, Any]= sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ : str= self.numpy_to_pil(snake_case__ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=snake_case__ )
| 85 | 0 |
def lowercase__(A , A ) ->int:
"""simple docstring"""
return int((input_a, input_a).count(0 ) == 0 )
def lowercase__() ->None:
"""simple docstring"""
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 714 |
"""simple docstring"""
def lowercase__(A ) ->list[int]:
"""simple docstring"""
lowercase__ : List[str]= len(A )
for i in range(A ):
for j in range(i + 1 , A ):
if numbers[j] < numbers[i]:
lowercase__, lowercase__ : List[str]= numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
a : Dict = input("""Enter numbers separated by a comma:\n""").strip()
a : List[str] = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 85 | 0 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=None , snake_case__=True , snake_case__=None , **snake_case__ ):
'''simple docstring'''
lowercase__ : Optional[int]= parent
lowercase__ : int= config_class
lowercase__ : str= has_text_modality
lowercase__ : List[Any]= kwargs
lowercase__ : Union[str, Any]= common_properties
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.config_class(**self.inputs_dict )
lowercase__ : Any= (
["hidden_size", "num_attention_heads", "num_hidden_layers"]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["vocab_size"] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(snake_case__ , snake_case__ ) , msg=F'''`{prop}` does not exist''' )
        # Test that config has the common properties as setters
for idx, name in enumerate(snake_case__ ):
try:
setattr(snake_case__ , snake_case__ , snake_case__ )
self.parent.assertEqual(
getattr(snake_case__ , snake_case__ ) , snake_case__ , msg=F'''`{name} value {idx} expected, but was {getattr(snake_case__ , snake_case__ )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(snake_case__ ):
try:
lowercase__ : Tuple= self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(snake_case__ , snake_case__ ) , snake_case__ , msg=F'''`{name} value {idx} expected, but was {getattr(snake_case__ , snake_case__ )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.config_class(**self.inputs_dict )
lowercase__ : str= json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : List[Any]= os.path.join(snake_case__ , "config.json" )
config_first.to_json_file(snake_case__ )
lowercase__ : Any= self.config_class.from_json_file(snake_case__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(snake_case__ )
lowercase__ : Tuple= self.config_class.from_pretrained(snake_case__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= self.config_class(**self.inputs_dict )
lowercase__ : List[str]= "test"
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : int= os.path.join(snake_case__ , snake_case__ )
config_first.save_pretrained(snake_case__ )
lowercase__ : List[str]= self.config_class.from_pretrained(snake_case__ , subfolder=snake_case__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
lowercase__ : Dict= 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
if self.config_class.is_composition:
return
lowercase__ : Optional[int]= self.config_class()
self.parent.assertIsNotNone(snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= copy.deepcopy(snake_case__ )
lowercase__ : Union[str, Any]= self.config_class(**snake_case__ )
lowercase__ : str= []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
elif getattr(snake_case__ , snake_case__ ) != value:
wrong_values.append((key, getattr(snake_case__ , snake_case__ ), value) )
if len(snake_case__ ) > 0:
lowercase__ : str= "\n".join([F'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] )
raise ValueError(F'''The following keys were not properly set in the config:\n{errors}''' )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 715 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def lowercase__(A ) ->bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase__() ->Iterator[int]:
"""simple docstring"""
lowercase__ : Union[str, Any]= 2
while True:
if is_prime(A ):
yield num
num += 1
def lowercase__(A = 2_000_000 ) ->int:
"""simple docstring"""
return sum(takewhile(lambda A : x < n , prime_generator() ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 0 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def lowercase__(A ) ->Optional[Any]:
"""simple docstring"""
if isinstance(A , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class __UpperCAmelCase:
"""simple docstring"""
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Optional[int]= np.abs((a - b) ).max()
self.assertLessEqual(snake_case__ , snake_case__ , F'''Difference between torch and flax is {diff} (>= {tol}).''' )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , **snake_case__ ):
'''simple docstring'''
lowercase__ : List[Any]= VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ , snake_case__ )
lowercase__ : Any= FlaxVisionTextDualEncoderModel(snake_case__ )
lowercase__ : List[str]= model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , **snake_case__ ):
'''simple docstring'''
lowercase__ : Any= self.get_vision_text_model(snake_case__ , snake_case__ )
lowercase__ : Any= {"vision_model": vision_model, "text_model": text_model}
lowercase__ : Optional[Any]= FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ )
lowercase__ : int= model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , **snake_case__ ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.get_vision_text_model(snake_case__ , snake_case__ )
lowercase__ : int= {"vision_model": vision_model, "text_model": text_model}
lowercase__ : Tuple= FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ )
lowercase__ : Optional[int]= model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
lowercase__ : int= output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case__ )
lowercase__ : Any= FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ )
lowercase__ : Union[str, Any]= model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
lowercase__ : Optional[Any]= after_output[0]
lowercase__ : List[str]= np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case__ , 1e-3 )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , **snake_case__ ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.get_vision_text_model(snake_case__ , snake_case__ )
lowercase__ : Tuple= {"vision_model": vision_model, "text_model": text_model}
lowercase__ : Any= FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ )
lowercase__ : Union[str, Any]= model(
input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , output_attentions=snake_case__ )
lowercase__ : Tuple= output.vision_model_output.attentions
self.assertEqual(len(snake_case__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ : List[Any]= to_atuple(vision_model.config.image_size )
lowercase__ : Dict= to_atuple(vision_model.config.patch_size )
lowercase__ : Tuple= (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowercase__ : List[str]= num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowercase__ : int= output.text_model_output.attentions
self.assertEqual(len(snake_case__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
pt_model.to(snake_case__ )
pt_model.eval()
# prepare inputs
lowercase__ : Dict= inputs_dict
lowercase__ : Tuple= {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowercase__ : Union[str, Any]= pt_model(**snake_case__ ).to_tuple()
lowercase__ : Optional[int]= fx_model(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(snake_case__ , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(snake_case__ )
lowercase__ : Union[str, Any]= FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ , from_pt=snake_case__ )
lowercase__ : Union[str, Any]= fx_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(snake_case__ , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(snake_case__ )
lowercase__ : Optional[Any]= VisionTextDualEncoderModel.from_pretrained(snake_case__ , from_flax=snake_case__ )
pt_model_loaded.to(snake_case__ )
pt_model_loaded.eval()
with torch.no_grad():
lowercase__ : List[str]= pt_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(snake_case__ , pt_output_loaded.numpy() , 4e-2 )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : str= VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ , snake_case__ )
lowercase__ : Tuple= VisionTextDualEncoderModel(snake_case__ )
lowercase__ : Optional[int]= FlaxVisionTextDualEncoderModel(snake_case__ )
lowercase__ : Optional[Any]= convert_pytorch_state_dict_to_flax(pt_model.state_dict() , snake_case__ )
lowercase__ : Optional[int]= fx_state
self.check_pt_flax_equivalence(snake_case__ , snake_case__ , snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Union[str, Any]= VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ , snake_case__ )
lowercase__ : Dict= VisionTextDualEncoderModel(snake_case__ )
lowercase__ : Optional[Any]= FlaxVisionTextDualEncoderModel(snake_case__ )
lowercase__ : Tuple= load_flax_weights_in_pytorch_model(snake_case__ , fx_model.params )
self.check_pt_flax_equivalence(snake_case__ , snake_case__ , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= self.prepare_config_and_inputs()
self.check_save_load(**snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**snake_case__ )
@is_pt_flax_cross_test
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.prepare_config_and_inputs()
lowercase__ : Dict= config_inputs_dict.pop("vision_config" )
lowercase__ : List[str]= config_inputs_dict.pop("text_config" )
lowercase__ : Optional[int]= config_inputs_dict
self.check_equivalence_pt_to_flax(snake_case__ , snake_case__ , snake_case__ )
self.check_equivalence_flax_to_pt(snake_case__ , snake_case__ , snake_case__ )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.get_pretrained_model_and_inputs()
lowercase__ : Optional[Any]= model_a(**snake_case__ )
lowercase__ : List[str]= outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(snake_case__ )
lowercase__ : Optional[int]= FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ )
lowercase__ : Union[str, Any]= model_a(**snake_case__ )
lowercase__ : Optional[Any]= after_outputs[0]
lowercase__ : Optional[Any]= np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case__ , 1e-5 )
@require_flax
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=snake_case__ , text_from_pt=snake_case__ , )
lowercase__ : Tuple= 13
lowercase__ : List[Any]= floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowercase__ : List[Any]= ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowercase__ : List[str]= random_attention_mask([batch_size, 4] )
lowercase__ : List[str]= {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Union[str, Any]= FlaxViTModel(snake_case__ )
lowercase__ : Optional[Any]= FlaxBertModel(snake_case__ )
return vision_model, text_model
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= FlaxViTModelTester(self )
lowercase__ : List[str]= FlaxBertModelTester(self )
lowercase__ : Tuple= vit_model_tester.prepare_config_and_inputs()
lowercase__ : List[str]= bert_model_tester.prepare_config_and_inputs()
lowercase__ : str= vision_config_and_inputs
lowercase__ : str= text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=snake_case__ , text_from_pt=snake_case__ , )
lowercase__ : Tuple= 13
lowercase__ : Any= floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowercase__ : List[Any]= ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowercase__ : List[Any]= random_attention_mask([batch_size, 4] )
lowercase__ : Tuple= {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Any= FlaxCLIPVisionModel(snake_case__ )
lowercase__ : Optional[int]= FlaxBertModel(snake_case__ )
return vision_model, text_model
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= FlaxCLIPVisionModelTester(self )
lowercase__ : Union[str, Any]= FlaxBertModelTester(self )
lowercase__ : Any= clip_model_tester.prepare_config_and_inputs()
lowercase__ : Union[str, Any]= bert_model_tester.prepare_config_and_inputs()
lowercase__ : Optional[int]= vision_config_and_inputs
lowercase__ : Dict= text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 )
lowercase__ : Tuple= VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
lowercase__ : List[str]= Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowercase__ : int= processor(
text=["una foto di un gatto", "una foto di un cane"] , images=snake_case__ , padding=snake_case__ , return_tensors="np" )
lowercase__ : Any= model(**snake_case__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowercase__ : Tuple= np.array([[1.2_28_47_27, 0.3_10_41_22]] )
self.assertTrue(np.allclose(outputs.logits_per_image , snake_case__ , atol=1e-3 ) )
| 716 |
"""simple docstring"""
def lowercase__(A ) ->bool:
"""simple docstring"""
lowercase__ : Tuple= (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowercase__(A = 5_000 ) ->int:
"""simple docstring"""
lowercase__ : str= [(i * (3 * i - 1)) // 2 for i in range(1 , A )]
for i, pentagonal_i in enumerate(A ):
for j in range(A , len(A ) ):
lowercase__ : List[Any]= pentagonal_nums[j]
lowercase__ : int= pentagonal_i + pentagonal_j
lowercase__ : Optional[int]= pentagonal_j - pentagonal_i
if is_pentagonal(A ) and is_pentagonal(A ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 0 |
"""simple docstring"""
import requests
a : Union[str, Any] = """YOUR API KEY"""
def lowercase__(A , A = giphy_api_key ) ->list:
"""simple docstring"""
lowercase__ : str= "+".join(query.split() )
lowercase__ : Optional[int]= f'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
lowercase__ : Dict= requests.get(A ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
| 717 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Union[str, Any] = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_text_model"
__lowerCamelCase = ["past_key_values"]
__lowerCamelCase = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=50244 , snake_case__=768 , snake_case__=64 , snake_case__=2048 , snake_case__=12 , snake_case__=12 , snake_case__=32 , snake_case__=128 , snake_case__=0.1 , snake_case__=1e-6 , snake_case__=1.0 , snake_case__="gelu_new" , snake_case__=0 , snake_case__=False , snake_case__=0 , snake_case__=1 , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
lowercase__ : int= vocab_size
lowercase__ : Optional[Any]= hidden_size
lowercase__ : Tuple= d_kv
lowercase__ : Optional[int]= d_ff
lowercase__ : Any= num_layers
lowercase__ : Dict= num_heads
lowercase__ : List[Any]= relative_attention_num_buckets
lowercase__ : Optional[Any]= relative_attention_max_distance
lowercase__ : Dict= dropout_rate
lowercase__ : Tuple= layer_norm_epsilon
lowercase__ : str= initializer_factor
lowercase__ : Any= use_cache
lowercase__ : Optional[int]= eos_token_id
lowercase__ : str= decoder_start_token_id
# for backwards compatibility
lowercase__ : Optional[Any]= dense_act_fn
super().__init__(
pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , )
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : str= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : str= config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_vision_model"
def __init__( self , snake_case__=768 , snake_case__=768 , snake_case__=2048 , snake_case__=64 , snake_case__=12 , snake_case__=12 , snake_case__="gelu_new" , snake_case__=1e-6 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1e-10 , snake_case__=1.0 , snake_case__=4096 , snake_case__=32 , snake_case__=128 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase__ : Tuple= hidden_size
lowercase__ : Tuple= patch_embed_hidden_size
lowercase__ : Optional[Any]= d_ff
lowercase__ : Dict= dropout_rate
lowercase__ : Any= num_hidden_layers
lowercase__ : Optional[int]= num_attention_heads
lowercase__ : Dict= initializer_range
lowercase__ : Tuple= initializer_factor
lowercase__ : Tuple= attention_dropout
lowercase__ : Optional[Any]= layer_norm_eps
lowercase__ : List[Any]= dense_act_fn
lowercase__ : str= seq_len
lowercase__ : List[str]= relative_attention_num_buckets
lowercase__ : Union[str, Any]= relative_attention_max_distance
lowercase__ : Dict= d_kv
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : int= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : Union[str, Any]= config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct"
__lowerCamelCase = True
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=1.0 , snake_case__=0.02 , snake_case__=False , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ )
if text_config is None:
lowercase__ : List[Any]= {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
lowercase__ : str= {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
lowercase__ : str= PixaStructTextConfig(**snake_case__ )
lowercase__ : Dict= PixaStructVisionConfig(**snake_case__ )
lowercase__ : int= self.text_config.decoder_start_token_id
lowercase__ : List[Any]= self.text_config.pad_token_id
lowercase__ : Any= self.text_config.eos_token_id
lowercase__ : Any= initializer_factor
lowercase__ : int= initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : Dict= is_vqa
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= copy.deepcopy(self.__dict__ )
lowercase__ : str= self.text_config.to_dict()
lowercase__ : str= self.vision_config.to_dict()
lowercase__ : List[str]= self.__class__.model_type
return output
| 85 | 0 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
a : List[str] = Lock()
def lowercase__(A , A , A , A , A , A , A ) ->Dict:
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(A )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
lowercase__ : Dict= rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
lowercase__ : Union[str, Any]= min(A , A )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(A )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
lowercase__ : Optional[Any]= lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
lowercase__ : str= max(A , A )
# after all swaps are performed, send the values back to main
result_pipe[1].send(A )
def lowercase__(A ) ->Optional[Any]:
"""simple docstring"""
lowercase__ : Dict= []
lowercase__ : Any= []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
lowercase__ : List[str]= Pipe()
lowercase__ : List[Any]= Pipe()
process_array_.append(
Process(
target=A , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
lowercase__ : List[Any]= temp_rs
lowercase__ : Dict= temp_rr
for i in range(1 , len(A ) - 1 ):
lowercase__ : Optional[int]= Pipe()
lowercase__ : Optional[int]= Pipe()
process_array_.append(
Process(
target=A , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
lowercase__ : int= temp_rs
lowercase__ : Any= temp_rr
process_array_.append(
Process(
target=A , args=(
len(A ) - 1,
arr[len(A ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(A ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(A ) ):
lowercase__ : List[str]= result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowercase__() ->List[Any]:
"""simple docstring"""
lowercase__ : Optional[int]= list(range(10 , 0 , -1 ) )
print("Initial List" )
print(*A )
lowercase__ : List[Any]= odd_even_transposition(A )
print("Sorted List\n" )
print(*A )
if __name__ == "__main__":
main()
| 718 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
lowercase__ : str= AutoTokenizer.from_pretrained("google/mt5-small" )
lowercase__ : Tuple= tokenizer("Hello there" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= tokenizer("Hi I am" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ).loss
lowercase__ : int= -tf.math.reduce_mean(snake_case__ ).numpy()
lowercase__ : int= -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 85 | 0 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
a : Any = logging.get_logger(__name__)
set_seed(770)
a : List[str] = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
a : Any = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
a : str = os.path.dirname(os.path.abspath(__file__))
a : Optional[Any] = os.path.join(os.path.expanduser("""~"""), """.cache""")
a : int = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""")
def lowercase__(A , A=False ) ->Tuple:
"""simple docstring"""
lowercase__ : List[Any]= model_type
if use_small:
key += "_small"
return os.path.join(A , REMOTE_MODEL_PATHS[key]["file_name"] )
def lowercase__(A , A ) ->str:
"""simple docstring"""
os.makedirs(A , exist_ok=A )
hf_hub_download(repo_id=A , filename=A , local_dir=A )
def lowercase__(A , A , A=False , A="text" ) ->int:
"""simple docstring"""
if model_type == "text":
lowercase__ : Dict= BarkSemanticModel
lowercase__ : Optional[int]= BarkSemanticConfig
lowercase__ : List[str]= BarkSemanticGenerationConfig
elif model_type == "coarse":
lowercase__ : Optional[int]= BarkCoarseModel
lowercase__ : Tuple= BarkCoarseConfig
lowercase__ : Any= BarkCoarseGenerationConfig
elif model_type == "fine":
lowercase__ : Tuple= BarkFineModel
lowercase__ : List[Any]= BarkFineConfig
lowercase__ : str= BarkFineGenerationConfig
else:
raise NotImplementedError()
lowercase__ : Dict= f'''{model_type}_small''' if use_small else model_type
lowercase__ : Dict= REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(A ):
logger.info(f'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info["repo_id"] , model_info["file_name"] )
lowercase__ : Optional[Any]= torch.load(A , map_location=A )
# this is a hack
lowercase__ : Union[str, Any]= checkpoint["model_args"]
if "input_vocab_size" not in model_args:
lowercase__ : str= model_args["vocab_size"]
lowercase__ : List[Any]= model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
lowercase__ : Any= model_args.pop("n_head" )
lowercase__ : Tuple= model_args.pop("n_embd" )
lowercase__ : int= model_args.pop("n_layer" )
lowercase__ : Optional[Any]= ConfigClass(**checkpoint["model_args"] )
lowercase__ : str= ModelClass(config=A )
lowercase__ : Union[str, Any]= GenerationConfigClass()
lowercase__ : Any= model_generation_config
lowercase__ : Optional[int]= checkpoint["model"]
# fixup checkpoint
lowercase__ : Optional[Any]= "_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(A ):
# replace part of the key with corresponding layer name in HF implementation
lowercase__ : List[Any]= k[len(A ) :]
for old_layer_name in new_layer_name_dict:
lowercase__ : Tuple= new_k.replace(A , new_layer_name_dict[old_layer_name] )
lowercase__ : List[Any]= state_dict.pop(A )
lowercase__ : Tuple= set(state_dict.keys() ) - set(model.state_dict().keys() )
lowercase__ : Optional[Any]= {k for k in extra_keys if not k.endswith(".attn.bias" )}
lowercase__ : str= set(model.state_dict().keys() ) - set(state_dict.keys() )
lowercase__ : Optional[Any]= {k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(A ) != 0:
raise ValueError(f'''extra keys found: {extra_keys}''' )
if len(A ) != 0:
raise ValueError(f'''missing keys: {missing_keys}''' )
model.load_state_dict(A , strict=A )
lowercase__ : Optional[Any]= model.num_parameters(exclude_embeddings=A )
lowercase__ : Union[str, Any]= checkpoint["best_val_loss"].item()
logger.info(f'''model loaded: {round(n_params/1e6 , 1 )}M params, {round(A , 3 )} loss''' )
model.eval()
model.to(A )
del checkpoint, state_dict
return model
def lowercase__(A , A=False , A="text" ) ->Optional[int]:
"""simple docstring"""
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
lowercase__ : Optional[int]= "cpu" # do conversion on cpu
lowercase__ : List[Any]= _get_ckpt_path(A , use_small=A )
lowercase__ : Optional[int]= _load_model(A , A , model_type=A , use_small=A )
# load bark initial model
lowercase__ : Tuple= _bark_load_model(A , "cpu" , model_type=A , use_small=A )
if model_type == "text":
lowercase__ : Optional[Any]= bark_model["model"]
if model.num_parameters(exclude_embeddings=A ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
lowercase__ : List[Any]= 5
lowercase__ : int= 10
if model_type in ["text", "coarse"]:
lowercase__ : Union[str, Any]= torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
lowercase__ : str= bark_model(A )[0]
lowercase__ : List[Any]= model(A )
# take last logits
lowercase__ : List[str]= output_new_model_total.logits[:, [-1], :]
else:
lowercase__ : Dict= 3
lowercase__ : Optional[int]= 8
lowercase__ : Tuple= torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
lowercase__ : Optional[Any]= model(A , A )
lowercase__ : List[str]= bark_model(A , A )
lowercase__ : Union[str, Any]= output_new_model_total.logits
# any output difference should come from differences in the self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("initial and new outputs are not equal" )
Path(A ).mkdir(exist_ok=A )
model.save_pretrained(A )
def lowercase__(A , A , A , A , A , A , ) ->List[str]:
"""simple docstring"""
lowercase__ : Any= os.path.join(A , A )
lowercase__ : int= BarkSemanticConfig.from_pretrained(os.path.join(A , "config.json" ) )
lowercase__ : List[str]= BarkCoarseConfig.from_pretrained(os.path.join(A , "config.json" ) )
lowercase__ : str= BarkFineConfig.from_pretrained(os.path.join(A , "config.json" ) )
lowercase__ : Optional[int]= EncodecConfig.from_pretrained("facebook/encodec_24khz" )
lowercase__ : str= BarkSemanticModel.from_pretrained(A )
lowercase__ : int= BarkCoarseModel.from_pretrained(A )
lowercase__ : Dict= BarkFineModel.from_pretrained(A )
lowercase__ : Optional[int]= EncodecModel.from_pretrained("facebook/encodec_24khz" )
lowercase__ : Union[str, Any]= BarkConfig.from_sub_model_configs(
A , A , A , A )
lowercase__ : str= BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
lowercase__ : str= BarkModel(A )
lowercase__ : Any= semantic
lowercase__ : Union[str, Any]= coarseAcoustic
lowercase__ : List[str]= fineAcoustic
lowercase__ : List[Any]= codec
lowercase__ : List[str]= bark_generation_config
Path(A ).mkdir(exist_ok=A )
bark.save_pretrained(A , repo_id=A , push_to_hub=A )
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
a : Union[str, Any] = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 719 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "BridgeTowerImageProcessor"
__lowerCamelCase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
def __call__( self , snake_case__ , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[int]= self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
# add pixel_values + pixel_mask
lowercase__ : Optional[int]= self.image_processor(
snake_case__ , return_tensors=snake_case__ , do_normalize=snake_case__ , do_center_crop=snake_case__ , **snake_case__ )
encoding.update(snake_case__ )
return encoding
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.tokenizer.model_input_names
lowercase__ : List[Any]= self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 85 | 0 |
"""simple docstring"""
def lowercase__(A , A ) ->int:
"""simple docstring"""
lowercase__ : int= 1 # To keep the calculated value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
lowercase__ : List[str]= n - k
# Calculate C(n,k)
for i in range(A ):
result *= n - i
result //= i + 1
return result
def lowercase__(A ) ->int:
"""simple docstring"""
return binomial_coefficient(2 * node_count , A ) // (node_count + 1)
def lowercase__(A ) ->int:
"""simple docstring"""
if n < 0:
raise ValueError("factorial() not defined for negative values" )
lowercase__ : Any= 1
for i in range(1 , n + 1 ):
result *= i
return result
def lowercase__(A ) ->int:
"""simple docstring"""
return catalan_number(A ) * factorial(A )
if __name__ == "__main__":
a : Optional[int] = int(input("""Enter the number of nodes: """).strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
F"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
F"""binary trees and {catalan_number(node_count)} binary search trees."""
)
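# A small cross-check of the counts above using only the standard library:
# Catalan(n) = C(2n, n) // (n + 1) gives the number of binary search tree shapes on n
# distinct keys, and Catalan(n) * n! gives the number of labeled binary trees. This is
# an illustrative sketch and does not call the functions defined above.
import math
def _catalan_via_comb(n: int) -> int:
    return math.comb(2 * n, n) // (n + 1)
assert _catalan_via_comb(3) == 5 # 5 binary search trees on 3 nodes
assert _catalan_via_comb(3) * math.factorial(3) == 30 # 30 binary trees on 3 nodes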
| 720 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= tempfile.mkdtemp()
lowercase__ : Optional[Any]= 8
# DPR tok
lowercase__ : Tuple= [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowercase__ : Any= os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : Any= os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
lowercase__ : List[Any]= [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase__ : Tuple= dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowercase__ : Any= ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : Tuple= {"unk_token": "<unk>"}
lowercase__ : int= os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : List[str]= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : str= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.get_dummy_dataset()
lowercase__ : Optional[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= dataset
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= self.get_dummy_dataset()
lowercase__ : Tuple= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
lowercase__ : Tuple= os.path.join(self.tmpdirname , "dataset" )
lowercase__ : Optional[Any]= os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
lowercase__ : List[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case__ ) , )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
lowercase__ : Optional[int]= os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
lowercase__ : int= os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
lowercase__ : str= {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(snake_case__ , open(snake_case__ , "wb" ) )
lowercase__ : List[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
lowercase__ : Optional[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= self.get_dummy_dataset()
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Any= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Tuple= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : str= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= 1
lowercase__ : str= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : Optional[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : int= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Union[str, Any]= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : int= self.get_dummy_legacy_index_retriever()
lowercase__ : Optional[Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[Any]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : List[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : str= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
import torch
lowercase__ : str= 1
lowercase__ : Union[str, Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : str= [[5, 7], [10, 11]]
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Dict= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
lowercase__, lowercase__, lowercase__ : Optional[int]= (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , np.ndarray )
lowercase__ : Any= retriever(
snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ , return_tensors="pt" , )
lowercase__, lowercase__, lowercase__, lowercase__ : Tuple= ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.get_dpr_ctx_encoder_tokenizer()
lowercase__ : Dict= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
retriever.set_ctx_encoder_tokenizer(snake_case__ )
lowercase__ : List[str]= [[5, 7], [10, 11]]
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : List[Any]= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
self.assertEqual(
len(snake_case__ ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case__ ) # check for doc-token-related keys in the dictionary.
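# A small, self-contained sketch of why the assertions above expect doc "1" for the
# all-ones query and doc "0" for the all-minus-ones query: with METRIC_INNER_PRODUCT,
# retrieval reduces to an argmax of query/document dot products. The vectors below are
# illustrative stand-ins for the test fixtures, not the fixtures themselves.
import numpy as np
_doc_embeds = np.stack([np.ones(8), 2 * np.ones(8)]) # docs "0" and "1"
_queries = np.stack([np.ones(8), -np.ones(8)])
_scores = _queries @ _doc_embeds.T # inner products, shape (2, 2)
assert _scores[0].argmax() == 1 # ones query -> doc "1"
assert _scores[1].argmax() == 0 # -ones query -> doc "0"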
| 85 | 0 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a : int = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
a : Any = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
a : Optional[Any] = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each prediction
should be a string with tokens separated by spaces.
references: list of references for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="auto" , snake_case__=-1 , snake_case__=0.9 , snake_case__=5 , snake_case__=500 , snake_case__="gpt2-large" , snake_case__=-1 , snake_case__=1024 , snake_case__=25 , snake_case__=5 , snake_case__=True , snake_case__=25 , ):
'''simple docstring'''
lowercase__ : str= compute_mauve(
p_text=snake_case__ , q_text=snake_case__ , p_features=snake_case__ , q_features=snake_case__ , p_tokens=snake_case__ , q_tokens=snake_case__ , num_buckets=snake_case__ , pca_max_data=snake_case__ , kmeans_explained_var=snake_case__ , kmeans_num_redo=snake_case__ , kmeans_max_iter=snake_case__ , featurize_model_name=snake_case__ , device_id=snake_case__ , max_text_length=snake_case__ , divergence_curve_discretization_size=snake_case__ , mauve_scaling_factor=snake_case__ , verbose=snake_case__ , seed=snake_case__ , )
return out
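# A minimal sketch of the KL-divergence building block behind MAUVE's divergence
# frontier, computed between two quantized histograms like the `p_hist`/`q_hist`
# described above. Illustrative numpy code only, not the mauve-text implementation.
import numpy as np
def _kl_divergence(p, q, eps=1e-12):
    p = np.asarray(p, dtype=float) / np.sum(p)
    q = np.asarray(q, dtype=float) / np.sum(q)
    return float(np.sum(p * (np.log(p + eps) - np.log(q + eps))))
assert _kl_divergence([0.5, 0.5], [0.5, 0.5]) < 1e-6 # identical histograms diverge by ~0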
| 721 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "AutoImageProcessor"
__lowerCamelCase = "AutoTokenizer"
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
lowercase__ : List[Any]= self.image_processor
def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowercase__ : Tuple= self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
lowercase__ : str= self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
lowercase__ : Any= image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 85 | 0 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def lowercase__(A , A , A ) ->List[Any]:
"""simple docstring"""
lowercase__ : Any= 0
if start < end:
lowercase__ : Optional[int]= randint(A , A )
lowercase__ : Any= a[end]
lowercase__ : List[str]= a[pivot]
lowercase__ : Dict= temp
lowercase__ : Tuple= _in_place_partition(A , A , A )
count += _in_place_quick_sort(A , A , p - 1 )
count += _in_place_quick_sort(A , p + 1 , A )
return count
def lowercase__(A , A , A ) ->Tuple:
"""simple docstring"""
lowercase__ : int= 0
lowercase__ : Optional[Any]= randint(A , A )
lowercase__ : Union[str, Any]= a[end]
lowercase__ : int= a[pivot]
lowercase__ : Tuple= temp
lowercase__ : Tuple= start - 1
for index in range(A , A ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowercase__ : Dict= new_pivot_index + 1
lowercase__ : Optional[int]= a[new_pivot_index]
lowercase__ : Dict= a[index]
lowercase__ : Tuple= temp
lowercase__ : str= a[new_pivot_index + 1]
lowercase__ : Dict= a[end]
lowercase__ : List[str]= temp
return new_pivot_index + 1, count
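# A compact, self-contained reference for what the two functions above do together:
# randomized in-place quicksort (Lomuto partition) that also counts element
# comparisons. It is an illustrative sketch, not a drop-in replacement.
from random import randint as _randint
def _quick_sort_count(a, start, end):
    if start >= end:
        return 0
    pivot = _randint(start, end)
    a[end], a[pivot] = a[pivot], a[end]
    boundary, count = start - 1, 0
    for index in range(start, end):
        count += 1 # one comparison against the pivot value
        if a[index] < a[end]:
            boundary += 1
            a[boundary], a[index] = a[index], a[boundary]
    a[boundary + 1], a[end] = a[end], a[boundary + 1]
    p = boundary + 1
    return count + _quick_sort_count(a, start, p - 1) + _quick_sort_count(a, p + 1, end)
# _quick_sort_count(values, 0, len(values) - 1) sorts `values` in place and returns
# the number of comparisons performed.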
a : Optional[int] = TemporaryFile()
a : int = 100 # 1000 elements are to be sorted
a : Tuple = 0, 1 # mean and standard deviation
a : int = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
a : int = np.load(outfile)
a : str = len(M) - 1
a : Union[str, Any] = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
| 700 |
"""simple docstring"""
a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ):
lowercase__ : Union[str, Any]= f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(A )
lowercase__ : str= "".join(bin(A )[2:].zfill(8 ) for byte in data )
lowercase__ : Tuple= len(A ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ : Union[str, Any]= b"=" * ((6 - len(A ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(A ) % 6)
else:
lowercase__ : str= b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A ) , 6 ) ).encode()
+ padding
)
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ) and not isinstance(A , A ):
lowercase__ : str= (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(A )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(A , A ):
try:
lowercase__ : Optional[Any]= encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ : List[Any]= encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ : str= encoded_data[:-padding]
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ : Any= [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A ) , 8 )
]
return bytes(A )
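# A small sanity check of the 6-bit grouping implemented above, re-deriving the base64
# encoding of b"abc" by hand (using the B64_CHARSET table defined at the top of this
# file) and comparing it with the standard library. It does not call the renamed
# functions above; it only illustrates how the scheme can be validated.
import base64
_data = b"abc" # 3 bytes -> 24 bits -> exactly four 6-bit groups, no padding
_bits = "".join(bin(byte)[2:].zfill(8) for byte in _data)
_chars = "".join(B64_CHARSET[int(_bits[i : i + 6], 2)] for i in range(0, len(_bits), 6))
assert _chars.encode() == base64.b64encode(_data) # both give b"YWJj"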
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | 0 |
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
a : Optional[Any] = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
a : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS)
a : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
a : List[Any] = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (although we don't have a training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (although we don't have a training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def lowercase__(A , A , A , A ) ->Dict:
"""simple docstring"""
lowercase__ : Optional[Any]= False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'''config.{attribute}''' in modeling_source
or f'''getattr(config, "{attribute}"''' in modeling_source
or f'''getattr(self.config, "{attribute}"''' in modeling_source
):
lowercase__ : Optional[int]= True
# Deal with multi-line cases
elif (
re.search(
Rf'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , A , )
is not None
):
lowercase__ : Tuple= True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
lowercase__ : int= True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
lowercase__ : Optional[int]= [
"bos_index",
"eos_index",
"pad_index",
"unk_index",
"mask_index",
"image_size",
"use_cache",
"out_features",
"out_indices",
]
lowercase__ : List[Any]= ["encoder_no_repeat_ngram_size"]
# Special cases to be allowed
lowercase__ : Any= True
if not attribute_used:
lowercase__ : List[str]= False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
lowercase__ : Dict= True
elif attribute in ["tie_word_embeddings"] and default_value is False:
lowercase__ : Dict= True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
lowercase__ : str= True
elif attribute.endswith("_token_id" ):
lowercase__ : Optional[Any]= True
# configuration class specific cases
if not case_allowed:
lowercase__ : Optional[Any]= SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
lowercase__ : Dict= allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
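# A minimal illustration of the multi-line `getattr` detection above: the regex
# tolerates arbitrary whitespace (including newlines) between `getattr(`, the config
# object and the attribute name. The attribute name below is made up for the example.
import re as _re
_example_source = 'getattr(\n    self.config,\n    "hidden_size",\n    64,\n)'
assert _re.search(
    r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"',
    _example_source,
) is not None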
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__ : Optional[int]= dict(inspect.signature(config_class.__init__ ).parameters )
lowercase__ : List[str]= [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
lowercase__ : str= [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
lowercase__ : str= {}
if len(config_class.attribute_map ) > 0:
lowercase__ : str= {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
lowercase__ : int= inspect.getsourcefile(A )
lowercase__ : Any= os.path.dirname(A )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
lowercase__ : str= [os.path.join(A , A ) for fn in os.listdir(A ) if fn.startswith("modeling_" )]
# Get the source code strings
lowercase__ : Tuple= []
for path in modeling_paths:
if os.path.isfile(A ):
with open(A ) as fp:
modeling_sources.append(fp.read() )
lowercase__ : List[Any]= []
for config_param, default_value in zip(A , A ):
# `attributes` here is all the variant names for `config_param`
lowercase__ : Dict= [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(A , A , A , A ):
unused_attributes.append(attributes[0] )
return sorted(A )
def lowercase__() ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any]= {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
lowercase__ : List[str]= [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda A : inspect.isclass(A )
and issubclass(A , A )
and inspect.getmodule(A ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
lowercase__ : List[Any]= check_config_attributes_being_used(A )
if len(A ) > 0:
lowercase__ : List[Any]= unused_attributes
if len(A ) > 0:
lowercase__ : List[str]= "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
for name, attributes in configs_with_unused_attributes.items():
error += f'''{name}: {attributes}\n'''
raise ValueError(A )
if __name__ == "__main__":
check_config_attributes()
| 701 |
"""simple docstring"""
from __future__ import annotations
def lowercase__(A ) ->list[int]: # This function is recursive
"""simple docstring"""
lowercase__ : int= len(A )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowercase__ : str= array[0]
lowercase__ : Optional[Any]= False
lowercase__ : Any= 1
lowercase__ : list[int]= []
while not is_found and i < array_length:
if array[i] < pivot:
lowercase__ : Union[str, Any]= True
lowercase__ : List[str]= [element for element in array[i:] if element >= array[i]]
lowercase__ : Union[str, Any]= longest_subsequence(A )
if len(A ) > len(A ):
lowercase__ : List[str]= temp_array
else:
i += 1
lowercase__ : List[str]= [element for element in array[1:] if element >= pivot]
lowercase__ : List[str]= [pivot, *longest_subsequence(A )]
if len(A ) > len(A ):
return temp_array
else:
return longest_subseq
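# A short dynamic-programming cross-check for the recursive routine above: it computes
# only the *length* of the longest non-decreasing subsequence in O(n^2), which can be
# compared against len(...) of the recursive result. Illustrative sketch, not part of
# the original module.
def _longest_non_decreasing_length(array: list[int]) -> int:
    if not array:
        return 0
    lengths = [1] * len(array)
    for i in range(1, len(array)):
        for j in range(i):
            if array[j] <= array[i]:
                lengths[i] = max(lengths[i], lengths[j] + 1)
    return max(lengths)
assert _longest_non_decreasing_length([10, 22, 9, 33, 21, 50, 41, 60, 80]) == 6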
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "AutoImageProcessor"
__lowerCamelCase = "AutoTokenizer"
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
lowercase__ : List[Any]= self.image_processor
def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowercase__ : Tuple= self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
lowercase__ : str= self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
lowercase__ : Any= image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 702 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
a : List[str] = parser.parse_args()
a : List[str] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
a : Optional[Any] = CLIPImageProcessor()
a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
a : Tuple = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 85 | 0 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
a : Tuple = logging.get_logger(__name__)
def lowercase__(A , A ) ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict= set()
lowercase__ : Optional[int]= []
def parse_line(A ):
for line in fp:
if isinstance(A , A ):
lowercase__ : Dict= line.decode("UTF-8" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(" " ):
# process a single warning and move it to `selected_warnings`.
if len(A ) > 0:
lowercase__ : Tuple= "\n".join(A )
# Only keep the warnings specified in `targets`
if any(f''': {x}: ''' in warning for x in targets ):
selected_warnings.add(A )
buffer.clear()
continue
else:
lowercase__ : Union[str, Any]= line.strip()
buffer.append(A )
if from_gh:
for filename in os.listdir(A ):
lowercase__ : Optional[Any]= os.path.join(A , A )
if not os.path.isdir(A ):
# read the file
if filename != "warnings.txt":
continue
with open(A ) as fp:
parse_line(A )
else:
try:
with zipfile.ZipFile(A ) as z:
for filename in z.namelist():
if not os.path.isdir(A ):
# read the file
if filename != "warnings.txt":
continue
with z.open(A ) as fp:
parse_line(A )
except Exception:
logger.warning(
f'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )
return selected_warnings
def lowercase__(A , A ) ->List[str]:
"""simple docstring"""
lowercase__ : Tuple= set()
lowercase__ : List[Any]= [os.path.join(A , A ) for p in os.listdir(A ) if (p.endswith(".zip" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(A , A ) )
return selected_warnings
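# A tiny, self-contained illustration of the parsing rule above: indented lines are
# accumulated into the current warning record, a non-indented line flushes it, and a
# record is kept only if it mentions one of the target warning categories. The sample
# lines below are made up.
def _collect_warnings_sketch(lines, targets=("DeprecationWarning",)):
    selected, buffer = set(), []
    for line in list(lines) + ["flush"]: # non-indented sentinel flushes the last record
        if line.startswith(" "):
            buffer.append(line.strip())
            continue
        if buffer:
            record = "\n".join(buffer)
            if any(f": {target}: " in record for target in targets):
                selected.add(record)
            buffer = []
    return selected
_sample_lines = [
    "test_old.py::test_api",
    "  src/foo.py:1: DeprecationWarning: old_api is deprecated",
    "    old_api()",
    "test_minor.py::test_other",
    "  src/bar.py:2: UserWarning: something minor",
]
assert len(_collect_warnings_sketch(_sample_lines)) == 1 # only the DeprecationWarning record survives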
if __name__ == "__main__":
def lowercase__(A ) ->Dict:
"""simple docstring"""
return values.split("," )
a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
a : Any = parser.parse_args()
a : Optional[Any] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
a : Any = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
a : str = extract_warnings(args.output_dir, args.targets)
a : List[str] = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 703 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
a : Optional[Any] = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]:
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models )
lowercase__ : List[Any]= config_class.from_json_file(A )
lowercase__ : Any= True
lowercase__ : List[str]= True
print(f'''Building TensorFlow model from configuration: {config}''' )
lowercase__ : Optional[int]= model_class(A )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
lowercase__ : List[str]= cached_file(
A , A , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A )
if compare_with_pt_model:
lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network
lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" )
lowercase__ : Union[str, Any]= pt_model_class.from_pretrained(
pretrained_model_name_or_path=A , config=A , state_dict=A )
with torch.no_grad():
lowercase__ : str= pt_model(**pt_model.dummy_inputs )
lowercase__ : Tuple= pto[0].numpy()
lowercase__ : List[Any]= tfo[0].numpy()
lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) )
print(f'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}'''
# Save the TensorFlow model
print(f'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(A , save_format="h5" )
def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]:
"""simple docstring"""
if args_model_type is None:
lowercase__ : Tuple= list(MODEL_CLASSES.keys() )
else:
lowercase__ : Optional[int]= [args_model_type]
for j, model_type in enumerate(A , start=1 ):
print("=" * 100 )
print(f''' Converting model type {j}/{len(A )}: {model_type}''' )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowercase__ : int= list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowercase__ : Any= model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(A , A ) , start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
lowercase__ : Any= model_shortcut_name
elif only_convert_finetuned_models:
print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models )
else:
lowercase__ : Union[str, Any]= config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowercase__ : str= cached_file(A , A , force_download=not use_cached_models )
else:
lowercase__ : Any= model_shortcut_name
if os.path.isfile(A ):
lowercase__ : Dict= "converted_model"
convert_pt_checkpoint_to_tf(
model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , )
if remove_cached_files:
os.remove(A )
os.remove(A )
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
a : List[str] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 85 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class __UpperCAmelCase( unittest.TestCase ):
def UpperCAmelCase_ ( self ):
'''simple docstring'''
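# Write a toy CLIP tokenizer vocabulary/merges file and a ViT image processor config into a temp dir.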
lowercase__ : Optional[Any]= tempfile.mkdtemp()
# fmt: off
lowercase__ : Optional[Any]= ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowercase__ : Any= dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowercase__ : str= ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowercase__ : Union[str, Any]= {"unk_token": "<unk>"}
lowercase__ : str= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Optional[int]= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
lowercase__ : List[str]= {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
lowercase__ : Optional[int]= os.path.join(self.tmpdirname , snake_case__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(snake_case__ , snake_case__ )
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **snake_case__ )
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__ : List[str]= [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.get_tokenizer()
lowercase__ : Dict= self.get_rust_tokenizer()
lowercase__ : List[str]= self.get_image_processor()
lowercase__ : int= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ : Union[str, Any]= CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case__ )
lowercase__ : Union[str, Any]= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ : List[Any]= CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , snake_case__ )
self.assertIsInstance(processor_fast.tokenizer , snake_case__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , snake_case__ )
self.assertIsInstance(processor_fast.image_processor , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Optional[int]= self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ : Union[str, Any]= self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
lowercase__ : List[str]= CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= self.get_image_processor()
lowercase__ : Dict= self.get_tokenizer()
lowercase__ : int= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : Tuple= self.prepare_image_inputs()
lowercase__ : Union[str, Any]= image_processor(snake_case__ , return_tensors="np" )
lowercase__ : Any= processor(images=snake_case__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_image_processor()
lowercase__ : str= self.get_tokenizer()
lowercase__ : List[str]= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : Tuple= "lower newer"
lowercase__ : Union[str, Any]= processor(text=snake_case__ )
lowercase__ : List[Any]= tokenizer(snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.get_image_processor()
lowercase__ : Optional[int]= self.get_tokenizer()
lowercase__ : Tuple= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : Any= "lower newer"
lowercase__ : Any= self.prepare_image_inputs()
lowercase__ : List[str]= processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.get_image_processor()
lowercase__ : List[Any]= self.get_tokenizer()
lowercase__ : Optional[Any]= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : Any= self.prepare_image_inputs()
lowercase__ : List[Any]= self.prepare_image_inputs()
lowercase__ : Dict= processor(images=snake_case__ , visual_prompt=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= self.get_image_processor()
lowercase__ : List[Any]= self.get_tokenizer()
lowercase__ : Any= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : List[Any]= [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : List[Any]= processor.batch_decode(snake_case__ )
lowercase__ : Optional[int]= tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
| 704 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 85 | 0 |